Merge remote-tracking branch 'upstream/v1.8.x' into merge_18x
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 094e43e..cb32281 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -4,3 +4,4 @@
 /**/OWNERS @markdroth @nicolasnoble @ctiller
 /bazel/** @nicolasnoble @dgquintas @ctiller
 /src/core/ext/filters/client_channel/** @markdroth @dgquintas @ctiller
+/tools/run_tests/performance/** @ncteisen @matt-kwong @ctiller
diff --git a/.gitignore b/.gitignore
index 5ccad2e..2b35c5f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,6 +56,7 @@
 
 # Temporary test reports
 report.xml
+*/sponge_log.xml
 latency_trace.txt
 latency_trace.*.txt
 
@@ -120,6 +121,7 @@
 tags
 
 # perf data
+memory_usage.csv
 perf.data
 perf.data.old
 
diff --git a/BUILD b/BUILD
index ef345e5..dba6592 100644
--- a/BUILD
+++ b/BUILD
@@ -38,12 +38,17 @@
     values = {"define": "grpc_no_ares=true"},
 )
 
+config_setting(
+    name = "remote_execution",
+    values = {"define": "GRPC_PORT_ISOLATED_RUNTIME=1"},
+)
+
 # This should be updated along with build.yaml
-g_stands_for = "generous"
+g_stands_for = "glossy"
 
-core_version = "5.0.0"
+core_version = "5.0.0-dev"
 
-version = "1.8.3"
+version = "1.9.0-dev"
 
 GPR_PUBLIC_HDRS = [
     "include/grpc/support/alloc.h",
@@ -54,7 +59,6 @@
     "include/grpc/support/avl.h",
     "include/grpc/support/cmdline.h",
     "include/grpc/support/cpu.h",
-    "include/grpc/support/histogram.h",
     "include/grpc/support/host_port.h",
     "include/grpc/support/log.h",
     "include/grpc/support/log_windows.h",
@@ -447,7 +451,6 @@
         "src/core/lib/support/env_posix.cc",
         "src/core/lib/support/env_windows.cc",
         "src/core/lib/support/fork.cc",
-        "src/core/lib/support/histogram.cc",
         "src/core/lib/support/host_port.cc",
         "src/core/lib/support/log.cc",
         "src/core/lib/support/log_android.cc",
@@ -540,6 +543,28 @@
 )
 
 grpc_cc_library(
+    name = "debug_location",
+    public_hdrs = ["src/core/lib/support/debug_location.h"],
+    language = "c++",
+)
+
+grpc_cc_library(
+    name = "ref_counted",
+    public_hdrs = ["src/core/lib/support/ref_counted.h"],
+    language = "c++",
+    deps = [
+        "grpc_trace",
+        "debug_location",
+    ],
+)
+
+grpc_cc_library(
+    name = "ref_counted_ptr",
+    public_hdrs = ["src/core/lib/support/ref_counted_ptr.h"],
+    language = "c++",
+)
+
+grpc_cc_library(
     name = "grpc_base_c",
     srcs = [
         "src/core/lib/backoff/backoff.cc",
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 461b4d2..eed1205 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -24,7 +24,7 @@
 cmake_minimum_required(VERSION 2.8)
 
 set(PACKAGE_NAME      "grpc")
-set(PACKAGE_VERSION   "1.8.3")
+set(PACKAGE_VERSION   "1.9.0-dev")
 set(PACKAGE_STRING    "${PACKAGE_NAME} ${PACKAGE_VERSION}")
 set(PACKAGE_TARNAME   "${PACKAGE_NAME}-${PACKAGE_VERSION}")
 set(PACKAGE_BUGREPORT "https://github.com/grpc/grpc/issues/")
@@ -102,181 +102,12 @@
   set(_gRPC_PROTOBUF_LIBRARY_NAME "libprotobuf")
 endif()
 
-if("${gRPC_ZLIB_PROVIDER}" STREQUAL "module")
-  if(NOT ZLIB_ROOT_DIR)
-    set(ZLIB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib)
-  endif()
-  set(ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}")
-  if(EXISTS "${ZLIB_ROOT_DIR}/CMakeLists.txt")
-      # TODO(jtattermusch): workaround for https://github.com/madler/zlib/issues/218
-      include_directories(${ZLIB_INCLUDE_DIR})
-
-      add_subdirectory(${ZLIB_ROOT_DIR} third_party/zlib)
-      if(TARGET zlibstatic)
-          set(_gRPC_ZLIB_LIBRARIES zlibstatic)
-      endif()
-  else()
-      message(WARNING "gRPC_ZLIB_PROVIDER is \"module\" but ZLIB_ROOT_DIR is wrong")
-  endif()
-  if(gRPC_INSTALL)
-    message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_ZLIB_PROVIDER is \"module\"")
-    set(gRPC_INSTALL FALSE)
-  endif()
-elseif("${gRPC_ZLIB_PROVIDER}" STREQUAL "package")
-  find_package(ZLIB REQUIRED)
-  set(_gRPC_ZLIB_LIBRARIES ${ZLIB_LIBRARIES})
-  set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n  find_package(ZLIB)\nendif()")
-endif()
-
-if("${gRPC_CARES_PROVIDER}" STREQUAL "module")
-  if(NOT CARES_ROOT_DIR)
-    set(CARES_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/cares/cares)
-  endif()
-  set(CARES_SHARED OFF CACHE BOOL "disable shared library")
-  set(CARES_STATIC ON CACHE BOOL "link cares statically")
-  set(CARES_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/cares/cares")
-  add_subdirectory(third_party/cares/cares)
-  if(TARGET c-ares)
-    set(_gRPC_CARES_LIBRARIES c-ares)
-  endif()
-  if(gRPC_INSTALL)
-    message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_CARES_PROVIDER is \"module\"")
-    set(gRPC_INSTALL FALSE)
-  endif()
-elseif("${gRPC_CARES_PROVIDER}" STREQUAL "package")
-  find_package(c-ares REQUIRED CONFIG)
-  if(TARGET c-ares::cares)
-    set(_gRPC_CARES_LIBRARIES c-ares::cares)
-  endif()
-  set(_gRPC_FIND_CARES "if(NOT c-ares_FOUND)\n  find_package(c-ares CONFIG)\nendif()")
-endif()
-
-if("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "module")
-  # Building the protobuf tests require gmock what is not part of a standard protobuf checkout.
-  # Disable them unless they are explicitly requested from the cmake command line (when we assume
-  # gmock is downloaded to the right location inside protobuf).
-  if(NOT protobuf_BUILD_TESTS)
-    set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests")
-  endif()
-  # Disable building protobuf with zlib. Building protobuf with zlib breaks
-  # the build if zlib is not installed on the system.
-  if(NOT protobuf_WITH_ZLIB)
-    set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build protobuf with zlib.")
-  endif()
-  if(NOT PROTOBUF_ROOT_DIR)
-    set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf)
-  endif()
-  set(PROTOBUF_WELLKNOWN_IMPORT_DIR ${PROTOBUF_ROOT_DIR}/src)
-  if(EXISTS "${PROTOBUF_ROOT_DIR}/cmake/CMakeLists.txt")
-    set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Link static runtime libraries")
-    add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf)
-    if(TARGET ${_gRPC_PROTOBUF_LIBRARY_NAME})
-      set(_gRPC_PROTOBUF_LIBRARIES ${_gRPC_PROTOBUF_LIBRARY_NAME})
-    endif()
-    if(TARGET libprotoc)
-      set(_gRPC_PROTOBUF_PROTOC_LIBRARIES libprotoc)
-    endif()
-    if(TARGET protoc)
-      set(_gRPC_PROTOBUF_PROTOC protoc)
-      set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
-    endif()
-  else()
-      message(WARNING "gRPC_PROTOBUF_PROVIDER is \"module\" but PROTOBUF_ROOT_DIR is wrong")
-  endif()
-  if(gRPC_INSTALL)
-    message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_PROTOBUF_PROVIDER is \"module\"")
-    set(gRPC_INSTALL FALSE)
-  endif()
-elseif("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package")
-  find_package(Protobuf REQUIRED ${gRPC_PROTOBUF_PACKAGE_TYPE})
-  if(Protobuf_FOUND OR PROTOBUF_FOUND)
-    if(TARGET protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME})
-      set(_gRPC_PROTOBUF_LIBRARIES protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME})
-    else()
-      set(_gRPC_PROTOBUF_LIBRARIES ${PROTOBUF_LIBRARIES})
-    endif()
-    if(TARGET protobuf::libprotoc)
-      set(_gRPC_PROTOBUF_PROTOC_LIBRARIES protobuf::libprotoc)
-    else()
-      set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ${PROTOBUF_PROTOC_LIBRARIES})
-    endif()
-    if(TARGET protobuf::protoc)
-      set(_gRPC_PROTOBUF_PROTOC protobuf::protoc)
-      set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protobuf::protoc>)
-    else()
-      set(_gRPC_PROTOBUF_PROTOC ${PROTOBUF_PROTOC_EXECUTABLE})
-      set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE})
-    endif()
-    set(_gRPC_FIND_PROTOBUF "if(NOT Protobuf_FOUND AND NOT PROTOBUF_FOUND)\n  find_package(Protobuf ${gRPC_PROTOBUF_PACKAGE_TYPE})\nendif()")
-  endif()
-  if(PROTOBUF_FOUND)
-    include_directories(${PROTOBUF_INCLUDE_DIRS})
-  endif()
-  set(PROTOBUF_WELLKNOWN_IMPORT_DIR /usr/local/include)
-endif()
-
-if("${gRPC_SSL_PROVIDER}" STREQUAL "module")
-  if(NOT BORINGSSL_ROOT_DIR)
-    set(BORINGSSL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/boringssl)
-  endif()
-  if(EXISTS "${BORINGSSL_ROOT_DIR}/CMakeLists.txt")
-    set(OPENSSL_NO_ASM ON)  # make boringssl buildable with Visual Studio
-    add_subdirectory(${BORINGSSL_ROOT_DIR} third_party/boringssl)
-    if(TARGET ssl)
-      set(_gRPC_SSL_LIBRARIES ssl)
-    endif()
-  else()
-      message(WARNING "gRPC_SSL_PROVIDER is \"module\" but BORINGSSL_ROOT_DIR is wrong")
-  endif()
-  if(gRPC_INSTALL)
-    message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_SSL_PROVIDER is \"module\"")
-    set(gRPC_INSTALL FALSE)
-  endif()
-elseif("${gRPC_SSL_PROVIDER}" STREQUAL "package")
-  find_package(OpenSSL REQUIRED)
-  set(_gRPC_SSL_LIBRARIES ${OPENSSL_LIBRARIES})
-  set(_gRPC_FIND_SSL "if(NOT OPENSSL_FOUND)\n  find_package(OpenSSL)\nendif()")
-endif()
-
-if("${gRPC_GFLAGS_PROVIDER}" STREQUAL "module")
-  if(NOT GFLAGS_ROOT_DIR)
-    set(GFLAGS_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gflags)
-  endif()
-  if(EXISTS "${GFLAGS_ROOT_DIR}/CMakeLists.txt")
-      add_subdirectory(${GFLAGS_ROOT_DIR} third_party/gflags)
-      if(TARGET gflags_static)
-          set(_gRPC_GFLAGS_LIBRARIES gflags_static)
-      endif()
-  else()
-      message(WARNING "gRPC_GFLAGS_PROVIDER is \"module\" but GFLAGS_ROOT_DIR is wrong")
-  endif()
-elseif("${gRPC_GFLAGS_PROVIDER}" STREQUAL "package")
-  find_package(gflags)
-  if(TARGET gflags::gflags)
-    set(_gRPC_GFLAGS_LIBRARIES gflags::gflags)
-  endif()
-  set(_gRPC_FIND_GFLAGS "if(NOT gflags_FOUND)\n  find_package(gflags)\nendif()")
-endif()
-
-if("${gRPC_BENCHMARK_PROVIDER}" STREQUAL "module")
-  if(NOT BENCHMARK_ROOT_DIR)
-    set(BENCHMARK_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/benchmark)
-  endif()
-  if(EXISTS "${BENCHMARK_ROOT_DIR}/CMakeLists.txt")
-      add_subdirectory(${BENCHMARK_ROOT_DIR} third_party/benchmark)
-      if(TARGET benchmark)
-          set(_gRPC_BENCHMARK_LIBRARIES benchmark)
-      endif()
-  else()
-      message(WARNING "gRPC_BENCHMARK_PROVIDER is \"module\" but BENCHMARK_ROOT_DIR is wrong")
-  endif()
-elseif("${gRPC_BENCHMARK_PROVIDER}" STREQUAL "package")
-  find_package(benchmark)
-  if(TARGET benchmark::benchmark)
-    set(_gRPC_BENCHMARK_LIBRARIES benchmark::benchmark)
-  endif()
-  set(_gRPC_FIND_BENCHMARK "if(NOT benchmark_FOUND)\n  find_package(benchmark)\nendif()")
-endif()
+include(cmake/zlib.cmake)
+include(cmake/cares.cmake)
+include(cmake/protobuf.cmake)
+include(cmake/ssl.cmake)
+include(cmake/gflags.cmake)
+include(cmake/benchmark.cmake)
 
 if(NOT MSVC)
   set(CMAKE_C_FLAGS   "${CMAKE_C_FLAGS} -std=c99")
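
Note: the dependency-provider logic removed above (zlib, c-ares, protobuf,
BoringSSL/OpenSSL, gflags, benchmark) is replaced by per-dependency include
files under cmake/. Those new files are not part of this diff; presumably each
one carries over the corresponding deleted block more or less verbatim. A
minimal sketch of what cmake/zlib.cmake likely contains, reconstructed from the
removed zlib block above (not verified against the actual file):

    # Select zlib either from the in-tree module or an installed package and
    # record the chosen library in _gRPC_ZLIB_LIBRARIES.
    if("${gRPC_ZLIB_PROVIDER}" STREQUAL "module")
      if(NOT ZLIB_ROOT_DIR)
        set(ZLIB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib)
      endif()
      set(ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}")
      if(EXISTS "${ZLIB_ROOT_DIR}/CMakeLists.txt")
        # TODO(jtattermusch): workaround for https://github.com/madler/zlib/issues/218
        include_directories(${ZLIB_INCLUDE_DIR})
        add_subdirectory(${ZLIB_ROOT_DIR} third_party/zlib)
        if(TARGET zlibstatic)
          set(_gRPC_ZLIB_LIBRARIES zlibstatic)
        endif()
      else()
        message(WARNING "gRPC_ZLIB_PROVIDER is \"module\" but ZLIB_ROOT_DIR is wrong")
      endif()
      if(gRPC_INSTALL)
        message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_ZLIB_PROVIDER is \"module\"")
        set(gRPC_INSTALL FALSE)
      endif()
    elseif("${gRPC_ZLIB_PROVIDER}" STREQUAL "package")
      find_package(ZLIB REQUIRED)
      set(_gRPC_ZLIB_LIBRARIES ${ZLIB_LIBRARIES})
      set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n  find_package(ZLIB)\nendif()")
    endif()

The other cmake/*.cmake modules would follow the same module/package split for
their respective dependencies.
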
@@ -357,12 +188,12 @@
 add_custom_target(tools_c
   DEPENDS
   check_epollexclusive
-  gen_hpack_tables
-  gen_legal_metadata_characters
-  gen_percent_encoding_tables
   grpc_create_jwt
   grpc_print_google_default_creds_token
   grpc_verify_jwt
+  gen_hpack_tables
+  gen_legal_metadata_characters
+  gen_percent_encoding_tables
 )
 
 add_custom_target(tools_cxx
@@ -379,7 +210,6 @@
 add_dependencies(buildtests_c alloc_test)
 add_dependencies(buildtests_c alpn_test)
 add_dependencies(buildtests_c arena_test)
-add_dependencies(buildtests_c backoff_test)
 add_dependencies(buildtests_c bad_server_response_test)
 add_dependencies(buildtests_c bin_decoder_test)
 add_dependencies(buildtests_c bin_encoder_test)
@@ -427,7 +257,6 @@
 add_dependencies(buildtests_c gpr_cmdline_test)
 add_dependencies(buildtests_c gpr_cpu_test)
 add_dependencies(buildtests_c gpr_env_test)
-add_dependencies(buildtests_c gpr_histogram_test)
 add_dependencies(buildtests_c gpr_host_port_test)
 add_dependencies(buildtests_c gpr_log_test)
 add_dependencies(buildtests_c gpr_manual_constructor_test)
@@ -465,6 +294,7 @@
 if(_gRPC_PLATFORM_LINUX)
 add_dependencies(buildtests_c handshake_server_with_readahead_handshaker)
 endif()
+add_dependencies(buildtests_c histogram_test)
 add_dependencies(buildtests_c hpack_parser_test)
 add_dependencies(buildtests_c hpack_table_test)
 add_dependencies(buildtests_c http_parser_test)
@@ -632,6 +462,7 @@
 add_dependencies(buildtests_cxx alarm_cpp_test)
 add_dependencies(buildtests_cxx async_end2end_test)
 add_dependencies(buildtests_cxx auth_property_iterator_test)
+add_dependencies(buildtests_cxx backoff_test)
 add_dependencies(buildtests_cxx bdp_estimator_test)
 if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
 add_dependencies(buildtests_cxx bm_arena)
@@ -739,6 +570,8 @@
 add_dependencies(buildtests_cxx qps_worker)
 add_dependencies(buildtests_cxx reconnect_interop_client)
 add_dependencies(buildtests_cxx reconnect_interop_server)
+add_dependencies(buildtests_cxx ref_counted_ptr_test)
+add_dependencies(buildtests_cxx ref_counted_test)
 add_dependencies(buildtests_cxx secure_auth_context_test)
 if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
 add_dependencies(buildtests_cxx secure_sync_unary_ping_pong_test)
@@ -798,7 +631,6 @@
   src/core/lib/support/env_posix.cc
   src/core/lib/support/env_windows.cc
   src/core/lib/support/fork.cc
-  src/core/lib/support/histogram.cc
   src/core/lib/support/host_port.cc
   src/core/lib/support/log.cc
   src/core/lib/support/log_android.cc
@@ -845,7 +677,7 @@
 target_include_directories(gpr
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
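
Note: from here on, every target swaps the hard-coded
${BORINGSSL_ROOT_DIR}/include include path for ${_gRPC_SSL_INCLUDE_DIR}. The
definition of that variable is not shown in this diff; presumably the new
cmake/ssl.cmake sets it so the header path tracks whichever SSL provider is
selected. A hypothetical sketch (both _gRPC_SSL_INCLUDE_DIR assignments below
are assumptions, not taken from this diff):

    if("${gRPC_SSL_PROVIDER}" STREQUAL "module")
      # In-tree BoringSSL: headers live under the submodule (assumed).
      set(_gRPC_SSL_INCLUDE_DIR ${BORINGSSL_ROOT_DIR}/include)
    elseif("${gRPC_SSL_PROVIDER}" STREQUAL "package")
      find_package(OpenSSL REQUIRED)
      set(_gRPC_SSL_LIBRARIES ${OPENSSL_LIBRARIES})
      # System OpenSSL: header directory reported by FindOpenSSL (assumed).
      set(_gRPC_SSL_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR})
    endif()
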
@@ -868,7 +700,6 @@
   include/grpc/support/avl.h
   include/grpc/support/cmdline.h
   include/grpc/support/cpu.h
-  include/grpc/support/histogram.h
   include/grpc/support/host_port.h
   include/grpc/support/log.h
   include/grpc/support/log_windows.h
@@ -938,7 +769,7 @@
 target_include_directories(gpr_test_util
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -1225,7 +1056,7 @@
 target_include_directories(grpc
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -1539,7 +1370,7 @@
 target_include_directories(grpc_cronet
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -1616,11 +1447,13 @@
   test/core/iomgr/endpoint_tests.cc
   test/core/util/debugger_macros.cc
   test/core/util/grpc_profiler.cc
+  test/core/util/histogram.cc
   test/core/util/memory_counters.cc
   test/core/util/mock_endpoint.cc
   test/core/util/parse_hexstring.cc
   test/core/util/passthru_endpoint.cc
   test/core/util/port.cc
+  test/core/util/port_isolated_runtime_environment.cc
   test/core/util/port_server_client.cc
   test/core/util/slice_splitter.cc
   test/core/util/tracer_util.cc
@@ -1825,7 +1658,7 @@
 target_include_directories(grpc_test_util
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -1884,11 +1717,13 @@
   test/core/iomgr/endpoint_tests.cc
   test/core/util/debugger_macros.cc
   test/core/util/grpc_profiler.cc
+  test/core/util/histogram.cc
   test/core/util/memory_counters.cc
   test/core/util/mock_endpoint.cc
   test/core/util/parse_hexstring.cc
   test/core/util/passthru_endpoint.cc
   test/core/util/port.cc
+  test/core/util/port_isolated_runtime_environment.cc
   test/core/util/port_server_client.cc
   test/core/util/slice_splitter.cc
   test/core/util/tracer_util.cc
@@ -2093,7 +1928,7 @@
 target_include_directories(grpc_test_util_unsecure
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2381,7 +2216,7 @@
 target_include_directories(grpc_unsecure
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2473,7 +2308,7 @@
 target_include_directories(reconnect_server
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2515,7 +2350,7 @@
 target_include_directories(test_tcp_server
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2596,7 +2431,7 @@
 target_include_directories(grpc++
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -2670,7 +2505,6 @@
   include/grpc/support/avl.h
   include/grpc/support/cmdline.h
   include/grpc/support/cpu.h
-  include/grpc/support/histogram.h
   include/grpc/support/host_port.h
   include/grpc/support/log.h
   include/grpc/support/log_windows.h
@@ -2800,7 +2634,7 @@
 target_include_directories(grpc++_core_stats
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3082,7 +2916,7 @@
 target_include_directories(grpc++_cronet
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3157,7 +2991,6 @@
   include/grpc/support/avl.h
   include/grpc/support/cmdline.h
   include/grpc/support/cpu.h
-  include/grpc/support/histogram.h
   include/grpc/support/host_port.h
   include/grpc/support/log.h
   include/grpc/support/log_windows.h
@@ -3285,7 +3118,7 @@
 target_include_directories(grpc++_error_details
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3350,7 +3183,7 @@
 target_include_directories(grpc++_proto_reflection_desc_db
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3411,7 +3244,7 @@
 target_include_directories(grpc++_reflection
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3469,7 +3302,7 @@
 target_include_directories(grpc++_test_config
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3547,7 +3380,7 @@
 target_include_directories(grpc++_test_util
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3688,7 +3521,7 @@
 target_include_directories(grpc++_test_util_unsecure
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3831,7 +3664,7 @@
 target_include_directories(grpc++_unsecure
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -3904,7 +3737,6 @@
   include/grpc/support/avl.h
   include/grpc/support/cmdline.h
   include/grpc/support/cpu.h
-  include/grpc/support/histogram.h
   include/grpc/support/host_port.h
   include/grpc/support/log.h
   include/grpc/support/log_windows.h
@@ -4025,7 +3857,7 @@
 target_include_directories(grpc_benchmark
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4084,7 +3916,7 @@
 target_include_directories(grpc_cli_libs
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4144,7 +3976,7 @@
 target_include_directories(grpc_plugin_support
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4222,7 +4054,7 @@
 target_include_directories(http2_client_main
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4277,7 +4109,7 @@
 target_include_directories(interop_client_helper
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4347,7 +4179,7 @@
 target_include_directories(interop_client_main
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4398,7 +4230,7 @@
 target_include_directories(interop_server_helper
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4467,7 +4299,7 @@
 target_include_directories(interop_server_lib
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4518,7 +4350,7 @@
 target_include_directories(interop_server_main
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4606,7 +4438,7 @@
 target_include_directories(qps
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4653,7 +4485,7 @@
 target_include_directories(grpc_csharp_ext
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4681,92 +4513,6 @@
 
 if (gRPC_BUILD_TESTS)
 
-add_library(ares
-  third_party/cares/cares/ares__close_sockets.c
-  third_party/cares/cares/ares__get_hostent.c
-  third_party/cares/cares/ares__read_line.c
-  third_party/cares/cares/ares__timeval.c
-  third_party/cares/cares/ares_cancel.c
-  third_party/cares/cares/ares_create_query.c
-  third_party/cares/cares/ares_data.c
-  third_party/cares/cares/ares_destroy.c
-  third_party/cares/cares/ares_expand_name.c
-  third_party/cares/cares/ares_expand_string.c
-  third_party/cares/cares/ares_fds.c
-  third_party/cares/cares/ares_free_hostent.c
-  third_party/cares/cares/ares_free_string.c
-  third_party/cares/cares/ares_getenv.c
-  third_party/cares/cares/ares_gethostbyaddr.c
-  third_party/cares/cares/ares_gethostbyname.c
-  third_party/cares/cares/ares_getnameinfo.c
-  third_party/cares/cares/ares_getopt.c
-  third_party/cares/cares/ares_getsock.c
-  third_party/cares/cares/ares_init.c
-  third_party/cares/cares/ares_library_init.c
-  third_party/cares/cares/ares_llist.c
-  third_party/cares/cares/ares_mkquery.c
-  third_party/cares/cares/ares_nowarn.c
-  third_party/cares/cares/ares_options.c
-  third_party/cares/cares/ares_parse_a_reply.c
-  third_party/cares/cares/ares_parse_aaaa_reply.c
-  third_party/cares/cares/ares_parse_mx_reply.c
-  third_party/cares/cares/ares_parse_naptr_reply.c
-  third_party/cares/cares/ares_parse_ns_reply.c
-  third_party/cares/cares/ares_parse_ptr_reply.c
-  third_party/cares/cares/ares_parse_soa_reply.c
-  third_party/cares/cares/ares_parse_srv_reply.c
-  third_party/cares/cares/ares_parse_txt_reply.c
-  third_party/cares/cares/ares_platform.c
-  third_party/cares/cares/ares_process.c
-  third_party/cares/cares/ares_query.c
-  third_party/cares/cares/ares_search.c
-  third_party/cares/cares/ares_send.c
-  third_party/cares/cares/ares_strcasecmp.c
-  third_party/cares/cares/ares_strdup.c
-  third_party/cares/cares/ares_strerror.c
-  third_party/cares/cares/ares_timeout.c
-  third_party/cares/cares/ares_version.c
-  third_party/cares/cares/ares_writev.c
-  third_party/cares/cares/bitncmp.c
-  third_party/cares/cares/inet_net_pton.c
-  third_party/cares/cares/inet_ntop.c
-  third_party/cares/cares/windows_port.c
-)
-
-if(WIN32 AND MSVC)
-  set_target_properties(ares PROPERTIES COMPILE_PDB_NAME "ares"
-    COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}"
-  )
-  if (gRPC_INSTALL)
-    install(FILES ${CMAKE_CURRENT_BINARY_DIR}/ares.pdb
-      DESTINATION ${gRPC_INSTALL_LIBDIR} OPTIONAL
-    )
-  endif()
-endif()
-
-
-target_include_directories(ares
-  PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${ZLIB_INCLUDE_DIR}
-  PRIVATE ${BENCHMARK}/include
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(ares
-  ${_gRPC_SSL_LIBRARIES}
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-)
-
-
-endif (gRPC_BUILD_TESTS)
-if (gRPC_BUILD_TESTS)
-
 add_library(bad_client_test
   test/core/bad_client/bad_client.cc
 )
@@ -4786,7 +4532,7 @@
 target_include_directories(bad_client_test
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4827,7 +4573,7 @@
 target_include_directories(bad_ssl_test_server
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4872,6 +4618,7 @@
   test/core/end2end/tests/filter_call_init_fails.cc
   test/core/end2end/tests/filter_causes_close.cc
   test/core/end2end/tests/filter_latency.cc
+  test/core/end2end/tests/filter_status_code.cc
   test/core/end2end/tests/graceful_server_shutdown.cc
   test/core/end2end/tests/high_initial_seqno.cc
   test/core/end2end/tests/hpack_size.cc
@@ -4928,7 +4675,7 @@
 target_include_directories(end2end_tests
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -4973,6 +4720,7 @@
   test/core/end2end/tests/filter_call_init_fails.cc
   test/core/end2end/tests/filter_causes_close.cc
   test/core/end2end/tests/filter_latency.cc
+  test/core/end2end/tests/filter_status_code.cc
   test/core/end2end/tests/graceful_server_shutdown.cc
   test/core/end2end/tests/high_initial_seqno.cc
   test/core/end2end/tests/hpack_size.cc
@@ -5029,7 +4777,7 @@
 target_include_directories(end2end_nosec_tests
   PUBLIC $<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${ZLIB_INCLUDE_DIR}
   PRIVATE ${BENCHMARK}/include
@@ -5060,7 +4808,7 @@
 target_include_directories(alarm_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5089,7 +4837,7 @@
 target_include_directories(algorithm_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5118,7 +4866,7 @@
 target_include_directories(alloc_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5145,7 +4893,7 @@
 target_include_directories(alpn_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5174,7 +4922,7 @@
 target_include_directories(arena_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5193,35 +4941,6 @@
 endif (gRPC_BUILD_TESTS)
 if (gRPC_BUILD_TESTS)
 
-add_executable(backoff_test
-  test/core/backoff/backoff_test.cc
-)
-
-
-target_include_directories(backoff_test
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${BENCHMARK_ROOT_DIR}/include
-  PRIVATE ${ZLIB_ROOT_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(backoff_test
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-  grpc_test_util
-  grpc
-  gpr_test_util
-  gpr
-)
-
-endif (gRPC_BUILD_TESTS)
-if (gRPC_BUILD_TESTS)
-
 add_executable(bad_server_response_test
   test/core/end2end/bad_server_response_test.cc
 )
@@ -5230,7 +4949,7 @@
 target_include_directories(bad_server_response_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5260,7 +4979,7 @@
 target_include_directories(bin_decoder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5287,7 +5006,7 @@
 target_include_directories(bin_encoder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5314,7 +5033,7 @@
 target_include_directories(byte_stream_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5343,7 +5062,7 @@
 target_include_directories(channel_create_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5371,7 +5090,7 @@
 target_include_directories(check_epollexclusive
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5406,7 +5125,7 @@
 target_include_directories(chttp2_hpack_encoder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5435,7 +5154,7 @@
 target_include_directories(chttp2_stream_map_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5464,7 +5183,7 @@
 target_include_directories(chttp2_varint_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5493,7 +5212,7 @@
 target_include_directories(combiner_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5522,7 +5241,7 @@
 target_include_directories(compression_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5551,7 +5270,7 @@
 target_include_directories(concurrent_connectivity_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5580,7 +5299,7 @@
 target_include_directories(connection_refused_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5609,7 +5328,7 @@
 target_include_directories(dns_resolver_connectivity_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5638,7 +5357,7 @@
 target_include_directories(dns_resolver_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5668,7 +5387,7 @@
 target_include_directories(dualstack_socket_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5698,7 +5417,7 @@
 target_include_directories(endpoint_pair_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5727,7 +5446,7 @@
 target_include_directories(error_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5757,7 +5476,7 @@
 target_include_directories(ev_epollsig_linux_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5787,7 +5506,7 @@
 target_include_directories(fake_resolver_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5818,7 +5537,7 @@
 target_include_directories(fake_transport_security_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5848,7 +5567,7 @@
 target_include_directories(fd_conservation_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5879,7 +5598,7 @@
 target_include_directories(fd_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5909,7 +5628,7 @@
 target_include_directories(fling_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5938,7 +5657,7 @@
 target_include_directories(fling_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5968,7 +5687,7 @@
 target_include_directories(fling_stream_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -5999,7 +5718,7 @@
 target_include_directories(fling_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6019,104 +5738,6 @@
 
 endif()
 endif (gRPC_BUILD_TESTS)
-
-add_executable(gen_hpack_tables
-  tools/codegen/core/gen_hpack_tables.c
-)
-
-
-target_include_directories(gen_hpack_tables
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${BENCHMARK_ROOT_DIR}/include
-  PRIVATE ${ZLIB_ROOT_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(gen_hpack_tables
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-  gpr
-  grpc
-)
-
-
-if (gRPC_INSTALL)
-  install(TARGETS gen_hpack_tables EXPORT gRPCTargets
-    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
-    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
-    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
-  )
-endif()
-
-
-add_executable(gen_legal_metadata_characters
-  tools/codegen/core/gen_legal_metadata_characters.c
-)
-
-
-target_include_directories(gen_legal_metadata_characters
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${BENCHMARK_ROOT_DIR}/include
-  PRIVATE ${ZLIB_ROOT_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(gen_legal_metadata_characters
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-)
-
-
-if (gRPC_INSTALL)
-  install(TARGETS gen_legal_metadata_characters EXPORT gRPCTargets
-    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
-    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
-    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
-  )
-endif()
-
-
-add_executable(gen_percent_encoding_tables
-  tools/codegen/core/gen_percent_encoding_tables.c
-)
-
-
-target_include_directories(gen_percent_encoding_tables
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${BENCHMARK_ROOT_DIR}/include
-  PRIVATE ${ZLIB_ROOT_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(gen_percent_encoding_tables
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-)
-
-
-if (gRPC_INSTALL)
-  install(TARGETS gen_percent_encoding_tables EXPORT gRPCTargets
-    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
-    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
-    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
-  )
-endif()
-
 if (gRPC_BUILD_TESTS)
 if(_gRPC_PLATFORM_LINUX OR _gRPC_PLATFORM_MAC OR _gRPC_PLATFORM_POSIX)
 
@@ -6128,7 +5749,7 @@
 target_include_directories(goaway_server_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6158,7 +5779,7 @@
 target_include_directories(gpr_avl_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6185,7 +5806,7 @@
 target_include_directories(gpr_cmdline_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6212,7 +5833,7 @@
 target_include_directories(gpr_cpu_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6239,7 +5860,7 @@
 target_include_directories(gpr_env_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6258,33 +5879,6 @@
 endif (gRPC_BUILD_TESTS)
 if (gRPC_BUILD_TESTS)
 
-add_executable(gpr_histogram_test
-  test/core/support/histogram_test.cc
-)
-
-
-target_include_directories(gpr_histogram_test
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
-  PRIVATE ${PROTOBUF_ROOT_DIR}/src
-  PRIVATE ${BENCHMARK_ROOT_DIR}/include
-  PRIVATE ${ZLIB_ROOT_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
-  PRIVATE ${CARES_INCLUDE_DIR}
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
-  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
-)
-
-target_link_libraries(gpr_histogram_test
-  ${_gRPC_ALLTARGETS_LIBRARIES}
-  gpr_test_util
-  gpr
-)
-
-endif (gRPC_BUILD_TESTS)
-if (gRPC_BUILD_TESTS)
-
 add_executable(gpr_host_port_test
   test/core/support/host_port_test.cc
 )
@@ -6293,7 +5887,7 @@
 target_include_directories(gpr_host_port_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6320,7 +5914,7 @@
 target_include_directories(gpr_log_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6347,7 +5941,7 @@
 target_include_directories(gpr_manual_constructor_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6374,7 +5968,7 @@
 target_include_directories(gpr_mpscq_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6401,7 +5995,7 @@
 target_include_directories(gpr_spinlock_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6428,7 +6022,7 @@
 target_include_directories(gpr_string_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6455,7 +6049,7 @@
 target_include_directories(gpr_sync_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6482,7 +6076,7 @@
 target_include_directories(gpr_thd_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6509,7 +6103,7 @@
 target_include_directories(gpr_time_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6536,7 +6130,7 @@
 target_include_directories(gpr_tls_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6563,7 +6157,7 @@
 target_include_directories(gpr_useful_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6590,7 +6184,7 @@
 target_include_directories(grpc_auth_context_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6619,7 +6213,7 @@
 target_include_directories(grpc_b64_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6648,7 +6242,7 @@
 target_include_directories(grpc_byte_buffer_reader_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6677,7 +6271,7 @@
 target_include_directories(grpc_channel_args_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6706,7 +6300,7 @@
 target_include_directories(grpc_channel_stack_builder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6735,7 +6329,7 @@
 target_include_directories(grpc_channel_stack_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6764,7 +6358,7 @@
 target_include_directories(grpc_completion_queue_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6793,7 +6387,7 @@
 target_include_directories(grpc_completion_queue_threading_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6821,7 +6415,7 @@
 target_include_directories(grpc_create_jwt
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6857,7 +6451,7 @@
 target_include_directories(grpc_credentials_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6886,7 +6480,7 @@
 target_include_directories(grpc_fetch_oauth2
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6915,7 +6509,7 @@
 target_include_directories(grpc_invalid_channel_args_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6945,7 +6539,7 @@
 target_include_directories(grpc_json_token_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -6975,7 +6569,7 @@
 target_include_directories(grpc_jwt_verifier_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7003,7 +6597,7 @@
 target_include_directories(grpc_print_google_default_creds_token
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7038,7 +6632,7 @@
 target_include_directories(grpc_security_connector_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7067,7 +6661,7 @@
 target_include_directories(grpc_ssl_credentials_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7095,7 +6689,7 @@
 target_include_directories(grpc_verify_jwt
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7131,7 +6725,7 @@
 target_include_directories(handshake_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7164,7 +6758,7 @@
 target_include_directories(handshake_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7197,7 +6791,7 @@
 target_include_directories(handshake_server_with_readahead_handshaker
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7220,6 +6814,33 @@
 endif (gRPC_BUILD_TESTS)
 if (gRPC_BUILD_TESTS)
 
+add_executable(histogram_test
+  test/core/util/histogram_test.cc
+)
+
+
+target_include_directories(histogram_test
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+)
+
+target_link_libraries(histogram_test
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc_test_util
+  gpr
+)
+
+endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+
 add_executable(hpack_parser_test
   test/core/transport/chttp2/hpack_parser_test.cc
 )
@@ -7228,7 +6849,7 @@
 target_include_directories(hpack_parser_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7257,7 +6878,7 @@
 target_include_directories(hpack_table_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7286,7 +6907,7 @@
 target_include_directories(http_parser_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7315,7 +6936,7 @@
 target_include_directories(httpcli_format_request_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7345,7 +6966,7 @@
 target_include_directories(httpcli_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7376,7 +6997,7 @@
 target_include_directories(httpscli_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7406,7 +7027,7 @@
 target_include_directories(init_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7435,7 +7056,7 @@
 target_include_directories(invalid_call_argument_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7464,7 +7085,7 @@
 target_include_directories(json_rewrite
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7491,7 +7112,7 @@
 target_include_directories(json_rewrite_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7520,7 +7141,7 @@
 target_include_directories(json_stream_error_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7549,7 +7170,7 @@
 target_include_directories(json_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7578,7 +7199,7 @@
 target_include_directories(lame_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7607,7 +7228,7 @@
 target_include_directories(lb_policies_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7636,7 +7257,7 @@
 target_include_directories(load_file_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7665,7 +7286,7 @@
 target_include_directories(memory_profile_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7694,7 +7315,7 @@
 target_include_directories(memory_profile_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7724,7 +7345,7 @@
 target_include_directories(memory_profile_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7754,7 +7375,7 @@
 target_include_directories(message_compress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7783,7 +7404,7 @@
 target_include_directories(minimal_stack_is_minimal_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7812,7 +7433,7 @@
 target_include_directories(multiple_server_queues_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7841,7 +7462,7 @@
 target_include_directories(murmur_hash_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7868,7 +7489,7 @@
 target_include_directories(no_server_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7897,7 +7518,7 @@
 target_include_directories(num_external_connectivity_watchers_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7926,7 +7547,7 @@
 target_include_directories(parse_address_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7955,7 +7576,7 @@
 target_include_directories(percent_encoding_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -7985,7 +7606,7 @@
 target_include_directories(pollset_set_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8016,7 +7637,7 @@
 target_include_directories(resolve_address_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8046,7 +7667,7 @@
 target_include_directories(resolve_address_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8075,7 +7696,7 @@
 target_include_directories(resource_quota_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8104,7 +7725,7 @@
 target_include_directories(secure_channel_create_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8133,7 +7754,7 @@
 target_include_directories(secure_endpoint_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8162,7 +7783,7 @@
 target_include_directories(sequential_connectivity_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8191,7 +7812,7 @@
 target_include_directories(server_chttp2_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8220,7 +7841,7 @@
 target_include_directories(server_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8249,7 +7870,7 @@
 target_include_directories(slice_buffer_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8278,7 +7899,7 @@
 target_include_directories(slice_hash_table_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8307,7 +7928,7 @@
 target_include_directories(slice_string_helpers_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8336,7 +7957,7 @@
 target_include_directories(slice_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8365,7 +7986,7 @@
 target_include_directories(sockaddr_resolver_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8394,7 +8015,7 @@
 target_include_directories(sockaddr_utils_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8424,7 +8045,7 @@
 target_include_directories(socket_utils_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8456,7 +8077,7 @@
 target_include_directories(ssl_transport_security_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8485,7 +8106,7 @@
 target_include_directories(status_conversion_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8514,7 +8135,7 @@
 target_include_directories(stream_compression_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8543,7 +8164,7 @@
 target_include_directories(stream_owned_slice_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8573,7 +8194,7 @@
 target_include_directories(tcp_client_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8603,7 +8224,7 @@
 target_include_directories(tcp_client_uv_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8633,7 +8254,7 @@
 target_include_directories(tcp_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8664,7 +8285,7 @@
 target_include_directories(tcp_server_posix_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8694,7 +8315,7 @@
 target_include_directories(tcp_server_uv_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8723,7 +8344,7 @@
 target_include_directories(time_averaged_stats_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8752,7 +8373,7 @@
 target_include_directories(timeout_encoding_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8781,7 +8402,7 @@
 target_include_directories(timer_heap_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8810,7 +8431,7 @@
 target_include_directories(timer_list_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8839,7 +8460,7 @@
 target_include_directories(transport_connectivity_state_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8868,7 +8489,7 @@
 target_include_directories(transport_metadata_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8898,7 +8519,7 @@
 target_include_directories(transport_security_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8929,7 +8550,7 @@
 target_include_directories(udp_server_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8959,7 +8580,7 @@
 target_include_directories(uri_parser_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -8989,7 +8610,7 @@
 target_include_directories(wakeup_fd_cv_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9021,7 +8642,7 @@
 target_include_directories(alarm_cpp_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9061,7 +8682,7 @@
 target_include_directories(async_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9101,7 +8722,7 @@
 target_include_directories(auth_property_iterator_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9131,6 +8752,44 @@
 endif (gRPC_BUILD_TESTS)
 if (gRPC_BUILD_TESTS)
 
+add_executable(backoff_test
+  test/core/backoff/backoff_test.cc
+  third_party/googletest/googletest/src/gtest-all.cc
+  third_party/googletest/googlemock/src/gmock-all.cc
+)
+
+
+target_include_directories(backoff_test
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+  PRIVATE third_party/googletest/googletest/include
+  PRIVATE third_party/googletest/googletest
+  PRIVATE third_party/googletest/googlemock/include
+  PRIVATE third_party/googletest/googlemock
+  PRIVATE ${_gRPC_PROTO_GENS_DIR}
+)
+
+target_link_libraries(backoff_test
+  ${_gRPC_PROTOBUF_LIBRARIES}
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc_test_util
+  grpc
+  gpr_test_util
+  gpr
+  ${_gRPC_GFLAGS_LIBRARIES}
+)
+
+endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+
 add_executable(bdp_estimator_test
   test/core/transport/bdp_estimator_test.cc
   third_party/googletest/googletest/src/gtest-all.cc
@@ -9141,7 +8800,7 @@
 target_include_directories(bdp_estimator_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9182,7 +8841,7 @@
 target_include_directories(bm_arena
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9226,7 +8885,7 @@
 target_include_directories(bm_call_create
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9270,7 +8929,7 @@
 target_include_directories(bm_chttp2_hpack
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9314,7 +8973,7 @@
 target_include_directories(bm_chttp2_transport
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9358,7 +9017,7 @@
 target_include_directories(bm_closure
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9402,7 +9061,7 @@
 target_include_directories(bm_cq
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9446,7 +9105,7 @@
 target_include_directories(bm_cq_multiple_threads
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9490,7 +9149,7 @@
 target_include_directories(bm_error
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9534,7 +9193,7 @@
 target_include_directories(bm_fullstack_streaming_ping_pong
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9578,7 +9237,7 @@
 target_include_directories(bm_fullstack_streaming_pump
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9622,7 +9281,7 @@
 target_include_directories(bm_fullstack_trickle
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9667,7 +9326,7 @@
 target_include_directories(bm_fullstack_unary_ping_pong
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9711,7 +9370,7 @@
 target_include_directories(bm_metadata
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9755,7 +9414,7 @@
 target_include_directories(bm_pollset
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9798,7 +9457,7 @@
 target_include_directories(channel_arguments_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9835,7 +9494,7 @@
 target_include_directories(channel_filter_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9872,7 +9531,7 @@
 target_include_directories(chttp2_settings_timeout_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9910,7 +9569,7 @@
 target_include_directories(cli_call_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9958,7 +9617,7 @@
 target_include_directories(client_channel_stress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -9999,7 +9658,7 @@
 target_include_directories(client_crash_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10040,7 +9699,7 @@
 target_include_directories(client_crash_test_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10080,7 +9739,7 @@
 target_include_directories(client_lb_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10155,7 +9814,7 @@
 target_include_directories(codegen_test_full
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10229,7 +9888,7 @@
 target_include_directories(codegen_test_minimal
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10266,7 +9925,7 @@
 target_include_directories(credentials_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10303,7 +9962,7 @@
 target_include_directories(cxx_byte_buffer_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10342,7 +10001,7 @@
 target_include_directories(cxx_slice_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10381,7 +10040,7 @@
 target_include_directories(cxx_string_ref_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10417,7 +10076,7 @@
 target_include_directories(cxx_time_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10456,7 +10115,7 @@
 target_include_directories(end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10503,7 +10162,7 @@
 target_include_directories(error_details_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10539,7 +10198,7 @@
 target_include_directories(filter_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10579,7 +10238,7 @@
 target_include_directories(generic_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10626,7 +10285,7 @@
 target_include_directories(golden_file_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10663,7 +10322,7 @@
 target_include_directories(grpc_cli
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10700,7 +10359,7 @@
 target_include_directories(grpc_cpp_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10736,7 +10395,7 @@
 target_include_directories(grpc_csharp_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10772,7 +10431,7 @@
 target_include_directories(grpc_node_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10808,7 +10467,7 @@
 target_include_directories(grpc_objective_c_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10844,7 +10503,7 @@
 target_include_directories(grpc_php_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10880,7 +10539,7 @@
 target_include_directories(grpc_python_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10916,7 +10575,7 @@
 target_include_directories(grpc_ruby_plugin
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -10969,7 +10628,7 @@
 target_include_directories(grpc_tool_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11019,7 +10678,7 @@
 target_include_directories(grpclb_api_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11064,7 +10723,7 @@
 target_include_directories(grpclb_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11111,7 +10770,7 @@
 target_include_directories(grpclb_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11151,7 +10810,7 @@
 target_include_directories(h2_ssl_cert_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11190,7 +10849,7 @@
 target_include_directories(health_service_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11230,7 +10889,7 @@
 target_include_directories(http2_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11271,7 +10930,7 @@
 target_include_directories(hybrid_end2end_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11312,7 +10971,7 @@
 target_include_directories(inproc_sync_unary_ping_pong_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11356,7 +11015,7 @@
 target_include_directories(interop_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11400,7 +11059,7 @@
 target_include_directories(interop_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11446,7 +11105,7 @@
 target_include_directories(interop_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11487,7 +11146,7 @@
 target_include_directories(json_run_localhost
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11529,7 +11188,7 @@
 target_include_directories(memory_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11575,7 +11234,7 @@
 target_include_directories(metrics_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11613,7 +11272,7 @@
 target_include_directories(mock_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11653,7 +11312,7 @@
 target_include_directories(noop-benchmark
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11688,7 +11347,7 @@
 target_include_directories(proto_server_reflection_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11730,7 +11389,7 @@
 target_include_directories(proto_utils_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11767,7 +11426,7 @@
 target_include_directories(qps_interarrival_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11810,7 +11469,7 @@
 target_include_directories(qps_json_driver
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11854,7 +11513,7 @@
 target_include_directories(qps_openloop_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11898,7 +11557,7 @@
 target_include_directories(qps_worker
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -11962,7 +11621,7 @@
 target_include_directories(reconnect_interop_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12024,7 +11683,7 @@
 target_include_directories(reconnect_interop_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12057,6 +11716,84 @@
 endif (gRPC_BUILD_TESTS)
 if (gRPC_BUILD_TESTS)
 
+add_executable(ref_counted_ptr_test
+  test/core/support/ref_counted_ptr_test.cc
+  third_party/googletest/googletest/src/gtest-all.cc
+  third_party/googletest/googlemock/src/gmock-all.cc
+)
+
+
+target_include_directories(ref_counted_ptr_test
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+  PRIVATE third_party/googletest/googletest/include
+  PRIVATE third_party/googletest/googletest
+  PRIVATE third_party/googletest/googlemock/include
+  PRIVATE third_party/googletest/googlemock
+  PRIVATE ${_gRPC_PROTO_GENS_DIR}
+)
+
+target_link_libraries(ref_counted_ptr_test
+  ${_gRPC_PROTOBUF_LIBRARIES}
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc_test_util
+  grpc++
+  grpc
+  gpr_test_util
+  gpr
+  ${_gRPC_GFLAGS_LIBRARIES}
+)
+
+endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+
+add_executable(ref_counted_test
+  test/core/support/ref_counted_test.cc
+  third_party/googletest/googletest/src/gtest-all.cc
+  third_party/googletest/googlemock/src/gmock-all.cc
+)
+
+
+target_include_directories(ref_counted_test
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+  PRIVATE third_party/googletest/googletest/include
+  PRIVATE third_party/googletest/googletest
+  PRIVATE third_party/googletest/googlemock/include
+  PRIVATE third_party/googletest/googlemock
+  PRIVATE ${_gRPC_PROTO_GENS_DIR}
+)
+
+target_link_libraries(ref_counted_test
+  ${_gRPC_PROTOBUF_LIBRARIES}
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  grpc_test_util
+  grpc++
+  grpc
+  gpr_test_util
+  gpr
+  ${_gRPC_GFLAGS_LIBRARIES}
+)
+
+endif (gRPC_BUILD_TESTS)
+if (gRPC_BUILD_TESTS)
+
 add_executable(secure_auth_context_test
   test/cpp/common/secure_auth_context_test.cc
   third_party/googletest/googletest/src/gtest-all.cc
@@ -12067,7 +11804,7 @@
 target_include_directories(secure_auth_context_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12108,7 +11845,7 @@
 target_include_directories(secure_sync_unary_ping_pong_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12152,7 +11889,7 @@
 target_include_directories(server_builder_plugin_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12206,7 +11943,7 @@
 target_include_directories(server_builder_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12246,7 +11983,7 @@
 target_include_directories(server_context_test_spouse_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12286,7 +12023,7 @@
 target_include_directories(server_crash_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12327,7 +12064,7 @@
 target_include_directories(server_crash_test_client
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12381,7 +12118,7 @@
 target_include_directories(server_request_call_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12421,7 +12158,7 @@
 target_include_directories(shutdown_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12461,7 +12198,7 @@
 target_include_directories(stats_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12500,7 +12237,7 @@
 target_include_directories(status_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12540,7 +12277,7 @@
 target_include_directories(streaming_throughput_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12612,7 +12349,7 @@
 target_include_directories(stress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12653,7 +12390,7 @@
 target_include_directories(thread_manager_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12691,7 +12428,7 @@
 target_include_directories(thread_stress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12731,7 +12468,7 @@
 target_include_directories(transport_pid_controller_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12772,7 +12509,7 @@
 target_include_directories(writes_per_rpc_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12811,7 +12548,7 @@
 target_include_directories(public_headers_must_be_c89
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12828,6 +12565,104 @@
 )
 
 endif (gRPC_BUILD_TESTS)
+
+add_executable(gen_hpack_tables
+  tools/codegen/core/gen_hpack_tables.cc
+)
+
+
+target_include_directories(gen_hpack_tables
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+)
+
+target_link_libraries(gen_hpack_tables
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+  gpr
+  grpc
+)
+
+
+if (gRPC_INSTALL)
+  install(TARGETS gen_hpack_tables EXPORT gRPCTargets
+    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
+    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
+    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
+  )
+endif()
+
+
+add_executable(gen_legal_metadata_characters
+  tools/codegen/core/gen_legal_metadata_characters.cc
+)
+
+
+target_include_directories(gen_legal_metadata_characters
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+)
+
+target_link_libraries(gen_legal_metadata_characters
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+)
+
+
+if (gRPC_INSTALL)
+  install(TARGETS gen_legal_metadata_characters EXPORT gRPCTargets
+    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
+    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
+    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
+  )
+endif()
+
+
+add_executable(gen_percent_encoding_tables
+  tools/codegen/core/gen_percent_encoding_tables.cc
+)
+
+
+target_include_directories(gen_percent_encoding_tables
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
+  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
+  PRIVATE ${PROTOBUF_ROOT_DIR}/src
+  PRIVATE ${BENCHMARK_ROOT_DIR}/include
+  PRIVATE ${ZLIB_ROOT_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/zlib
+  PRIVATE ${CARES_INCLUDE_DIR}
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/cares/cares
+  PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/third_party/gflags/include
+)
+
+target_link_libraries(gen_percent_encoding_tables
+  ${_gRPC_ALLTARGETS_LIBRARIES}
+)
+
+
+if (gRPC_INSTALL)
+  install(TARGETS gen_percent_encoding_tables EXPORT gRPCTargets
+    RUNTIME DESTINATION ${gRPC_INSTALL_BINDIR}
+    LIBRARY DESTINATION ${gRPC_INSTALL_LIBDIR}
+    ARCHIVE DESTINATION ${gRPC_INSTALL_LIBDIR}
+  )
+endif()
+
 if (gRPC_BUILD_TESTS)
 
 add_executable(badreq_bad_client_test
@@ -12838,7 +12673,7 @@
 target_include_directories(badreq_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12869,7 +12704,7 @@
 target_include_directories(connection_prefix_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12900,7 +12735,7 @@
 target_include_directories(head_of_line_blocking_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12931,7 +12766,7 @@
 target_include_directories(headers_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12962,7 +12797,7 @@
 target_include_directories(initial_settings_frame_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -12993,7 +12828,7 @@
 target_include_directories(server_registered_method_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13024,7 +12859,7 @@
 target_include_directories(simple_request_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13055,7 +12890,7 @@
 target_include_directories(unknown_frame_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13086,7 +12921,7 @@
 target_include_directories(window_overflow_bad_client_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13118,7 +12953,7 @@
 target_include_directories(bad_ssl_cert_server
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13150,7 +12985,7 @@
 target_include_directories(bad_ssl_cert_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13180,7 +13015,7 @@
 target_include_directories(h2_census_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13210,7 +13045,7 @@
 target_include_directories(h2_compress_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13240,7 +13075,7 @@
 target_include_directories(h2_fakesec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13271,7 +13106,7 @@
 target_include_directories(h2_fd_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13302,7 +13137,7 @@
 target_include_directories(h2_full_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13333,7 +13168,7 @@
 target_include_directories(h2_full+pipe_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13364,7 +13199,7 @@
 target_include_directories(h2_full+trace_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13394,7 +13229,7 @@
 target_include_directories(h2_full+workarounds_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13424,7 +13259,7 @@
 target_include_directories(h2_http_proxy_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13454,7 +13289,7 @@
 target_include_directories(h2_load_reporting_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13484,7 +13319,7 @@
 target_include_directories(h2_oauth2_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13514,7 +13349,7 @@
 target_include_directories(h2_proxy_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13544,7 +13379,7 @@
 target_include_directories(h2_sockpair_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13574,7 +13409,7 @@
 target_include_directories(h2_sockpair+trace_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13604,7 +13439,7 @@
 target_include_directories(h2_sockpair_1byte_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13634,7 +13469,7 @@
 target_include_directories(h2_ssl_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13664,7 +13499,7 @@
 target_include_directories(h2_ssl_proxy_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13695,7 +13530,7 @@
 target_include_directories(h2_uds_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13726,7 +13561,7 @@
 target_include_directories(inproc_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13756,7 +13591,7 @@
 target_include_directories(h2_census_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13786,7 +13621,7 @@
 target_include_directories(h2_compress_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13817,7 +13652,7 @@
 target_include_directories(h2_fd_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13848,7 +13683,7 @@
 target_include_directories(h2_full_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13879,7 +13714,7 @@
 target_include_directories(h2_full+pipe_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13910,7 +13745,7 @@
 target_include_directories(h2_full+trace_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13940,7 +13775,7 @@
 target_include_directories(h2_full+workarounds_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -13970,7 +13805,7 @@
 target_include_directories(h2_http_proxy_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14000,7 +13835,7 @@
 target_include_directories(h2_load_reporting_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14030,7 +13865,7 @@
 target_include_directories(h2_proxy_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14060,7 +13895,7 @@
 target_include_directories(h2_sockpair_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14090,7 +13925,7 @@
 target_include_directories(h2_sockpair+trace_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14120,7 +13955,7 @@
 target_include_directories(h2_sockpair_1byte_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14151,7 +13986,7 @@
 target_include_directories(h2_uds_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14182,7 +14017,7 @@
 target_include_directories(inproc_nosec_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14215,7 +14050,7 @@
 target_include_directories(resolver_component_test_unsecure
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14258,7 +14093,7 @@
 target_include_directories(resolver_component_test
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14301,7 +14136,7 @@
 target_include_directories(resolver_component_tests_runner_invoker_unsecure
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14344,7 +14179,7 @@
 target_include_directories(resolver_component_tests_runner_invoker
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14385,7 +14220,7 @@
 target_include_directories(api_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14415,7 +14250,7 @@
 target_include_directories(client_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14445,7 +14280,7 @@
 target_include_directories(hpack_parser_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14475,7 +14310,7 @@
 target_include_directories(http_request_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14505,7 +14340,7 @@
 target_include_directories(http_response_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14535,7 +14370,7 @@
 target_include_directories(json_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14565,7 +14400,7 @@
 target_include_directories(nanopb_fuzzer_response_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14595,7 +14430,7 @@
 target_include_directories(nanopb_fuzzer_serverlist_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14625,7 +14460,7 @@
 target_include_directories(percent_decode_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14655,7 +14490,7 @@
 target_include_directories(percent_encode_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14685,7 +14520,7 @@
 target_include_directories(server_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14715,7 +14550,7 @@
 target_include_directories(ssl_server_fuzzer_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
@@ -14745,7 +14580,7 @@
 target_include_directories(uri_fuzzer_test_one_entry
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}
   PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/include
-  PRIVATE ${BORINGSSL_ROOT_DIR}/include
+  PRIVATE ${_gRPC_SSL_INCLUDE_DIR}
   PRIVATE ${PROTOBUF_ROOT_DIR}/src
   PRIVATE ${BENCHMARK_ROOT_DIR}/include
   PRIVATE ${ZLIB_ROOT_DIR}
diff --git a/INSTALL.md b/INSTALL.md
index a18f569..430fd71 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -94,42 +94,51 @@
 ### Building using CMake (RECOMMENDED)
 
 Builds gRPC C and C++ with boringssl.
-- Install [Git](https://git-scm.com/).
 - Install Visual Studio 2015 or 2017 (Visual C++ compiler will be used).
+- Install [Git](https://git-scm.com/).
 - Install [CMake](https://cmake.org/download/).
-- Install [Active State Perl](https://www.activestate.com/activeperl/) (`choco install activeperl`)
-- Install [Ninja](https://ninja-build.org/) (`choco install ninja`)
-- Install [Go](https://golang.org/dl/) (`choco install golang`)
-- Install [yasm](http://yasm.tortall.net/) and add it to `PATH` (`choco install yasm`)
-- Run these commands in the repo root directory
+- Install [Active State Perl](https://www.activestate.com/activeperl/) (`choco install activeperl`) - *required by boringssl*
+- Install [Go](https://golang.org/dl/) (`choco install golang`) - *required by boringssl*
+- Install [yasm](http://yasm.tortall.net/) and add it to `PATH` (`choco install yasm`) - *required by boringssl*
+- (Optional) Install [Ninja](https://ninja-build.org/) (`choco install ninja`)
 
-#### cmake: Using Ninja (faster build, supports boringssl's assembly optimizations).
-Please note that when using Ninja, you'll still need Visual C++ (part of Visual Studio)
-installed to be able to compile the C/C++ sources.
+#### Clone the gRPC sources, including submodules
+Before building, you need to clone the gRPC GitHub repository and download the submodules that contain the
+source code for gRPC's dependencies (that is what the `--recursive` flag, or a later `git submodule update --init`, does).
 ```
+> @rem You can also do just "git clone --recursive -b THE_BRANCH_YOU_WANT https://github.com/grpc/grpc"
 > powershell git clone --recursive -b ((New-Object System.Net.WebClient).DownloadString(\"https://grpc.io/release\").Trim()) https://github.com/grpc/grpc
 > cd grpc
-> md .build
-> cd .build
-> call "%VS140COMNTOOLS%..\..\VC\vcvarsall.bat" x64
-> cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release
-> cmake --build .
-> ninja
+> @rem To update the submodules at a later time, run "git submodule update --init"
 ```
 
-#### cmake: Using Visual Studio 2015 (can only build with OPENSSL_NO_ASM).
+#### cmake: Using Visual Studio 2015 or 2017 (can only build with OPENSSL_NO_ASM).
 When using the "Visual Studio" generator,
 cmake will generate a solution (`grpc.sln`) that contains a VS project for 
 every target defined in `CMakeLists.txt` (+ few extra convenience projects
 added automatically by cmake). After opening the solution with Visual Studio 
 you will be able to browse and build the code as usual.
 ```
+> @rem Run from grpc directory after cloning the repo with --recursive or updating submodules.
 > md .build
 > cd .build
 > cmake .. -G "Visual Studio 14 2015" -DCMAKE_BUILD_TYPE=Release
 > cmake --build .
 ```
 
+#### cmake: Using Ninja (faster build, supports boringssl's assembly optimizations).
+Please note that when using Ninja, you'll still need Visual C++ (part of Visual Studio)
+installed to be able to compile the C/C++ sources.
+```
+> @rem Run from grpc directory after cloning the repo with --recursive or updating submodules.
+> md .build
+> cd .build
+> call "%VS140COMNTOOLS%..\..\VC\vcvarsall.bat" x64
+> cmake .. -GNinja -DCMAKE_BUILD_TYPE=Release
+> cmake --build .
+> ninja
+```
+
 ### msys2 (with mingw)
 
 The Makefile (and source code) should support msys2's mingw32 and mingw64
diff --git a/Makefile b/Makefile
index 3d8f77a..c962a12 100644
--- a/Makefile
+++ b/Makefile
@@ -411,9 +411,9 @@
 Q = @
 endif
 
-CORE_VERSION = 5.0.0
-CPP_VERSION = 1.8.3
-CSHARP_VERSION = 1.8.3
+CORE_VERSION = 5.0.0-dev
+CPP_VERSION = 1.9.0-dev
+CSHARP_VERSION = 1.9.0-dev
 
 CPPFLAGS_NO_ARCH += $(addprefix -I, $(INCLUDES)) $(addprefix -D, $(DEFINES))
 CPPFLAGS += $(CPPFLAGS_NO_ARCH) $(ARCH_FLAGS)
@@ -951,7 +951,6 @@
 alpn_test: $(BINDIR)/$(CONFIG)/alpn_test
 api_fuzzer: $(BINDIR)/$(CONFIG)/api_fuzzer
 arena_test: $(BINDIR)/$(CONFIG)/arena_test
-backoff_test: $(BINDIR)/$(CONFIG)/backoff_test
 bad_server_response_test: $(BINDIR)/$(CONFIG)/bad_server_response_test
 bin_decoder_test: $(BINDIR)/$(CONFIG)/bin_decoder_test
 bin_encoder_test: $(BINDIR)/$(CONFIG)/bin_encoder_test
@@ -980,15 +979,11 @@
 fling_server: $(BINDIR)/$(CONFIG)/fling_server
 fling_stream_test: $(BINDIR)/$(CONFIG)/fling_stream_test
 fling_test: $(BINDIR)/$(CONFIG)/fling_test
-gen_hpack_tables: $(BINDIR)/$(CONFIG)/gen_hpack_tables
-gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
-gen_percent_encoding_tables: $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
 goaway_server_test: $(BINDIR)/$(CONFIG)/goaway_server_test
 gpr_avl_test: $(BINDIR)/$(CONFIG)/gpr_avl_test
 gpr_cmdline_test: $(BINDIR)/$(CONFIG)/gpr_cmdline_test
 gpr_cpu_test: $(BINDIR)/$(CONFIG)/gpr_cpu_test
 gpr_env_test: $(BINDIR)/$(CONFIG)/gpr_env_test
-gpr_histogram_test: $(BINDIR)/$(CONFIG)/gpr_histogram_test
 gpr_host_port_test: $(BINDIR)/$(CONFIG)/gpr_host_port_test
 gpr_log_test: $(BINDIR)/$(CONFIG)/gpr_log_test
 gpr_manual_constructor_test: $(BINDIR)/$(CONFIG)/gpr_manual_constructor_test
@@ -1021,6 +1016,7 @@
 handshake_client: $(BINDIR)/$(CONFIG)/handshake_client
 handshake_server: $(BINDIR)/$(CONFIG)/handshake_server
 handshake_server_with_readahead_handshaker: $(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker
+histogram_test: $(BINDIR)/$(CONFIG)/histogram_test
 hpack_parser_fuzzer_test: $(BINDIR)/$(CONFIG)/hpack_parser_fuzzer_test
 hpack_parser_test: $(BINDIR)/$(CONFIG)/hpack_parser_test
 hpack_table_test: $(BINDIR)/$(CONFIG)/hpack_table_test
@@ -1097,6 +1093,7 @@
 alarm_cpp_test: $(BINDIR)/$(CONFIG)/alarm_cpp_test
 async_end2end_test: $(BINDIR)/$(CONFIG)/async_end2end_test
 auth_property_iterator_test: $(BINDIR)/$(CONFIG)/auth_property_iterator_test
+backoff_test: $(BINDIR)/$(CONFIG)/backoff_test
 bdp_estimator_test: $(BINDIR)/$(CONFIG)/bdp_estimator_test
 bm_arena: $(BINDIR)/$(CONFIG)/bm_arena
 bm_call_create: $(BINDIR)/$(CONFIG)/bm_call_create
@@ -1165,6 +1162,8 @@
 qps_worker: $(BINDIR)/$(CONFIG)/qps_worker
 reconnect_interop_client: $(BINDIR)/$(CONFIG)/reconnect_interop_client
 reconnect_interop_server: $(BINDIR)/$(CONFIG)/reconnect_interop_server
+ref_counted_ptr_test: $(BINDIR)/$(CONFIG)/ref_counted_ptr_test
+ref_counted_test: $(BINDIR)/$(CONFIG)/ref_counted_test
 secure_auth_context_test: $(BINDIR)/$(CONFIG)/secure_auth_context_test
 secure_sync_unary_ping_pong_test: $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test
 server_builder_plugin_test: $(BINDIR)/$(CONFIG)/server_builder_plugin_test
@@ -1183,6 +1182,9 @@
 transport_pid_controller_test: $(BINDIR)/$(CONFIG)/transport_pid_controller_test
 writes_per_rpc_test: $(BINDIR)/$(CONFIG)/writes_per_rpc_test
 public_headers_must_be_c89: $(BINDIR)/$(CONFIG)/public_headers_must_be_c89
+gen_hpack_tables: $(BINDIR)/$(CONFIG)/gen_hpack_tables
+gen_legal_metadata_characters: $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
+gen_percent_encoding_tables: $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
 boringssl_aes_test: $(BINDIR)/$(CONFIG)/boringssl_aes_test
 boringssl_asn1_test: $(BINDIR)/$(CONFIG)/boringssl_asn1_test
 boringssl_base64_test: $(BINDIR)/$(CONFIG)/boringssl_base64_test
@@ -1350,7 +1352,6 @@
   $(BINDIR)/$(CONFIG)/alloc_test \
   $(BINDIR)/$(CONFIG)/alpn_test \
   $(BINDIR)/$(CONFIG)/arena_test \
-  $(BINDIR)/$(CONFIG)/backoff_test \
   $(BINDIR)/$(CONFIG)/bad_server_response_test \
   $(BINDIR)/$(CONFIG)/bin_decoder_test \
   $(BINDIR)/$(CONFIG)/bin_encoder_test \
@@ -1382,7 +1383,6 @@
   $(BINDIR)/$(CONFIG)/gpr_cmdline_test \
   $(BINDIR)/$(CONFIG)/gpr_cpu_test \
   $(BINDIR)/$(CONFIG)/gpr_env_test \
-  $(BINDIR)/$(CONFIG)/gpr_histogram_test \
   $(BINDIR)/$(CONFIG)/gpr_host_port_test \
   $(BINDIR)/$(CONFIG)/gpr_log_test \
   $(BINDIR)/$(CONFIG)/gpr_manual_constructor_test \
@@ -1412,6 +1412,7 @@
   $(BINDIR)/$(CONFIG)/handshake_client \
   $(BINDIR)/$(CONFIG)/handshake_server \
   $(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker \
+  $(BINDIR)/$(CONFIG)/histogram_test \
   $(BINDIR)/$(CONFIG)/hpack_parser_test \
   $(BINDIR)/$(CONFIG)/hpack_table_test \
   $(BINDIR)/$(CONFIG)/http_parser_test \
@@ -1539,6 +1540,7 @@
   $(BINDIR)/$(CONFIG)/alarm_cpp_test \
   $(BINDIR)/$(CONFIG)/async_end2end_test \
   $(BINDIR)/$(CONFIG)/auth_property_iterator_test \
+  $(BINDIR)/$(CONFIG)/backoff_test \
   $(BINDIR)/$(CONFIG)/bdp_estimator_test \
   $(BINDIR)/$(CONFIG)/bm_arena \
   $(BINDIR)/$(CONFIG)/bm_call_create \
@@ -1600,6 +1602,8 @@
   $(BINDIR)/$(CONFIG)/qps_worker \
   $(BINDIR)/$(CONFIG)/reconnect_interop_client \
   $(BINDIR)/$(CONFIG)/reconnect_interop_server \
+  $(BINDIR)/$(CONFIG)/ref_counted_ptr_test \
+  $(BINDIR)/$(CONFIG)/ref_counted_test \
   $(BINDIR)/$(CONFIG)/secure_auth_context_test \
   $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test \
   $(BINDIR)/$(CONFIG)/server_builder_plugin_test \
@@ -1665,6 +1669,7 @@
   $(BINDIR)/$(CONFIG)/alarm_cpp_test \
   $(BINDIR)/$(CONFIG)/async_end2end_test \
   $(BINDIR)/$(CONFIG)/auth_property_iterator_test \
+  $(BINDIR)/$(CONFIG)/backoff_test \
   $(BINDIR)/$(CONFIG)/bdp_estimator_test \
   $(BINDIR)/$(CONFIG)/bm_arena \
   $(BINDIR)/$(CONFIG)/bm_call_create \
@@ -1726,6 +1731,8 @@
   $(BINDIR)/$(CONFIG)/qps_worker \
   $(BINDIR)/$(CONFIG)/reconnect_interop_client \
   $(BINDIR)/$(CONFIG)/reconnect_interop_server \
+  $(BINDIR)/$(CONFIG)/ref_counted_ptr_test \
+  $(BINDIR)/$(CONFIG)/ref_counted_test \
   $(BINDIR)/$(CONFIG)/secure_auth_context_test \
   $(BINDIR)/$(CONFIG)/secure_sync_unary_ping_pong_test \
   $(BINDIR)/$(CONFIG)/server_builder_plugin_test \
@@ -1766,8 +1773,6 @@
 	$(Q) $(BINDIR)/$(CONFIG)/alpn_test || ( echo test alpn_test failed ; exit 1 )
 	$(E) "[RUN]     Testing arena_test"
 	$(Q) $(BINDIR)/$(CONFIG)/arena_test || ( echo test arena_test failed ; exit 1 )
-	$(E) "[RUN]     Testing backoff_test"
-	$(Q) $(BINDIR)/$(CONFIG)/backoff_test || ( echo test backoff_test failed ; exit 1 )
 	$(E) "[RUN]     Testing bad_server_response_test"
 	$(Q) $(BINDIR)/$(CONFIG)/bad_server_response_test || ( echo test bad_server_response_test failed ; exit 1 )
 	$(E) "[RUN]     Testing bin_decoder_test"
@@ -1826,8 +1831,6 @@
 	$(Q) $(BINDIR)/$(CONFIG)/gpr_cpu_test || ( echo test gpr_cpu_test failed ; exit 1 )
 	$(E) "[RUN]     Testing gpr_env_test"
 	$(Q) $(BINDIR)/$(CONFIG)/gpr_env_test || ( echo test gpr_env_test failed ; exit 1 )
-	$(E) "[RUN]     Testing gpr_histogram_test"
-	$(Q) $(BINDIR)/$(CONFIG)/gpr_histogram_test || ( echo test gpr_histogram_test failed ; exit 1 )
 	$(E) "[RUN]     Testing gpr_host_port_test"
 	$(Q) $(BINDIR)/$(CONFIG)/gpr_host_port_test || ( echo test gpr_host_port_test failed ; exit 1 )
 	$(E) "[RUN]     Testing gpr_log_test"
@@ -1884,6 +1887,8 @@
 	$(Q) $(BINDIR)/$(CONFIG)/handshake_server || ( echo test handshake_server failed ; exit 1 )
 	$(E) "[RUN]     Testing handshake_server_with_readahead_handshaker"
 	$(Q) $(BINDIR)/$(CONFIG)/handshake_server_with_readahead_handshaker || ( echo test handshake_server_with_readahead_handshaker failed ; exit 1 )
+	$(E) "[RUN]     Testing histogram_test"
+	$(Q) $(BINDIR)/$(CONFIG)/histogram_test || ( echo test histogram_test failed ; exit 1 )
 	$(E) "[RUN]     Testing hpack_parser_test"
 	$(Q) $(BINDIR)/$(CONFIG)/hpack_parser_test || ( echo test hpack_parser_test failed ; exit 1 )
 	$(E) "[RUN]     Testing hpack_table_test"
@@ -2032,6 +2037,8 @@
 	$(Q) $(BINDIR)/$(CONFIG)/async_end2end_test || ( echo test async_end2end_test failed ; exit 1 )
 	$(E) "[RUN]     Testing auth_property_iterator_test"
 	$(Q) $(BINDIR)/$(CONFIG)/auth_property_iterator_test || ( echo test auth_property_iterator_test failed ; exit 1 )
+	$(E) "[RUN]     Testing backoff_test"
+	$(Q) $(BINDIR)/$(CONFIG)/backoff_test || ( echo test backoff_test failed ; exit 1 )
 	$(E) "[RUN]     Testing bdp_estimator_test"
 	$(Q) $(BINDIR)/$(CONFIG)/bdp_estimator_test || ( echo test bdp_estimator_test failed ; exit 1 )
 	$(E) "[RUN]     Testing bm_arena"
@@ -2128,6 +2135,10 @@
 	$(Q) $(BINDIR)/$(CONFIG)/proto_utils_test || ( echo test proto_utils_test failed ; exit 1 )
 	$(E) "[RUN]     Testing qps_openloop_test"
 	$(Q) $(BINDIR)/$(CONFIG)/qps_openloop_test || ( echo test qps_openloop_test failed ; exit 1 )
+	$(E) "[RUN]     Testing ref_counted_ptr_test"
+	$(Q) $(BINDIR)/$(CONFIG)/ref_counted_ptr_test || ( echo test ref_counted_ptr_test failed ; exit 1 )
+	$(E) "[RUN]     Testing ref_counted_test"
+	$(Q) $(BINDIR)/$(CONFIG)/ref_counted_test || ( echo test ref_counted_test failed ; exit 1 )
 	$(E) "[RUN]     Testing secure_auth_context_test"
 	$(Q) $(BINDIR)/$(CONFIG)/secure_auth_context_test || ( echo test secure_auth_context_test failed ; exit 1 )
 	$(E) "[RUN]     Testing secure_sync_unary_ping_pong_test"
@@ -2177,7 +2188,7 @@
 tools: tools_c tools_cxx
 
 
-tools_c: privatelibs_c $(BINDIR)/$(CONFIG)/check_epollexclusive $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token $(BINDIR)/$(CONFIG)/grpc_verify_jwt
+tools_c: privatelibs_c $(BINDIR)/$(CONFIG)/check_epollexclusive $(BINDIR)/$(CONFIG)/grpc_create_jwt $(BINDIR)/$(CONFIG)/grpc_print_google_default_creds_token $(BINDIR)/$(CONFIG)/grpc_verify_jwt $(BINDIR)/$(CONFIG)/gen_hpack_tables $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
 
 tools_cxx: privatelibs_cxx
 
@@ -2786,14 +2797,14 @@
 install-pkg-config_c: pc_c pc_c_unsecure
 	$(E) "[INSTALL] Installing C pkg-config files"
 	$(Q) $(INSTALL) -d $(prefix)/lib/pkgconfig
-	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc.pc $(prefix)/lib/pkgconfig/grpc.pc
-	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc $(prefix)/lib/pkgconfig/grpc_unsecure.pc
+	$(Q) $(INSTALL) -m 0644 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc.pc $(prefix)/lib/pkgconfig/grpc.pc
+	$(Q) $(INSTALL) -m 0644 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc $(prefix)/lib/pkgconfig/grpc_unsecure.pc
 
 install-pkg-config_cxx: pc_cxx pc_cxx_unsecure
 	$(E) "[INSTALL] Installing C++ pkg-config files"
 	$(Q) $(INSTALL) -d $(prefix)/lib/pkgconfig
-	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc $(prefix)/lib/pkgconfig/grpc++.pc
-	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc $(prefix)/lib/pkgconfig/grpc++_unsecure.pc
+	$(Q) $(INSTALL) -m 0644 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc $(prefix)/lib/pkgconfig/grpc++.pc
+	$(Q) $(INSTALL) -m 0644 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc $(prefix)/lib/pkgconfig/grpc++_unsecure.pc
 
 install-certs: etc/roots.pem
 	$(E) "[INSTALL] Installing root certificates"
@@ -2824,7 +2835,6 @@
     src/core/lib/support/env_posix.cc \
     src/core/lib/support/env_windows.cc \
     src/core/lib/support/fork.cc \
-    src/core/lib/support/histogram.cc \
     src/core/lib/support/host_port.cc \
     src/core/lib/support/log.cc \
     src/core/lib/support/log_android.cc \
@@ -2864,7 +2874,6 @@
     include/grpc/support/avl.h \
     include/grpc/support/cmdline.h \
     include/grpc/support/cpu.h \
-    include/grpc/support/histogram.h \
     include/grpc/support/host_port.h \
     include/grpc/support/log.h \
     include/grpc/support/log_windows.h \
@@ -3618,11 +3627,13 @@
     test/core/iomgr/endpoint_tests.cc \
     test/core/util/debugger_macros.cc \
     test/core/util/grpc_profiler.cc \
+    test/core/util/histogram.cc \
     test/core/util/memory_counters.cc \
     test/core/util/mock_endpoint.cc \
     test/core/util/parse_hexstring.cc \
     test/core/util/passthru_endpoint.cc \
     test/core/util/port.cc \
+    test/core/util/port_isolated_runtime_environment.cc \
     test/core/util/port_server_client.cc \
     test/core/util/slice_splitter.cc \
     test/core/util/tracer_util.cc \
@@ -3877,11 +3888,13 @@
     test/core/iomgr/endpoint_tests.cc \
     test/core/util/debugger_macros.cc \
     test/core/util/grpc_profiler.cc \
+    test/core/util/histogram.cc \
     test/core/util/memory_counters.cc \
     test/core/util/mock_endpoint.cc \
     test/core/util/parse_hexstring.cc \
     test/core/util/passthru_endpoint.cc \
     test/core/util/port.cc \
+    test/core/util/port_isolated_runtime_environment.cc \
     test/core/util/port_server_client.cc \
     test/core/util/slice_splitter.cc \
     test/core/util/tracer_util.cc \
@@ -4586,7 +4599,6 @@
     include/grpc/support/avl.h \
     include/grpc/support/cmdline.h \
     include/grpc/support/cpu.h \
-    include/grpc/support/histogram.h \
     include/grpc/support/host_port.h \
     include/grpc/support/log.h \
     include/grpc/support/log_windows.h \
@@ -5074,7 +5086,6 @@
     include/grpc/support/avl.h \
     include/grpc/support/cmdline.h \
     include/grpc/support/cpu.h \
-    include/grpc/support/histogram.h \
     include/grpc/support/host_port.h \
     include/grpc/support/log.h \
     include/grpc/support/log_windows.h \
@@ -5796,7 +5807,6 @@
     include/grpc/support/avl.h \
     include/grpc/support/cmdline.h \
     include/grpc/support/cpu.h \
-    include/grpc/support/histogram.h \
     include/grpc/support/host_port.h \
     include/grpc/support/log.h \
     include/grpc/support/log_windows.h \
@@ -8547,6 +8557,7 @@
     test/core/end2end/tests/filter_call_init_fails.cc \
     test/core/end2end/tests/filter_causes_close.cc \
     test/core/end2end/tests/filter_latency.cc \
+    test/core/end2end/tests/filter_status_code.cc \
     test/core/end2end/tests/graceful_server_shutdown.cc \
     test/core/end2end/tests/high_initial_seqno.cc \
     test/core/end2end/tests/hpack_size.cc \
@@ -8645,6 +8656,7 @@
     test/core/end2end/tests/filter_call_init_fails.cc \
     test/core/end2end/tests/filter_causes_close.cc \
     test/core/end2end/tests/filter_latency.cc \
+    test/core/end2end/tests/filter_status_code.cc \
     test/core/end2end/tests/graceful_server_shutdown.cc \
     test/core/end2end/tests/high_initial_seqno.cc \
     test/core/end2end/tests/hpack_size.cc \
@@ -8903,38 +8915,6 @@
 endif
 
 
-BACKOFF_TEST_SRC = \
-    test/core/backoff/backoff_test.cc \
-
-BACKOFF_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(BACKOFF_TEST_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/backoff_test: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/backoff_test: $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/backoff_test
-
-endif
-
-$(OBJDIR)/$(CONFIG)/test/core/backoff/backoff_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-
-deps_backoff_test: $(BACKOFF_TEST_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(BACKOFF_TEST_OBJS:.o=.dep)
-endif
-endif
-
-
 BAD_SERVER_RESPONSE_TEST_SRC = \
     test/core/end2end/bad_server_response_test.cc \
 
@@ -9834,102 +9814,6 @@
 endif
 
 
-GEN_HPACK_TABLES_SRC = \
-    tools/codegen/core/gen_hpack_tables.c \
-
-GEN_HPACK_TABLES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_HPACK_TABLES_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/gen_hpack_tables: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/gen_hpack_tables: $(GEN_HPACK_TABLES_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GEN_HPACK_TABLES_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_hpack_tables
-
-endif
-
-$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_hpack_tables.o:  $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a
-
-deps_gen_hpack_tables: $(GEN_HPACK_TABLES_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(GEN_HPACK_TABLES_OBJS:.o=.dep)
-endif
-endif
-
-
-GEN_LEGAL_METADATA_CHARACTERS_SRC = \
-    tools/codegen/core/gen_legal_metadata_characters.c \
-
-GEN_LEGAL_METADATA_CHARACTERS_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_LEGAL_METADATA_CHARACTERS_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/gen_legal_metadata_characters: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/gen_legal_metadata_characters: $(GEN_LEGAL_METADATA_CHARACTERS_OBJS)
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GEN_LEGAL_METADATA_CHARACTERS_OBJS) $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
-
-endif
-
-$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_legal_metadata_characters.o: 
-
-deps_gen_legal_metadata_characters: $(GEN_LEGAL_METADATA_CHARACTERS_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(GEN_LEGAL_METADATA_CHARACTERS_OBJS:.o=.dep)
-endif
-endif
-
-
-GEN_PERCENT_ENCODING_TABLES_SRC = \
-    tools/codegen/core/gen_percent_encoding_tables.c \
-
-GEN_PERCENT_ENCODING_TABLES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_PERCENT_ENCODING_TABLES_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/gen_percent_encoding_tables: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/gen_percent_encoding_tables: $(GEN_PERCENT_ENCODING_TABLES_OBJS)
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GEN_PERCENT_ENCODING_TABLES_OBJS) $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
-
-endif
-
-$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_percent_encoding_tables.o: 
-
-deps_gen_percent_encoding_tables: $(GEN_PERCENT_ENCODING_TABLES_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(GEN_PERCENT_ENCODING_TABLES_OBJS:.o=.dep)
-endif
-endif
-
-
 GOAWAY_SERVER_TEST_SRC = \
     test/core/end2end/goaway_server_test.cc \
 
@@ -10090,38 +9974,6 @@
 endif
 
 
-GPR_HISTOGRAM_TEST_SRC = \
-    test/core/support/histogram_test.cc \
-
-GPR_HISTOGRAM_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GPR_HISTOGRAM_TEST_SRC))))
-ifeq ($(NO_SECURE),true)
-
-# You can't build secure targets if you don't have OpenSSL.
-
-$(BINDIR)/$(CONFIG)/gpr_histogram_test: openssl_dep_error
-
-else
-
-
-
-$(BINDIR)/$(CONFIG)/gpr_histogram_test: $(GPR_HISTOGRAM_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-	$(E) "[LD]      Linking $@"
-	$(Q) mkdir -p `dirname $@`
-	$(Q) $(LD) $(LDFLAGS) $(GPR_HISTOGRAM_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gpr_histogram_test
-
-endif
-
-$(OBJDIR)/$(CONFIG)/test/core/support/histogram_test.o:  $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
-
-deps_gpr_histogram_test: $(GPR_HISTOGRAM_TEST_OBJS:.o=.dep)
-
-ifneq ($(NO_SECURE),true)
-ifneq ($(NO_DEPS),true)
--include $(GPR_HISTOGRAM_TEST_OBJS:.o=.dep)
-endif
-endif
-
-
 GPR_HOST_PORT_TEST_SRC = \
     test/core/support/host_port_test.cc \
 
@@ -11152,6 +11004,38 @@
 endif
 
 
+HISTOGRAM_TEST_SRC = \
+    test/core/util/histogram_test.cc \
+
+HISTOGRAM_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(HISTOGRAM_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/histogram_test: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/histogram_test: $(HISTOGRAM_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(HISTOGRAM_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/histogram_test
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/util/histogram_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_histogram_test: $(HISTOGRAM_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(HISTOGRAM_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 HPACK_PARSER_FUZZER_TEST_SRC = \
     test/core/transport/chttp2/hpack_parser_fuzzer_test.cc \
 
@@ -13620,6 +13504,49 @@
 endif
 
 
+BACKOFF_TEST_SRC = \
+    test/core/backoff/backoff_test.cc \
+
+BACKOFF_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(BACKOFF_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/backoff_test: openssl_dep_error
+
+else
+
+
+
+
+ifeq ($(NO_PROTOBUF),true)
+
+# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
+
+$(BINDIR)/$(CONFIG)/backoff_test: protobuf_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/backoff_test: $(PROTOBUF_DEP) $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(BACKOFF_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/backoff_test
+
+endif
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/backoff/backoff_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_backoff_test: $(BACKOFF_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(BACKOFF_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 BDP_ESTIMATOR_TEST_SRC = \
     test/core/transport/bdp_estimator_test.cc \
 
@@ -16530,6 +16457,92 @@
 $(OBJDIR)/$(CONFIG)/test/cpp/interop/reconnect_interop_server.o: $(GENDIR)/src/proto/grpc/testing/empty.pb.cc $(GENDIR)/src/proto/grpc/testing/empty.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.pb.cc $(GENDIR)/src/proto/grpc/testing/messages.grpc.pb.cc $(GENDIR)/src/proto/grpc/testing/test.pb.cc $(GENDIR)/src/proto/grpc/testing/test.grpc.pb.cc
 
 
+REF_COUNTED_PTR_TEST_SRC = \
+    test/core/support/ref_counted_ptr_test.cc \
+
+REF_COUNTED_PTR_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(REF_COUNTED_PTR_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/ref_counted_ptr_test: openssl_dep_error
+
+else
+
+
+
+
+ifeq ($(NO_PROTOBUF),true)
+
+# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
+
+$(BINDIR)/$(CONFIG)/ref_counted_ptr_test: protobuf_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/ref_counted_ptr_test: $(PROTOBUF_DEP) $(REF_COUNTED_PTR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(REF_COUNTED_PTR_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/ref_counted_ptr_test
+
+endif
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/support/ref_counted_ptr_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_ref_counted_ptr_test: $(REF_COUNTED_PTR_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(REF_COUNTED_PTR_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
+REF_COUNTED_TEST_SRC = \
+    test/core/support/ref_counted_test.cc \
+
+REF_COUNTED_TEST_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(REF_COUNTED_TEST_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/ref_counted_test: openssl_dep_error
+
+else
+
+
+
+
+ifeq ($(NO_PROTOBUF),true)
+
+# You can't build the protoc plugins or protobuf-enabled targets if you don't have protobuf 3.0.0+.
+
+$(BINDIR)/$(CONFIG)/ref_counted_test: protobuf_dep_error
+
+else
+
+$(BINDIR)/$(CONFIG)/ref_counted_test: $(PROTOBUF_DEP) $(REF_COUNTED_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LDXX) $(LDFLAGS) $(REF_COUNTED_TEST_OBJS) $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a $(LDLIBSXX) $(LDLIBS_PROTOBUF) $(LDLIBS) $(LDLIBS_SECURE) $(GTEST_LIB) -o $(BINDIR)/$(CONFIG)/ref_counted_test
+
+endif
+
+endif
+
+$(OBJDIR)/$(CONFIG)/test/core/support/ref_counted_test.o:  $(LIBDIR)/$(CONFIG)/libgrpc_test_util.a $(LIBDIR)/$(CONFIG)/libgrpc++.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LIBDIR)/$(CONFIG)/libgpr_test_util.a $(LIBDIR)/$(CONFIG)/libgpr.a
+
+deps_ref_counted_test: $(REF_COUNTED_TEST_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(REF_COUNTED_TEST_OBJS:.o=.dep)
+endif
+endif
+
+
 SECURE_AUTH_CONTEXT_TEST_SRC = \
     test/cpp/common/secure_auth_context_test.cc \
 
@@ -17336,6 +17349,102 @@
 endif
 
 
+GEN_HPACK_TABLES_SRC = \
+    tools/codegen/core/gen_hpack_tables.cc \
+
+GEN_HPACK_TABLES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_HPACK_TABLES_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/gen_hpack_tables: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/gen_hpack_tables: $(GEN_HPACK_TABLES_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(GEN_HPACK_TABLES_OBJS) $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_hpack_tables
+
+endif
+
+$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_hpack_tables.o:  $(LIBDIR)/$(CONFIG)/libgpr.a $(LIBDIR)/$(CONFIG)/libgrpc.a
+
+deps_gen_hpack_tables: $(GEN_HPACK_TABLES_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(GEN_HPACK_TABLES_OBJS:.o=.dep)
+endif
+endif
+
+
+GEN_LEGAL_METADATA_CHARACTERS_SRC = \
+    tools/codegen/core/gen_legal_metadata_characters.cc \
+
+GEN_LEGAL_METADATA_CHARACTERS_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_LEGAL_METADATA_CHARACTERS_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/gen_legal_metadata_characters: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/gen_legal_metadata_characters: $(GEN_LEGAL_METADATA_CHARACTERS_OBJS)
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(GEN_LEGAL_METADATA_CHARACTERS_OBJS) $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_legal_metadata_characters
+
+endif
+
+$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_legal_metadata_characters.o: 
+
+deps_gen_legal_metadata_characters: $(GEN_LEGAL_METADATA_CHARACTERS_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(GEN_LEGAL_METADATA_CHARACTERS_OBJS:.o=.dep)
+endif
+endif
+
+
+GEN_PERCENT_ENCODING_TABLES_SRC = \
+    tools/codegen/core/gen_percent_encoding_tables.cc \
+
+GEN_PERCENT_ENCODING_TABLES_OBJS = $(addprefix $(OBJDIR)/$(CONFIG)/, $(addsuffix .o, $(basename $(GEN_PERCENT_ENCODING_TABLES_SRC))))
+ifeq ($(NO_SECURE),true)
+
+# You can't build secure targets if you don't have OpenSSL.
+
+$(BINDIR)/$(CONFIG)/gen_percent_encoding_tables: openssl_dep_error
+
+else
+
+
+
+$(BINDIR)/$(CONFIG)/gen_percent_encoding_tables: $(GEN_PERCENT_ENCODING_TABLES_OBJS)
+	$(E) "[LD]      Linking $@"
+	$(Q) mkdir -p `dirname $@`
+	$(Q) $(LD) $(LDFLAGS) $(GEN_PERCENT_ENCODING_TABLES_OBJS) $(LDLIBS) $(LDLIBS_SECURE) -o $(BINDIR)/$(CONFIG)/gen_percent_encoding_tables
+
+endif
+
+$(OBJDIR)/$(CONFIG)/tools/codegen/core/gen_percent_encoding_tables.o: 
+
+deps_gen_percent_encoding_tables: $(GEN_PERCENT_ENCODING_TABLES_OBJS:.o=.dep)
+
+ifneq ($(NO_SECURE),true)
+ifneq ($(NO_DEPS),true)
+-include $(GEN_PERCENT_ENCODING_TABLES_OBJS:.o=.dep)
+endif
+endif
+
+
 
 # boringssl needs an override to ensure that it does not include
 # system openssl headers regardless of other configuration
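The Makefile hunk above also adds standalone build targets for the code-generation tools (gen_hpack_tables, gen_legal_metadata_characters, gen_percent_encoding_tables) whose sources were renamed from .c to .cc in this change. For orientation, here is a hedged, self-contained sketch of what a table generator of this kind typically does: emit a C array that other code can index at runtime. The character set and output format below are illustrative, not necessarily what the real tools emit.

```
// Illustrative only: prints a 256-entry table marking RFC 3986 "unreserved"
// bytes, the sort of table a gen_percent_encoding_tables-style tool generates.
#include <cctype>
#include <cstdio>

static bool IsUnreserved(unsigned char c) {
  // RFC 3986 unreserved characters: ALPHA / DIGIT / "-" / "." / "_" / "~"
  return std::isalnum(c) || c == '-' || c == '.' || c == '_' || c == '~';
}

int main() {
  std::printf("static const unsigned char kUnreserved[256] = {\n");
  for (int i = 0; i < 256; ++i) {
    std::printf("%d,%s", IsUnreserved(static_cast<unsigned char>(i)) ? 1 : 0,
                (i % 16 == 15) ? "\n" : " ");
  }
  std::printf("};\n");
  return 0;
}
```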
diff --git a/README.md b/README.md
index 1550bb5..fc72c7c 100644
--- a/README.md
+++ b/README.md
@@ -5,7 +5,8 @@
 
 [![Join the chat at https://gitter.im/grpc/grpc](https://badges.gitter.im/grpc/grpc.svg)](https://gitter.im/grpc/grpc?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
 
-Copyright 2015 Google Inc.
+Copyright 2015
+[The gRPC Authors](https://github.com/grpc/grpc/blob/master/AUTHORS)
 
 # Documentation
 
@@ -25,15 +26,15 @@
 
 Libraries in different languages may be in different states of development. We are seeking contributions for all of these libraries.
 
-| Language                | Source                              | Status  |
-|-------------------------|-------------------------------------|---------|
-| Shared C [core library] | [src/core](src/core)                | 1.8     |
-| C++                     | [src/cpp](src/cpp)                  | 1.8     |
-| Ruby                    | [src/ruby](src/ruby)                | 1.8     |
-| Python                  | [src/python](src/python)            | 1.8     |
-| PHP                     | [src/php](src/php)                  | 1.8     |
-| C#                      | [src/csharp](src/csharp)            | 1.8     |
-| Objective-C             | [src/objective-c](src/objective-c)  | 1.8     |
+| Language                | Source                              |
+|-------------------------|-------------------------------------|
+| Shared C [core library] | [src/core](src/core)                |
+| C++                     | [src/cpp](src/cpp)                  |
+| Ruby                    | [src/ruby](src/ruby)                |
+| Python                  | [src/python](src/python)            |
+| PHP                     | [src/php](src/php)                  |
+| C#                      | [src/csharp](src/csharp)            |
+| Objective-C             | [src/objective-c](src/objective-c)  |
 
 Java source code is in the [grpc-java](http://github.com/grpc/grpc-java)
 repository. Go source code is in the
diff --git a/bazel/grpc_build_system.bzl b/bazel/grpc_build_system.bzl
index 60ae9d6..d146ca9 100644
--- a/bazel/grpc_build_system.bzl
+++ b/bazel/grpc_build_system.bzl
@@ -23,6 +23,9 @@
 # each change must be ported from one to the other.
 #
 
+# The set of pollers to test against if a test exercises polling
+POLLERS = ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv']
+
 def _get_external_deps(external_deps):
   ret = []
   for dep in external_deps:
@@ -55,6 +58,8 @@
     name = name,
     srcs = srcs,
     defines = select({"//:grpc_no_ares": ["GRPC_ARES=0"],
+                      "//conditions:default": [],}) +
+              select({"//:remote_execution":  ["GRPC_PORT_ISOLATED_RUNTIME=1"],
                       "//conditions:default": [],}),
     hdrs = _maybe_update_cc_library_hdrs(hdrs + public_hdrs),
     deps = deps + _get_external_deps(external_deps),
@@ -89,19 +94,35 @@
     generate_mock = generate_mock,
   )
 
-def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], language = "C++"):
+def grpc_cc_test(name, srcs = [], deps = [], external_deps = [], args = [], data = [], uses_polling = True, language = "C++"):
   copts = []
   if language.upper() == "C":
     copts = ["-std=c99"]
-  native.cc_test(
-    name = name,
-    srcs = srcs,
-    args = args,
-    data = data,
-    deps = deps + _get_external_deps(external_deps),
-    copts = copts,
-    linkopts = ["-pthread"],
-  )
+  args = {
+    'name': name,
+    'srcs': srcs,
+    'args': args,
+    'data': data,
+    'deps': deps + _get_external_deps(external_deps),
+    'copts': copts,
+    'linkopts': ["-pthread"],
+  }
+  if uses_polling:
+    native.cc_test(testonly=True, tags=['manual'], **args)
+    for poller in POLLERS:
+      native.sh_test(
+        name = name + '@poller=' + poller,
+        data = [name],
+        srcs = [
+          '//test/core/util:run_with_poller_sh',
+        ],
+        args = [
+          poller,
+          '$(location %s)' % name
+        ] + args['args'],
+      )
+  else:
+    native.cc_test(**args)
 
 def grpc_cc_binary(name, srcs = [], deps = [], external_deps = [], args = [], data = [], language = "C++", testonly = False, linkshared = False, linkopts = []):
   copts = []
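The grpc_cc_test change above is the substantive part of this file: a test that uses polling is now built once (tagged manual) and then wrapped in one sh_test per entry in POLLERS, each driven by //test/core/util:run_with_poller_sh, which presumably selects the polling engine before launching the binary (gRPC core consults the GRPC_POLL_STRATEGY environment variable for this). A tiny, hedged C++ sketch of how a test process can observe which poller variant it was launched under:

```
// Minimal sketch: report the polling engine requested via the environment.
// GRPC_POLL_STRATEGY is the variable gRPC core reads; the wrapper script is
// assumed to set it to one of the POLLERS values before exec'ing the test.
#include <cstdio>
#include <cstdlib>

int main() {
  const char* poller = std::getenv("GRPC_POLL_STRATEGY");
  std::printf("poll strategy: %s\n", poller != nullptr ? poller : "(default)");
  return 0;
}
```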
diff --git a/bazel/grpc_deps.bzl b/bazel/grpc_deps.bzl
index fcbef49..e465312 100644
--- a/bazel/grpc_deps.bzl
+++ b/bazel/grpc_deps.bzl
@@ -116,3 +116,14 @@
             strip_prefix = "abseil-cpp-cc4bed2d74f7c8717e31f9579214ab52a9c9c610",
             url = "https://github.com/abseil/abseil-cpp/archive/cc4bed2d74f7c8717e31f9579214ab52a9c9c610.tar.gz",
         )
+
+    if "com_github_bazelbuild_bazeltoolchains" not in native.existing_rules():
+        native.http_archive(
+            name = "com_github_bazelbuild_bazeltoolchains",
+            strip_prefix = "bazel-toolchains-af4681c3d19f063f090222ec3d04108c4e0ca255",
+            urls = [
+                "https://mirror.bazel.build/github.com/bazelbuild/bazel-toolchains/archive/af4681c3d19f063f090222ec3d04108c4e0ca255.tar.gz",
+                "https://github.com/bazelbuild/bazel-toolchains/archive/af4681c3d19f063f090222ec3d04108c4e0ca255.tar.gz",
+            ],
+            sha256 = "d58bb2d6c8603f600d522b6104d6192a65339aa26cbba9f11ff5c4b36dedb928",
+        )
diff --git a/build.yaml b/build.yaml
index 5cce4e6..42d7245 100644
--- a/build.yaml
+++ b/build.yaml
@@ -12,9 +12,9 @@
   '#08': Use "-preN" suffixes to identify pre-release versions
   '#09': Per-language overrides are possible with (eg) ruby_version tag here
   '#10': See the expand_version.py for all the quirks here
-  core_version: 5.0.0
-  g_stands_for: generous
-  version: 1.8.3
+  core_version: 5.0.0-dev
+  g_stands_for: glossy
+  version: 1.9.0-dev
 filegroups:
 - name: census
   public_headers:
@@ -41,7 +41,6 @@
   - src/core/lib/support/env_posix.cc
   - src/core/lib/support/env_windows.cc
   - src/core/lib/support/fork.cc
-  - src/core/lib/support/histogram.cc
   - src/core/lib/support/host_port.cc
   - src/core/lib/support/log.cc
   - src/core/lib/support/log_android.cc
@@ -83,7 +82,6 @@
   - include/grpc/support/avl.h
   - include/grpc/support/cmdline.h
   - include/grpc/support/cpu.h
-  - include/grpc/support/histogram.h
   - include/grpc/support/host_port.h
   - include/grpc/support/log.h
   - include/grpc/support/log_windows.h
@@ -398,6 +396,9 @@
   - src/core/lib/slice/slice_hash_table.h
   - src/core/lib/slice/slice_internal.h
   - src/core/lib/slice/slice_string_helpers.h
+  - src/core/lib/support/debug_location.h
+  - src/core/lib/support/ref_counted.h
+  - src/core/lib/support/ref_counted_ptr.h
   - src/core/lib/surface/alarm_internal.h
   - src/core/lib/surface/api_trace.h
   - src/core/lib/surface/call.h
@@ -711,6 +712,7 @@
   - test/core/iomgr/endpoint_tests.h
   - test/core/util/debugger_macros.h
   - test/core/util/grpc_profiler.h
+  - test/core/util/histogram.h
   - test/core/util/memory_counters.h
   - test/core/util/mock_endpoint.h
   - test/core/util/parse_hexstring.h
@@ -728,11 +730,13 @@
   - test/core/iomgr/endpoint_tests.cc
   - test/core/util/debugger_macros.cc
   - test/core/util/grpc_profiler.cc
+  - test/core/util/histogram.cc
   - test/core/util/memory_counters.cc
   - test/core/util/mock_endpoint.cc
   - test/core/util/parse_hexstring.cc
   - test/core/util/passthru_endpoint.cc
   - test/core/util/port.cc
+  - test/core/util/port_isolated_runtime_environment.cc
   - test/core/util/port_server_client.cc
   - test/core/util/slice_splitter.cc
   - test/core/util/tracer_util.cc
@@ -1765,17 +1769,6 @@
   - gpr_test_util
   - gpr
   uses_polling: false
-- name: backoff_test
-  build: test
-  language: c
-  src:
-  - test/core/backoff/backoff_test.cc
-  deps:
-  - grpc_test_util
-  - grpc
-  - gpr_test_util
-  - gpr
-  uses_polling: false
 - name: bad_server_response_test
   build: test
   language: c
@@ -2117,28 +2110,6 @@
   - mac
   - linux
   - posix
-- name: gen_hpack_tables
-  build: tool
-  language: c
-  src:
-  - tools/codegen/core/gen_hpack_tables.c
-  deps:
-  - gpr
-  - grpc
-  uses_polling: false
-- name: gen_legal_metadata_characters
-  build: tool
-  language: c
-  src:
-  - tools/codegen/core/gen_legal_metadata_characters.c
-  deps: []
-- name: gen_percent_encoding_tables
-  build: tool
-  language: c
-  src:
-  - tools/codegen/core/gen_percent_encoding_tables.c
-  deps: []
-  uses_polling: false
 - name: goaway_server_test
   cpu_cost: 0.1
   build: test
@@ -2193,15 +2164,6 @@
   - gpr_test_util
   - gpr
   uses_polling: false
-- name: gpr_histogram_test
-  build: test
-  language: c
-  src:
-  - test/core/support/histogram_test.cc
-  deps:
-  - gpr_test_util
-  - gpr
-  uses_polling: false
 - name: gpr_host_port_test
   build: test
   language: c
@@ -2550,6 +2512,15 @@
   platforms:
   - linux
   secure: true
+- name: histogram_test
+  build: test
+  language: c
+  src:
+  - test/core/util/histogram_test.cc
+  deps:
+  - grpc_test_util
+  - gpr
+  uses_polling: false
 - name: hpack_parser_fuzzer_test
   build: fuzzer
   language: c
@@ -3475,6 +3446,17 @@
   - gpr_test_util
   - gpr
   uses_polling: false
+- name: backoff_test
+  build: test
+  language: c++
+  src:
+  - test/core/backoff/backoff_test.cc
+  deps:
+  - grpc_test_util
+  - grpc
+  - gpr_test_util
+  - gpr
+  uses_polling: false
 - name: bdp_estimator_test
   build: test
   language: c++
@@ -4547,6 +4529,34 @@
   - gpr_test_util
   - gpr
   - grpc++_test_config
+- name: ref_counted_ptr_test
+  gtest: true
+  build: test
+  language: c++
+  src:
+  - test/core/support/ref_counted_ptr_test.cc
+  deps:
+  - grpc_test_util
+  - grpc++
+  - grpc
+  - gpr_test_util
+  - gpr
+  uses:
+  - grpc++_test
+- name: ref_counted_test
+  gtest: true
+  build: test
+  language: c++
+  src:
+  - test/core/support/ref_counted_test.cc
+  deps:
+  - grpc_test_util
+  - grpc++
+  - grpc
+  - gpr_test_util
+  - gpr
+  uses:
+  - grpc++_test
 - name: secure_auth_context_test
   gtest: true
   build: test
@@ -4814,6 +4824,28 @@
   deps:
   - grpc
   - gpr
+- name: gen_hpack_tables
+  build: tool
+  language: cc
+  src:
+  - tools/codegen/core/gen_hpack_tables.cc
+  deps:
+  - gpr
+  - grpc
+  uses_polling: false
+- name: gen_legal_metadata_characters
+  build: tool
+  language: cc
+  src:
+  - tools/codegen/core/gen_legal_metadata_characters.cc
+  deps: []
+- name: gen_percent_encoding_tables
+  build: tool
+  language: cc
+  src:
+  - tools/codegen/core/gen_percent_encoding_tables.cc
+  deps: []
+  uses_polling: false
 vspackages:
 - linkage: static
   name: grpc.dependencies.zlib
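Among the build.yaml changes above, backoff_test moves from the C test list to the C++ one, matching the C++ rewrite of the backoff code. For orientation, a hedged sketch of the exponential-backoff-with-jitter scheme that code implements; the constants follow gRPC's documented connection-backoff defaults (initial 1s, multiplier 1.6, jitter 0.2, cap 120s), and the class below is not the real grpc_core::BackOff interface.

```
// Sketch only: exponential backoff with jitter, using gRPC's documented
// connection-backoff defaults. Not the grpc_core::BackOff API itself.
#include <cstdio>
#include <random>

class Backoff {
 public:
  // Returns the delay (in seconds) to wait before the next attempt.
  double NextDelay() {
    std::uniform_real_distribution<double> jitter(1.0 - kJitter, 1.0 + kJitter);
    double delay = current_ * jitter(rng_);
    current_ *= kMultiplier;                          // grow the base delay
    if (current_ > kMaxBackoff) current_ = kMaxBackoff;  // cap it
    return delay;
  }
  void Reset() { current_ = kInitialBackoff; }  // call after a success

 private:
  static constexpr double kInitialBackoff = 1.0;
  static constexpr double kMultiplier = 1.6;
  static constexpr double kJitter = 0.2;
  static constexpr double kMaxBackoff = 120.0;
  double current_ = kInitialBackoff;
  std::mt19937 rng_{std::random_device{}()};
};

int main() {
  Backoff b;
  for (int attempt = 1; attempt <= 6; ++attempt) {
    std::printf("attempt %d: wait %.2fs\n", attempt, b.NextDelay());
  }
  return 0;
}
```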
diff --git a/cmake/benchmark.cmake b/cmake/benchmark.cmake
new file mode 100644
index 0000000..c628422
--- /dev/null
+++ b/cmake/benchmark.cmake
@@ -0,0 +1,33 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if("${gRPC_BENCHMARK_PROVIDER}" STREQUAL "module")
+  if(NOT BENCHMARK_ROOT_DIR)
+    set(BENCHMARK_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/benchmark)
+  endif()
+  if(EXISTS "${BENCHMARK_ROOT_DIR}/CMakeLists.txt")
+      add_subdirectory(${BENCHMARK_ROOT_DIR} third_party/benchmark)
+      if(TARGET benchmark)
+          set(_gRPC_BENCHMARK_LIBRARIES benchmark)
+      endif()
+  else()
+      message(WARNING "gRPC_BENCHMARK_PROVIDER is \"module\" but BENCHMARK_ROOT_DIR is wrong")
+  endif()
+elseif("${gRPC_BENCHMARK_PROVIDER}" STREQUAL "package")
+  find_package(benchmark)
+  if(TARGET benchmark::benchmark)
+    set(_gRPC_BENCHMARK_LIBRARIES benchmark::benchmark)
+  endif()
+  set(_gRPC_FIND_BENCHMARK "if(NOT benchmark_FOUND)\n  find_package(benchmark)\nendif()")
+endif()
diff --git a/cmake/cares.cmake b/cmake/cares.cmake
new file mode 100644
index 0000000..521cf52
--- /dev/null
+++ b/cmake/cares.cmake
@@ -0,0 +1,36 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if("${gRPC_CARES_PROVIDER}" STREQUAL "module")
+  if(NOT CARES_ROOT_DIR)
+    set(CARES_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/cares/cares)
+  endif()
+  set(CARES_SHARED OFF CACHE BOOL "disable shared library")
+  set(CARES_STATIC ON CACHE BOOL "link cares statically")
+  set(CARES_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/third_party/cares/cares")
+  add_subdirectory(third_party/cares/cares)
+  if(TARGET c-ares)
+    set(_gRPC_CARES_LIBRARIES c-ares)
+  endif()
+  if(gRPC_INSTALL)
+    message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_CARES_PROVIDER is \"module\"")
+    set(gRPC_INSTALL FALSE)
+  endif()
+elseif("${gRPC_CARES_PROVIDER}" STREQUAL "package")
+  find_package(c-ares REQUIRED CONFIG)
+  if(TARGET c-ares::cares)
+    set(_gRPC_CARES_LIBRARIES c-ares::cares)
+  endif()
+  set(_gRPC_FIND_CARES "if(NOT c-ares_FOUND)\n  find_package(c-ares CONFIG)\nendif()")
+endif()
diff --git a/cmake/gflags.cmake b/cmake/gflags.cmake
new file mode 100644
index 0000000..1864bda
--- /dev/null
+++ b/cmake/gflags.cmake
@@ -0,0 +1,33 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if("${gRPC_GFLAGS_PROVIDER}" STREQUAL "module")
+  if(NOT GFLAGS_ROOT_DIR)
+    set(GFLAGS_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/gflags)
+  endif()
+  if(EXISTS "${GFLAGS_ROOT_DIR}/CMakeLists.txt")
+      add_subdirectory(${GFLAGS_ROOT_DIR} third_party/gflags)
+      if(TARGET gflags_static)
+          set(_gRPC_GFLAGS_LIBRARIES gflags_static)
+      endif()
+  else()
+      message(WARNING "gRPC_GFLAGS_PROVIDER is \"module\" but GFLAGS_ROOT_DIR is wrong")
+  endif()
+elseif("${gRPC_GFLAGS_PROVIDER}" STREQUAL "package")
+  find_package(gflags)
+  if(TARGET gflags::gflags)
+    set(_gRPC_GFLAGS_LIBRARIES gflags::gflags)
+  endif()
+  set(_gRPC_FIND_GFLAGS "if(NOT gflags_FOUND)\n  find_package(gflags)\nendif()")
+endif()
diff --git a/cmake/msvc_static_runtime.cmake b/cmake/msvc_static_runtime.cmake
index fc6d1d6..844bd02 100644
--- a/cmake/msvc_static_runtime.cmake
+++ b/cmake/msvc_static_runtime.cmake
@@ -1,3 +1,17 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 option(gRPC_MSVC_STATIC_RUNTIME "Link with static msvc runtime libraries" OFF)
 
 if(gRPC_MSVC_STATIC_RUNTIME)
diff --git a/cmake/protobuf.cmake b/cmake/protobuf.cmake
new file mode 100644
index 0000000..e2206a2
--- /dev/null
+++ b/cmake/protobuf.cmake
@@ -0,0 +1,77 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "module")
+  # Building the protobuf tests requires gmock, which is not part of a standard protobuf checkout.
+  # Disable them unless they are explicitly requested from the cmake command line (when we assume
+  # gmock is downloaded to the right location inside protobuf).
+  if(NOT protobuf_BUILD_TESTS)
+    set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests")
+  endif()
+  # Disable building protobuf with zlib. Building protobuf with zlib breaks
+  # the build if zlib is not installed on the system.
+  if(NOT protobuf_WITH_ZLIB)
+    set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build protobuf with zlib.")
+  endif()
+  if(NOT PROTOBUF_ROOT_DIR)
+    set(PROTOBUF_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/protobuf)
+  endif()
+  set(PROTOBUF_WELLKNOWN_IMPORT_DIR ${PROTOBUF_ROOT_DIR}/src)
+  if(EXISTS "${PROTOBUF_ROOT_DIR}/cmake/CMakeLists.txt")
+    set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Link static runtime libraries")
+    add_subdirectory(${PROTOBUF_ROOT_DIR}/cmake third_party/protobuf)
+    if(TARGET ${_gRPC_PROTOBUF_LIBRARY_NAME})
+      set(_gRPC_PROTOBUF_LIBRARIES ${_gRPC_PROTOBUF_LIBRARY_NAME})
+    endif()
+    if(TARGET libprotoc)
+      set(_gRPC_PROTOBUF_PROTOC_LIBRARIES libprotoc)
+    endif()
+    if(TARGET protoc)
+      set(_gRPC_PROTOBUF_PROTOC protoc)
+      set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
+    endif()
+  else()
+      message(WARNING "gRPC_PROTOBUF_PROVIDER is \"module\" but PROTOBUF_ROOT_DIR is wrong")
+  endif()
+  if(gRPC_INSTALL)
+    message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_PROTOBUF_PROVIDER is \"module\"")
+    set(gRPC_INSTALL FALSE)
+  endif()
+elseif("${gRPC_PROTOBUF_PROVIDER}" STREQUAL "package")
+  find_package(Protobuf REQUIRED ${gRPC_PROTOBUF_PACKAGE_TYPE})
+  if(Protobuf_FOUND OR PROTOBUF_FOUND)
+    if(TARGET protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME})
+      set(_gRPC_PROTOBUF_LIBRARIES protobuf::${_gRPC_PROTOBUF_LIBRARY_NAME})
+    else()
+      set(_gRPC_PROTOBUF_LIBRARIES ${PROTOBUF_LIBRARIES})
+    endif()
+    if(TARGET protobuf::libprotoc)
+      set(_gRPC_PROTOBUF_PROTOC_LIBRARIES protobuf::libprotoc)
+    else()
+      set(_gRPC_PROTOBUF_PROTOC_LIBRARIES ${PROTOBUF_PROTOC_LIBRARIES})
+    endif()
+    if(TARGET protobuf::protoc)
+      set(_gRPC_PROTOBUF_PROTOC protobuf::protoc)
+      set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protobuf::protoc>)
+    else()
+      set(_gRPC_PROTOBUF_PROTOC ${PROTOBUF_PROTOC_EXECUTABLE})
+      set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE ${PROTOBUF_PROTOC_EXECUTABLE})
+    endif()
+    set(_gRPC_FIND_PROTOBUF "if(NOT Protobuf_FOUND AND NOT PROTOBUF_FOUND)\n  find_package(Protobuf ${gRPC_PROTOBUF_PACKAGE_TYPE})\nendif()")
+  endif()
+  if(PROTOBUF_FOUND)
+    include_directories(${PROTOBUF_INCLUDE_DIRS})
+  endif()
+  set(PROTOBUF_WELLKNOWN_IMPORT_DIR /usr/local/include)
+endif()
diff --git a/cmake/ssl.cmake b/cmake/ssl.cmake
new file mode 100644
index 0000000..75ce069
--- /dev/null
+++ b/cmake/ssl.cmake
@@ -0,0 +1,38 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if("${gRPC_SSL_PROVIDER}" STREQUAL "module")
+  if(NOT BORINGSSL_ROOT_DIR)
+    set(BORINGSSL_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/boringssl)
+  endif()
+  if(EXISTS "${BORINGSSL_ROOT_DIR}/CMakeLists.txt")
+    set(OPENSSL_NO_ASM ON)  # make boringssl buildable with Visual Studio
+    add_subdirectory(${BORINGSSL_ROOT_DIR} third_party/boringssl)
+    if(TARGET ssl)
+      set(_gRPC_SSL_LIBRARIES ssl)
+      set(_gRPC_SSL_INCLUDE_DIR ${BORINGSSL_ROOT_DIR}/include)
+    endif()
+  else()
+      message(WARNING "gRPC_SSL_PROVIDER is \"module\" but BORINGSSL_ROOT_DIR is wrong")
+  endif()
+  if(gRPC_INSTALL)
+    message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_SSL_PROVIDER is \"module\"")
+    set(gRPC_INSTALL FALSE)
+  endif()
+elseif("${gRPC_SSL_PROVIDER}" STREQUAL "package")
+  find_package(OpenSSL REQUIRED)
+  set(_gRPC_SSL_LIBRARIES ${OPENSSL_LIBRARIES})
+  set(_gRPC_SSL_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR})
+  set(_gRPC_FIND_SSL "if(NOT OPENSSL_FOUND)\n  find_package(OpenSSL)\nendif()")
+endif()
diff --git a/cmake/zlib.cmake b/cmake/zlib.cmake
new file mode 100644
index 0000000..16cd9e6
--- /dev/null
+++ b/cmake/zlib.cmake
@@ -0,0 +1,39 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+if("${gRPC_ZLIB_PROVIDER}" STREQUAL "module")
+  if(NOT ZLIB_ROOT_DIR)
+    set(ZLIB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/third_party/zlib)
+  endif()
+  set(ZLIB_INCLUDE_DIR "${ZLIB_ROOT_DIR}")
+  if(EXISTS "${ZLIB_ROOT_DIR}/CMakeLists.txt")
+      # TODO(jtattermusch): workaround for https://github.com/madler/zlib/issues/218
+      include_directories(${ZLIB_INCLUDE_DIR})
+
+      add_subdirectory(${ZLIB_ROOT_DIR} third_party/zlib)
+      if(TARGET zlibstatic)
+          set(_gRPC_ZLIB_LIBRARIES zlibstatic)
+      endif()
+  else()
+      message(WARNING "gRPC_ZLIB_PROVIDER is \"module\" but ZLIB_ROOT_DIR is wrong")
+  endif()
+  if(gRPC_INSTALL)
+    message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_ZLIB_PROVIDER is \"module\"")
+    set(gRPC_INSTALL FALSE)
+  endif()
+elseif("${gRPC_ZLIB_PROVIDER}" STREQUAL "package")
+  find_package(ZLIB REQUIRED)
+  set(_gRPC_ZLIB_LIBRARIES ${ZLIB_LIBRARIES})
+  set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n  find_package(ZLIB)\nendif()")
+endif()
diff --git a/config.m4 b/config.m4
index 6fe897f..c026b83 100644
--- a/config.m4
+++ b/config.m4
@@ -54,7 +54,6 @@
     src/core/lib/support/env_posix.cc \
     src/core/lib/support/env_windows.cc \
     src/core/lib/support/fork.cc \
-    src/core/lib/support/histogram.cc \
     src/core/lib/support/host_port.cc \
     src/core/lib/support/log.cc \
     src/core/lib/support/log_android.cc \
diff --git a/config.w32 b/config.w32
index c2a4327..cd3a16a 100644
--- a/config.w32
+++ b/config.w32
@@ -31,7 +31,6 @@
     "src\\core\\lib\\support\\env_posix.cc " +
     "src\\core\\lib\\support\\env_windows.cc " +
     "src\\core\\lib\\support\\fork.cc " +
-    "src\\core\\lib\\support\\histogram.cc " +
     "src\\core\\lib\\support\\host_port.cc " +
     "src\\core\\lib\\support\\log.cc " +
     "src\\core\\lib\\support\\log_android.cc " +
diff --git a/doc/PROTOCOL-HTTP2.md b/doc/PROTOCOL-HTTP2.md
index 29d3cc2..107a8e8 100644
--- a/doc/PROTOCOL-HTTP2.md
+++ b/doc/PROTOCOL-HTTP2.md
@@ -1,7 +1,7 @@
 # gRPC over HTTP2
 
 ## Introduction
-This document serves as a detailed description for an implementation of gRPC carried over HTTP2 draft 17 framing. It assumes familiarity with the HTTP2 specification.
+This document serves as a detailed description for an implementation of gRPC carried over <a href="https://tools.ietf.org/html/rfc7540">HTTP2 framing</a>. It assumes familiarity with the HTTP2 specification.
 
 ## Protocol
 Production rules are using <a href="http://tools.ietf.org/html/rfc5234">ABNF syntax</a>.
@@ -24,7 +24,7 @@
 * **Call-Definition** → Method Scheme Path TE [Authority] [Timeout] Content-Type [Message-Type] [Message-Encoding] [Message-Accept-Encoding] [User-Agent]
 * **Method** →  ":method POST"
 * **Scheme** → ":scheme "  ("http" / "https")
-* **Path** → ":path" "/" Service-Name "/" {_method name_}
+* **Path** → ":path" "/" Service-Name "/" {_method name_}  # But see note below.
 * **Service-Name** → {_IDL-specific service name_}
 * **Authority** → ":authority" {_virtual host name of authority_}
 * **TE** → "te" "trailers"  # Used to detect incompatible proxies
@@ -170,6 +170,7 @@
 grpc-status = 0 # OK
 trace-proto-bin = jher831yy13JHy3hc
 ```
+
 #### User Agents
 
 While the protocol does not require a user-agent to function it is recommended that clients provide a structured user-agent string that provides a basic description of the calling library, version & platform to facilitate issue diagnosis in heterogeneous environments. The following structure is recommended to library developers
@@ -197,7 +198,7 @@
 #### HTTP2 Transport Mapping
 
 ##### Stream Identification
-All GRPC calls need to specify an internal ID. We will use HTTP2 stream-ids as call identifiers in this scheme. NOTE: These id’s are contextual to an open HTTP2 session and will not be unique within a given process that is handling more than one HTTP2 session nor can they be used as GUIDs.
+All GRPC calls need to specify an internal ID. We will use HTTP2 stream-ids as call identifiers in this scheme. NOTE: These ids are contextual to an open HTTP2 session and will not be unique within a given process that is handling more than one HTTP2 session nor can they be used as GUIDs.
 
 ##### Data Frames
 DATA frame boundaries have no relation to **Length-Prefixed-Message** boundaries and implementations should make no assumptions about their alignment.
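Because DATA frame boundaries carry no meaning for message framing, a receiver reassembles the byte stream and then parses Length-Prefixed-Messages out of it. A minimal sketch of reading the 5-byte prefix this spec defines (a 1-byte Compressed-Flag followed by a 4-byte big-endian Message-Length):

```
// Sketch: parse one gRPC Length-Prefixed-Message header from a reassembled
// byte stream (1-byte Compressed-Flag + 4-byte big-endian Message-Length).
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct MessagePrefix {
  bool compressed;
  uint32_t length;
};

// Returns false if fewer than 5 bytes are available at `offset`.
bool ParsePrefix(const std::vector<uint8_t>& buf, size_t offset,
                 MessagePrefix* out) {
  if (buf.size() < offset + 5) return false;
  out->compressed = buf[offset] != 0;
  out->length = (static_cast<uint32_t>(buf[offset + 1]) << 24) |
                (static_cast<uint32_t>(buf[offset + 2]) << 16) |
                (static_cast<uint32_t>(buf[offset + 3]) << 8) |
                static_cast<uint32_t>(buf[offset + 4]);
  return true;
}

int main() {
  // An uncompressed message carrying 3 payload bytes.
  std::vector<uint8_t> stream = {0x00, 0x00, 0x00, 0x00, 0x03, 'a', 'b', 'c'};
  MessagePrefix p;
  if (ParsePrefix(stream, 0, &p)) {
    std::printf("compressed=%d length=%u\n", p.compressed ? 1 : 0, p.length);
  }
  return 0;
}
```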
@@ -232,6 +233,7 @@
 The HTTP2 specification mandates the use of TLS 1.2 or higher when TLS is used with HTTP2. It also places some additional constraints on the allowed ciphers in deployments to avoid known-problems as well as requiring SNI support. It is also expected that HTTP2 will be used in conjunction with proprietary transport security mechanisms about which the specification can make no meaningful recommendations.
 
 ##### Connection Management
+
 ###### GOAWAY Frame
 Sent by servers to clients to indicate that they will no longer accept any new streams on the associated connections. This frame includes the id of the last successfully accepted stream by the server. Clients should consider any stream initiated after the last successfully accepted stream as UNAVAILABLE and retry the call elsewhere. Clients are free to continue working with the already accepted streams until they complete or the connection is terminated.
 
diff --git a/doc/PROTOCOL-WEB.md b/doc/PROTOCOL-WEB.md
index 226871d..c31a048 100644
--- a/doc/PROTOCOL-WEB.md
+++ b/doc/PROTOCOL-WEB.md
@@ -3,14 +3,14 @@
 gRPC-Web provides a JS client library that supports the same API
 as gRPC-Node to access a gRPC service. Due to browser limitation,
 the Web client library implements a different protocol than the
-[native gRPC protocol](https://grpc.io/docs/guides/wire.html).
+[native gRPC protocol](PROTOCOL-HTTP2.md).
 This protocol is designed to make it easy for a proxy to translate
 between the protocols as this is the most likely deployment model.
 
 This document lists the differences between the two protocols.
 To help tracking future revisions, this document describes a delta
 with the protocol details specified in the
-[native gRPC protocol](https://grpc.io/docs/guides/wire.html).
+[native gRPC protocol](PROTOCOL-HTTP2.md).
 
 # Design goals
 
@@ -31,7 +31,7 @@
 * become optional (in 1-2 years) when browsers are able to speak the native
 gRPC protocol via the new [whatwg fetch/streams API](https://github.com/whatwg/fetch)
 
-# Protocol differences vs [gRPC over HTTP2](https://grpc.io/docs/guides/wire.html)
+# Protocol differences vs [gRPC over HTTP2](PROTOCOL-HTTP2.md)
 
 Content-Type
 
@@ -53,14 +53,14 @@
 
 ---
 
-HTTP/2 related behavior (specified in [gRPC over HTTP2](https://grpc.io/docs/guides/wire.html))
+HTTP/2 related behavior (specified in [gRPC over HTTP2](PROTOCOL-HTTP2.md))
 
 1. stream-id is not supported or used
 2. go-away is not supported or used
 
 ---
 
-Message framing (vs. [http2-transport-mapping](https://grpc.io/docs/guides/wire.html#http2-transport-mapping))
+Message framing (vs. [http2-transport-mapping](PROTOCOL-HTTP2.md#http2-transport-mapping))
 
 1. Response status encoded as part of the response body
   * Key-value pairs encoded as a HTTP/1 headers block (without the terminating newline), per https://tools.ietf.org/html/rfc7230#section-3.2
@@ -86,7 +86,7 @@
 User Agent
 
 * Do NOT use User-Agent header (which is to be set by browsers, by default)
-* Use X-User-Agent: grpc-web-javascript/0.1 (follow the same format as specified in [gRPC over HTTP2](https://grpc.io/docs/guides/wire.html))
+* Use X-User-Agent: grpc-web-javascript/0.1 (follow the same format as specified in [gRPC over HTTP2](PROTOCOL-HTTP2.md))
 
 ---
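The message-framing delta above says the response status is carried in the body, with the key-value pairs encoded as an HTTP/1 headers block without the terminating newline. A small sketch of producing such a block; how the block is then framed inside the response body (flags, length prefix) is not shown here and is left as an assumption of the surrounding protocol.

```
// Sketch: format gRPC-Web trailers as an HTTP/1-style headers block
// ("key: value\r\n" pairs, with no terminating blank line), as described
// in the protocol delta above. Body framing of this block is omitted.
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

std::string EncodeTrailerBlock(
    const std::vector<std::pair<std::string, std::string>>& trailers) {
  std::string block;
  for (const auto& kv : trailers) {
    block += kv.first + ": " + kv.second + "\r\n";
  }
  return block;
}

int main() {
  std::string block = EncodeTrailerBlock(
      {{"grpc-status", "0"}, {"grpc-message", "OK"}});
  std::printf("%s", block.c_str());
  return 0;
}
```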
 
diff --git a/doc/md b/doc/md
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/doc/md
diff --git a/doc/service_config.md b/doc/service_config.md
index 0abbd7f..dd1cbc5 100644
--- a/doc/service_config.md
+++ b/doc/service_config.md
@@ -12,7 +12,7 @@
 
 ```
 {
-  // Load balancing policy name.
+  // Load balancing policy name (case insensitive).
   // Currently, the only selectable client-side policy provided with gRPC
   // is 'round_robin', but third parties may add their own policies.
   // This field is optional; if unset, the default behavior is to pick
diff --git a/examples/cpp/helloworld/CMakeLists.txt b/examples/cpp/helloworld/CMakeLists.txt
index 71a8db4..49684a1 100644
--- a/examples/cpp/helloworld/CMakeLists.txt
+++ b/examples/cpp/helloworld/CMakeLists.txt
@@ -6,13 +6,29 @@
 
 if(NOT MSVC)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+else()
+  add_definitions(-D_WIN32_WINNT=0x600)
 endif()
 
 # Protobuf
-set(protobuf_MODULE_COMPATIBLE TRUE)
-find_package(protobuf CONFIG REQUIRED)
+# NOTE: we cannot use "CONFIG" mode here because protobuf-config.cmake
+# is broken when used with CMAKE_INSTALL_PREFIX
+find_package(Protobuf REQUIRED)
 message(STATUS "Using protobuf ${protobuf_VERSION}")
 
+if(Protobuf_FOUND)
+  # Protobuf_FOUND is set for package type "CONFIG"
+  set(_PROTOBUF_LIBPROTOBUF protobuf::libprotobuf)
+  set(_PROTOBUF_PROTOC protobuf::protoc)
+elseif(PROTOBUF_FOUND)
+  # PROTOBUF_FOUND is set for package type "MODULE"
+  set(_PROTOBUF_LIBPROTOBUF ${PROTOBUF_LIBRARIES})
+  set(_PROTOBUF_PROTOC ${PROTOBUF_PROTOC_EXECUTABLE})
+  include_directories(${PROTOBUF_INCLUDE_DIRS})
+else()
+  message(WARNING "Failed to locate libprotobuf and protoc!")
+endif()
+
 # gRPC
 find_package(gRPC CONFIG REQUIRED)
 message(STATUS "Using gRPC ${gRPC_VERSION}")
@@ -31,7 +47,7 @@
 set(hw_grpc_hdrs "${CMAKE_CURRENT_BINARY_DIR}/helloworld.grpc.pb.h")
 add_custom_command(
       OUTPUT "${hw_grpc_srcs}" "${hw_grpc_hdrs}"
-      COMMAND protobuf::protoc
+      COMMAND ${_PROTOBUF_PROTOC}
      ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}" -I "${hw_proto_path}"
        --plugin=protoc-gen-grpc="${gRPC_CPP_PLUGIN_EXECUTABLE}"
        "${hw_proto}"
@@ -48,6 +64,6 @@
    ${hw_proto_srcs}
    ${hw_grpc_srcs})
  target_link_libraries(${_target}
-    protobuf::libprotobuf
+    ${_PROTOBUF_LIBPROTOBUF}
    gRPC::grpc++_unsecure)
 endforeach()
diff --git a/examples/csharp/helloworld-from-cli/Greeter/Greeter.csproj b/examples/csharp/helloworld-from-cli/Greeter/Greeter.csproj
index 6b26be1..3bff4a5 100644
--- a/examples/csharp/helloworld-from-cli/Greeter/Greeter.csproj
+++ b/examples/csharp/helloworld-from-cli/Greeter/Greeter.csproj
@@ -6,14 +6,13 @@
     <DebugType>portable</DebugType>
     <AssemblyName>Greeter</AssemblyName>
     <PackageId>Greeter</PackageId>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
   </PropertyGroup>
 
   <ItemGroup>
-    <PackageReference Include="Google.Protobuf" Version="3.2.0" />
-    <PackageReference Include="Google.Protobuf.Tools" Version="3.2.0" />
-    <PackageReference Include="Grpc" Version="1.2.2" />
-    <PackageReference Include="Grpc.Tools" Version="1.2.2" />
+    <PackageReference Include="Google.Protobuf" Version="3.5.0" />
+    <PackageReference Include="Google.Protobuf.Tools" Version="3.5.0" />
+    <PackageReference Include="Grpc" Version="1.8.0" />
+    <PackageReference Include="Grpc.Tools" Version="1.8.0" />
   </ItemGroup>
 
 </Project>
diff --git a/examples/csharp/helloworld-from-cli/Greeter/HelloworldGrpc.cs b/examples/csharp/helloworld-from-cli/Greeter/HelloworldGrpc.cs
index 8168b28..c808884 100644
--- a/examples/csharp/helloworld-from-cli/Greeter/HelloworldGrpc.cs
+++ b/examples/csharp/helloworld-from-cli/Greeter/HelloworldGrpc.cs
@@ -15,6 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
+#pragma warning disable 1591
 #region Designer generated code
 
 using System;
diff --git a/examples/csharp/helloworld-from-cli/GreeterClient/GreeterClient.csproj b/examples/csharp/helloworld-from-cli/GreeterClient/GreeterClient.csproj
index 24cacfc..d1ed040 100644
--- a/examples/csharp/helloworld-from-cli/GreeterClient/GreeterClient.csproj
+++ b/examples/csharp/helloworld-from-cli/GreeterClient/GreeterClient.csproj
@@ -7,7 +7,6 @@
     <AssemblyName>GreeterClient</AssemblyName>
     <OutputType>Exe</OutputType>
     <PackageId>GreeterClient</PackageId>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
   </PropertyGroup>
 
   <ItemGroup>
diff --git a/examples/csharp/helloworld-from-cli/GreeterServer/GreeterServer.csproj b/examples/csharp/helloworld-from-cli/GreeterServer/GreeterServer.csproj
index f7980fa..159fbd8 100644
--- a/examples/csharp/helloworld-from-cli/GreeterServer/GreeterServer.csproj
+++ b/examples/csharp/helloworld-from-cli/GreeterServer/GreeterServer.csproj
@@ -7,7 +7,6 @@
     <AssemblyName>GreeterServer</AssemblyName>
     <OutputType>Exe</OutputType>
     <PackageId>GreeterServer</PackageId>
-    <RuntimeFrameworkVersion Condition=" '$(TargetFramework)' == 'netcoreapp1.0' ">1.0.4</RuntimeFrameworkVersion>
   </PropertyGroup>
 
   <ItemGroup>
diff --git a/examples/csharp/helloworld-from-cli/generate_protos.bat b/examples/csharp/helloworld-from-cli/generate_protos.bat
index e290be6..dcf6084 100644
--- a/examples/csharp/helloworld-from-cli/generate_protos.bat
+++ b/examples/csharp/helloworld-from-cli/generate_protos.bat
@@ -19,8 +19,8 @@
 @rem enter this directory
 cd /d %~dp0
 
-set PROTOC=%UserProfile%\.nuget\packages\Google.Protobuf.Tools\3.2.0\tools\windows_x64\protoc.exe
-set PLUGIN=%UserProfile%\.nuget\packages\Grpc.Tools\1.2.2\tools\windows_x64\grpc_csharp_plugin.exe
+set PROTOC=%UserProfile%\.nuget\packages\Google.Protobuf.Tools\3.5.0\tools\windows_x64\protoc.exe
+set PLUGIN=%UserProfile%\.nuget\packages\Grpc.Tools\1.8.0\tools\windows_x64\grpc_csharp_plugin.exe
 
 %PROTOC% -I../../protos --csharp_out Greeter  ../../protos/helloworld.proto --grpc_out Greeter --plugin=protoc-gen-grpc=%PLUGIN%
 
diff --git a/examples/csharp/helloworld/Greeter/Greeter.csproj b/examples/csharp/helloworld/Greeter/Greeter.csproj
index 8dcd2d9..d2597f1 100644
--- a/examples/csharp/helloworld/Greeter/Greeter.csproj
+++ b/examples/csharp/helloworld/Greeter/Greeter.csproj
@@ -32,12 +32,12 @@
     <ConsolePause>false</ConsolePause>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="System" />
@@ -62,11 +62,11 @@
     <None Include="packages.config" />
   </ItemGroup>
   <ItemGroup />
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
 </Project>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs b/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs
index 8168b28..c808884 100644
--- a/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs
+++ b/examples/csharp/helloworld/Greeter/HelloworldGrpc.cs
@@ -15,6 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
+#pragma warning disable 1591
 #region Designer generated code
 
 using System;
diff --git a/examples/csharp/helloworld/Greeter/packages.config b/examples/csharp/helloworld/Greeter/packages.config
index ec83cd8..38297f8 100644
--- a/examples/csharp/helloworld/Greeter/packages.config
+++ b/examples/csharp/helloworld/Greeter/packages.config
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Tools" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Tools" version="1.8.0" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj b/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj
index 4b6b1b3..470749a 100644
--- a/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj
+++ b/examples/csharp/helloworld/GreeterClient/GreeterClient.csproj
@@ -32,12 +32,12 @@
     <Externalconsole>true</Externalconsole>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="System" />
@@ -60,11 +60,11 @@
   <ItemGroup>
     <None Include="packages.config" />
   </ItemGroup>
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
 </Project>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterClient/packages.config b/examples/csharp/helloworld/GreeterClient/packages.config
index b912fd4..4b3684e 100644
--- a/examples/csharp/helloworld/GreeterClient/packages.config
+++ b/examples/csharp/helloworld/GreeterClient/packages.config
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj b/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj
index 97978fa..82e2961 100644
--- a/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj
+++ b/examples/csharp/helloworld/GreeterServer/GreeterServer.csproj
@@ -32,12 +32,12 @@
     <Externalconsole>true</Externalconsole>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="System" />
@@ -60,11 +60,11 @@
   <ItemGroup>
     <None Include="packages.config" />
   </ItemGroup>
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
 </Project>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/GreeterServer/packages.config b/examples/csharp/helloworld/GreeterServer/packages.config
index b912fd4..4b3684e 100644
--- a/examples/csharp/helloworld/GreeterServer/packages.config
+++ b/examples/csharp/helloworld/GreeterServer/packages.config
@@ -1,7 +1,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/helloworld/generate_protos.bat b/examples/csharp/helloworld/generate_protos.bat
index f955470..45b097e 100644
--- a/examples/csharp/helloworld/generate_protos.bat
+++ b/examples/csharp/helloworld/generate_protos.bat
@@ -19,7 +19,7 @@
 @rem enter this directory
 cd /d %~dp0
 
-set TOOLS_PATH=packages\Grpc.Tools.1.2.2\tools\windows_x86
+set TOOLS_PATH=packages\Grpc.Tools.1.8.0\tools\windows_x86
 
 %TOOLS_PATH%\protoc.exe -I../../protos --csharp_out Greeter  ../../protos/helloworld.proto --grpc_out Greeter --plugin=protoc-gen-grpc=%TOOLS_PATH%\grpc_csharp_plugin.exe
 
diff --git a/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj b/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj
index 360444e..e66e986 100644
--- a/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj
+++ b/examples/csharp/route_guide/RouteGuide/RouteGuide.csproj
@@ -32,12 +32,12 @@
     <WarningLevel>4</WarningLevel>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Newtonsoft.Json, Version=7.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
@@ -75,12 +75,12 @@
     </None>
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.
diff --git a/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs b/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs
index 26278ea..765d5d5 100644
--- a/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs
+++ b/examples/csharp/route_guide/RouteGuide/RouteGuideGrpc.cs
@@ -15,6 +15,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
+#pragma warning disable 1591
 #region Designer generated code
 
 using System;
diff --git a/examples/csharp/route_guide/RouteGuide/packages.config b/examples/csharp/route_guide/RouteGuide/packages.config
index 2dde11f..fe2c995 100644
--- a/examples/csharp/route_guide/RouteGuide/packages.config
+++ b/examples/csharp/route_guide/RouteGuide/packages.config
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj b/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj
index 162eaed..612f60c 100644
--- a/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj
+++ b/examples/csharp/route_guide/RouteGuideClient/RouteGuideClient.csproj
@@ -34,12 +34,12 @@
     <WarningLevel>4</WarningLevel>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Newtonsoft.Json, Version=7.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
@@ -72,12 +72,12 @@
     </ProjectReference>
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.
diff --git a/examples/csharp/route_guide/RouteGuideClient/packages.config b/examples/csharp/route_guide/RouteGuideClient/packages.config
index 2dde11f..fe2c995 100644
--- a/examples/csharp/route_guide/RouteGuideClient/packages.config
+++ b/examples/csharp/route_guide/RouteGuideClient/packages.config
@@ -1,8 +1,8 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj b/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj
index b6f2f35..4d9d9d7 100644
--- a/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj
+++ b/examples/csharp/route_guide/RouteGuideServer/RouteGuideServer.csproj
@@ -34,12 +34,12 @@
     <WarningLevel>4</WarningLevel>
   </PropertyGroup>
   <ItemGroup>
-    <Reference Include="Google.Protobuf, Version=3.2.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
-      <HintPath>..\packages\Google.Protobuf.3.2.0\lib\net45\Google.Protobuf.dll</HintPath>
+    <Reference Include="Google.Protobuf, Version=3.5.0.0, Culture=neutral, PublicKeyToken=a7d26565bac4d604, processorArchitecture=MSIL">
+      <HintPath>..\packages\Google.Protobuf.3.5.0\lib\net45\Google.Protobuf.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Grpc.Core, Version=1.0.0.0, Culture=neutral, PublicKeyToken=d754f35622e28bad, processorArchitecture=MSIL">
-      <HintPath>..\packages\Grpc.Core.1.2.2\lib\net45\Grpc.Core.dll</HintPath>
+      <HintPath>..\packages\Grpc.Core.1.8.0\lib\net45\Grpc.Core.dll</HintPath>
       <Private>True</Private>
     </Reference>
     <Reference Include="Newtonsoft.Json, Version=7.0.0.0, Culture=neutral, PublicKeyToken=30ad4fe6b2a6aeed, processorArchitecture=MSIL">
@@ -73,12 +73,12 @@
     </ProjectReference>
   </ItemGroup>
   <Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
-  <Import Project="..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" />
+  <Import Project="..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets" Condition="Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" />
   <Target Name="EnsureNuGetPackageBuildImports" BeforeTargets="PrepareForBuild">
     <PropertyGroup>
       <ErrorText>This project references NuGet package(s) that are missing on this computer. Use NuGet Package Restore to download them.  For more information, see http://go.microsoft.com/fwlink/?LinkID=322105. The missing file is {0}.</ErrorText>
     </PropertyGroup>
-    <Error Condition="!Exists('..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.2.2\build\net45\Grpc.Core.targets'))" />
+    <Error Condition="!Exists('..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets')" Text="$([System.String]::Format('$(ErrorText)', '..\packages\Grpc.Core.1.8.0\build\net45\Grpc.Core.targets'))" />
   </Target>
   <!-- To modify your build process, add your task inside one of the targets below and uncomment it. 
        Other similar extension points exist, see Microsoft.Common.targets.
diff --git a/examples/csharp/route_guide/RouteGuideServer/packages.config b/examples/csharp/route_guide/RouteGuideServer/packages.config
index 46df645..2bb1f0d 100644
--- a/examples/csharp/route_guide/RouteGuideServer/packages.config
+++ b/examples/csharp/route_guide/RouteGuideServer/packages.config
@@ -1,9 +1,9 @@
 <?xml version="1.0" encoding="utf-8"?>
 <packages>
-  <package id="Google.Protobuf" version="3.2.0" targetFramework="net45" />
-  <package id="Grpc" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Core" version="1.2.2" targetFramework="net45" />
-  <package id="Grpc.Tools" version="1.2.2" targetFramework="net45" />
+  <package id="Google.Protobuf" version="3.5.0" targetFramework="net45" />
+  <package id="Grpc" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Core" version="1.8.0" targetFramework="net45" />
+  <package id="Grpc.Tools" version="1.8.0" targetFramework="net45" />
   <package id="Newtonsoft.Json" version="7.0.1" targetFramework="net45" />
   <package id="System.Interactive.Async" version="3.1.1" targetFramework="net45" />
 </packages>
\ No newline at end of file
diff --git a/examples/csharp/route_guide/generate_protos.bat b/examples/csharp/route_guide/generate_protos.bat
index 7311683..a8c9cb5 100644
--- a/examples/csharp/route_guide/generate_protos.bat
+++ b/examples/csharp/route_guide/generate_protos.bat
@@ -19,7 +19,7 @@
 @rem enter this directory
 cd /d %~dp0
 
-set TOOLS_PATH=packages\Grpc.Tools.1.2.2\tools\windows_x86
+set TOOLS_PATH=packages\Grpc.Tools.1.8.0\tools\windows_x86
 
 %TOOLS_PATH%\protoc.exe -I../../protos --csharp_out RouteGuide  ../../protos/route_guide.proto --grpc_out RouteGuide --plugin=protoc-gen-grpc=%TOOLS_PATH%\grpc_csharp_plugin.exe
 
diff --git a/examples/python/helloworld/greeter_client.py b/examples/python/helloworld/greeter_client.py
index d9b2bdf..a0aeb47 100644
--- a/examples/python/helloworld/greeter_client.py
+++ b/examples/python/helloworld/greeter_client.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """The Python implementation of the GRPC helloworld.Greeter client."""
 
 from __future__ import print_function
@@ -23,11 +22,11 @@
 
 
 def run():
-  channel = grpc.insecure_channel('localhost:50051')
-  stub = helloworld_pb2_grpc.GreeterStub(channel)
-  response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
-  print("Greeter client received: " + response.message)
+    channel = grpc.insecure_channel('localhost:50051')
+    stub = helloworld_pb2_grpc.GreeterStub(channel)
+    response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
+    print("Greeter client received: " + response.message)
 
 
 if __name__ == '__main__':
-  run()
+    run()
diff --git a/examples/python/helloworld/greeter_server.py b/examples/python/helloworld/greeter_server.py
index be61695..c355662 100644
--- a/examples/python/helloworld/greeter_server.py
+++ b/examples/python/helloworld/greeter_server.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """The Python implementation of the GRPC helloworld.Greeter server."""
 
 from concurrent import futures
@@ -27,20 +26,21 @@
 
 class Greeter(helloworld_pb2_grpc.GreeterServicer):
 
-  def SayHello(self, request, context):
-    return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
+    def SayHello(self, request, context):
+        return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
 
 
 def serve():
-  server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
-  helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
-  server.add_insecure_port('[::]:50051')
-  server.start()
-  try:
-    while True:
-      time.sleep(_ONE_DAY_IN_SECONDS)
-  except KeyboardInterrupt:
-    server.stop(0)
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
+    server.add_insecure_port('[::]:50051')
+    server.start()
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
+
 
 if __name__ == '__main__':
-  serve()
+    serve()
diff --git a/examples/python/interceptors/headers/header_manipulator_client_interceptor.py b/examples/python/interceptors/headers/header_manipulator_client_interceptor.py
index ac7c605..89b1bef 100644
--- a/examples/python/interceptors/headers/header_manipulator_client_interceptor.py
+++ b/examples/python/interceptors/headers/header_manipulator_client_interceptor.py
@@ -20,9 +20,10 @@
 
 
 class _ClientCallDetails(
-        collections.namedtuple('_ClientCallDetails',
-                               ('method', 'timeout', 'metadata',
-                                'credentials')), grpc.ClientCallDetails):
+        collections.namedtuple(
+            '_ClientCallDetails',
+            ('method', 'timeout', 'metadata', 'credentials')),
+        grpc.ClientCallDetails):
     pass
 
 
@@ -33,7 +34,10 @@
         metadata = []
         if client_call_details.metadata is not None:
             metadata = list(client_call_details.metadata)
-        metadata.append((header, value,))
+        metadata.append((
+            header,
+            value,
+        ))
         client_call_details = _ClientCallDetails(
             client_call_details.method, client_call_details.timeout, metadata,
             client_call_details.credentials)
diff --git a/examples/python/multiplex/multiplex_client.py b/examples/python/multiplex/multiplex_client.py
index c8c700a..9baa102 100644
--- a/examples/python/multiplex/multiplex_client.py
+++ b/examples/python/multiplex/multiplex_client.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """A client that makes both Greeter and RouteGuide RPCs."""
 
 from __future__ import print_function
@@ -29,98 +28,99 @@
 
 
 def make_route_note(message, latitude, longitude):
-  return route_guide_pb2.RouteNote(
-      message=message,
-      location=route_guide_pb2.Point(latitude=latitude, longitude=longitude))
+    return route_guide_pb2.RouteNote(
+        message=message,
+        location=route_guide_pb2.Point(latitude=latitude, longitude=longitude))
 
 
 def guide_get_one_feature(route_guide_stub, point):
-  feature = route_guide_stub.GetFeature(point)
-  if not feature.location:
-    print("Server returned incomplete feature")
-    return
+    feature = route_guide_stub.GetFeature(point)
+    if not feature.location:
+        print("Server returned incomplete feature")
+        return
 
-  if feature.name:
-    print("Feature called %s at %s" % (feature.name, feature.location))
-  else:
-    print("Found no feature at %s" % feature.location)
+    if feature.name:
+        print("Feature called %s at %s" % (feature.name, feature.location))
+    else:
+        print("Found no feature at %s" % feature.location)
 
 
 def guide_get_feature(route_guide_stub):
-  guide_get_one_feature(
-      route_guide_stub,
-      route_guide_pb2.Point(latitude=409146138, longitude=-746188906))
-  guide_get_one_feature(
-      route_guide_stub, route_guide_pb2.Point(latitude=0, longitude=0))
+    guide_get_one_feature(route_guide_stub,
+                          route_guide_pb2.Point(
+                              latitude=409146138, longitude=-746188906))
+    guide_get_one_feature(route_guide_stub,
+                          route_guide_pb2.Point(latitude=0, longitude=0))
 
 
 def guide_list_features(route_guide_stub):
-  rectangle = route_guide_pb2.Rectangle(
-      lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
-      hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
-  print("Looking for features between 40, -75 and 42, -73")
+    rectangle = route_guide_pb2.Rectangle(
+        lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
+        hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
+    print("Looking for features between 40, -75 and 42, -73")
 
-  features = route_guide_stub.ListFeatures(rectangle)
+    features = route_guide_stub.ListFeatures(rectangle)
 
-  for feature in features:
-    print("Feature called %s at %s" % (feature.name, feature.location))
+    for feature in features:
+        print("Feature called %s at %s" % (feature.name, feature.location))
 
 
 def generate_route(feature_list):
-  for _ in range(0, 10):
-    random_feature = feature_list[random.randint(0, len(feature_list) - 1)]
-    print("Visiting point %s" % random_feature.location)
-    yield random_feature.location
-    time.sleep(random.uniform(0.5, 1.5))
+    for _ in range(0, 10):
+        random_feature = feature_list[random.randint(0, len(feature_list) - 1)]
+        print("Visiting point %s" % random_feature.location)
+        yield random_feature.location
+        time.sleep(random.uniform(0.5, 1.5))
 
 
 def guide_record_route(route_guide_stub):
-  feature_list = route_guide_resources.read_route_guide_database()
+    feature_list = route_guide_resources.read_route_guide_database()
 
-  route_iterator = generate_route(feature_list)
-  route_summary = route_guide_stub.RecordRoute(route_iterator)
-  print("Finished trip with %s points " % route_summary.point_count)
-  print("Passed %s features " % route_summary.feature_count)
-  print("Travelled %s meters " % route_summary.distance)
-  print("It took %s seconds " % route_summary.elapsed_time)
+    route_iterator = generate_route(feature_list)
+    route_summary = route_guide_stub.RecordRoute(route_iterator)
+    print("Finished trip with %s points " % route_summary.point_count)
+    print("Passed %s features " % route_summary.feature_count)
+    print("Travelled %s meters " % route_summary.distance)
+    print("It took %s seconds " % route_summary.elapsed_time)
 
 
 def generate_messages():
-  messages = [
-      make_route_note("First message", 0, 0),
-      make_route_note("Second message", 0, 1),
-      make_route_note("Third message", 1, 0),
-      make_route_note("Fourth message", 0, 0),
-      make_route_note("Fifth message", 1, 0),
-  ]
-  for msg in messages:
-    print("Sending %s at %s" % (msg.message, msg.location))
-    yield msg
-    time.sleep(random.uniform(0.5, 1.0))
+    messages = [
+        make_route_note("First message", 0, 0),
+        make_route_note("Second message", 0, 1),
+        make_route_note("Third message", 1, 0),
+        make_route_note("Fourth message", 0, 0),
+        make_route_note("Fifth message", 1, 0),
+    ]
+    for msg in messages:
+        print("Sending %s at %s" % (msg.message, msg.location))
+        yield msg
+        time.sleep(random.uniform(0.5, 1.0))
 
 
 def guide_route_chat(route_guide_stub):
-  responses = route_guide_stub.RouteChat(generate_messages())
-  for response in responses:
-    print("Received message %s at %s" % (response.message, response.location))
+    responses = route_guide_stub.RouteChat(generate_messages())
+    for response in responses:
+        print("Received message %s at %s" % (response.message,
+                                             response.location))
 
 
 def run():
-  channel = grpc.insecure_channel('localhost:50051')
-  greeter_stub = helloworld_pb2_grpc.GreeterStub(channel)
-  route_guide_stub = route_guide_pb2_grpc.RouteGuideStub(channel)
-  greeter_response = greeter_stub.SayHello(
-      helloworld_pb2.HelloRequest(name='you'))
-  print("Greeter client received: " + greeter_response.message)
-  print("-------------- GetFeature --------------")
-  guide_get_feature(route_guide_stub)
-  print("-------------- ListFeatures --------------")
-  guide_list_features(route_guide_stub)
-  print("-------------- RecordRoute --------------")
-  guide_record_route(route_guide_stub)
-  print("-------------- RouteChat --------------")
-  guide_route_chat(route_guide_stub)
+    channel = grpc.insecure_channel('localhost:50051')
+    greeter_stub = helloworld_pb2_grpc.GreeterStub(channel)
+    route_guide_stub = route_guide_pb2_grpc.RouteGuideStub(channel)
+    greeter_response = greeter_stub.SayHello(
+        helloworld_pb2.HelloRequest(name='you'))
+    print("Greeter client received: " + greeter_response.message)
+    print("-------------- GetFeature --------------")
+    guide_get_feature(route_guide_stub)
+    print("-------------- ListFeatures --------------")
+    guide_list_features(route_guide_stub)
+    print("-------------- RecordRoute --------------")
+    guide_record_route(route_guide_stub)
+    print("-------------- RouteChat --------------")
+    guide_route_chat(route_guide_stub)
 
 
 if __name__ == '__main__':
-  run()
+    run()
diff --git a/examples/python/multiplex/multiplex_server.py b/examples/python/multiplex/multiplex_server.py
index 9a6e835..70dec3c 100644
--- a/examples/python/multiplex/multiplex_server.py
+++ b/examples/python/multiplex/multiplex_server.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """A gRPC server servicing both Greeter and RouteGuide RPCs."""
 
 from concurrent import futures
@@ -30,107 +29,111 @@
 
 
 def _get_feature(feature_db, point):
-  """Returns Feature at given location or None."""
-  for feature in feature_db:
-    if feature.location == point:
-      return feature
-  return None
+    """Returns Feature at given location or None."""
+    for feature in feature_db:
+        if feature.location == point:
+            return feature
+    return None
 
 
 def _get_distance(start, end):
-  """Distance between two points."""
-  coord_factor = 10000000.0
-  lat_1 = start.latitude / coord_factor
-  lat_2 = end.latitude / coord_factor
-  lon_1 = start.longitude / coord_factor
-  lon_2 = end.longitude / coord_factor
-  lat_rad_1 = math.radians(lat_1)
-  lat_rad_2 = math.radians(lat_2)
-  delta_lat_rad = math.radians(lat_2 - lat_1)
-  delta_lon_rad = math.radians(lon_2 - lon_1)
+    """Distance between two points."""
+    coord_factor = 10000000.0
+    lat_1 = start.latitude / coord_factor
+    lat_2 = end.latitude / coord_factor
+    lon_1 = start.longitude / coord_factor
+    lon_2 = end.longitude / coord_factor
+    lat_rad_1 = math.radians(lat_1)
+    lat_rad_2 = math.radians(lat_2)
+    delta_lat_rad = math.radians(lat_2 - lat_1)
+    delta_lon_rad = math.radians(lon_2 - lon_1)
 
-  a = (pow(math.sin(delta_lat_rad / 2), 2) +
-       (math.cos(lat_rad_1) * math.cos(lat_rad_2) *
-        pow(math.sin(delta_lon_rad / 2), 2)))
-  c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
-  R = 6371000; # metres
-  return R * c;
+    a = (pow(math.sin(delta_lat_rad / 2), 2) +
+         (math.cos(lat_rad_1) * math.cos(lat_rad_2) * pow(
+             math.sin(delta_lon_rad / 2), 2)))
+    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
+    R = 6371000
+    # metres
+    return R * c
 
 
 class _GreeterServicer(helloworld_pb2_grpc.GreeterServicer):
 
-  def SayHello(self, request, context):
-    return helloworld_pb2.HelloReply(message='Hello, {}!'.format(request.name))
+    def SayHello(self, request, context):
+        return helloworld_pb2.HelloReply(
+            message='Hello, {}!'.format(request.name))
 
 
 class _RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
-  """Provides methods that implement functionality of route guide server."""
+    """Provides methods that implement functionality of route guide server."""
 
-  def __init__(self):
-    self.db = route_guide_resources.read_route_guide_database()
+    def __init__(self):
+        self.db = route_guide_resources.read_route_guide_database()
 
-  def GetFeature(self, request, context):
-    feature = _get_feature(self.db, request)
-    if feature is None:
-      return route_guide_pb2.Feature(name="", location=request)
-    else:
-      return feature
+    def GetFeature(self, request, context):
+        feature = _get_feature(self.db, request)
+        if feature is None:
+            return route_guide_pb2.Feature(name="", location=request)
+        else:
+            return feature
 
-  def ListFeatures(self, request, context):
-    left = min(request.lo.longitude, request.hi.longitude)
-    right = max(request.lo.longitude, request.hi.longitude)
-    top = max(request.lo.latitude, request.hi.latitude)
-    bottom = min(request.lo.latitude, request.hi.latitude)
-    for feature in self.db:
-      if (feature.location.longitude >= left and
-          feature.location.longitude <= right and
-          feature.location.latitude >= bottom and
-          feature.location.latitude <= top):
-        yield feature
+    def ListFeatures(self, request, context):
+        left = min(request.lo.longitude, request.hi.longitude)
+        right = max(request.lo.longitude, request.hi.longitude)
+        top = max(request.lo.latitude, request.hi.latitude)
+        bottom = min(request.lo.latitude, request.hi.latitude)
+        for feature in self.db:
+            if (feature.location.longitude >= left and
+                    feature.location.longitude <= right and
+                    feature.location.latitude >= bottom and
+                    feature.location.latitude <= top):
+                yield feature
 
-  def RecordRoute(self, request_iterator, context):
-    point_count = 0
-    feature_count = 0
-    distance = 0.0
-    prev_point = None
+    def RecordRoute(self, request_iterator, context):
+        point_count = 0
+        feature_count = 0
+        distance = 0.0
+        prev_point = None
 
-    start_time = time.time()
-    for point in request_iterator:
-      point_count += 1
-      if _get_feature(self.db, point):
-        feature_count += 1
-      if prev_point:
-        distance += _get_distance(prev_point, point)
-      prev_point = point
+        start_time = time.time()
+        for point in request_iterator:
+            point_count += 1
+            if _get_feature(self.db, point):
+                feature_count += 1
+            if prev_point:
+                distance += _get_distance(prev_point, point)
+            prev_point = point
 
-    elapsed_time = time.time() - start_time
-    return route_guide_pb2.RouteSummary(point_count=point_count,
-                                        feature_count=feature_count,
-                                        distance=int(distance),
-                                        elapsed_time=int(elapsed_time))
+        elapsed_time = time.time() - start_time
+        return route_guide_pb2.RouteSummary(
+            point_count=point_count,
+            feature_count=feature_count,
+            distance=int(distance),
+            elapsed_time=int(elapsed_time))
 
-  def RouteChat(self, request_iterator, context):
-    prev_notes = []
-    for new_note in request_iterator:
-      for prev_note in prev_notes:
-        if prev_note.location == new_note.location:
-          yield prev_note
-      prev_notes.append(new_note)
+    def RouteChat(self, request_iterator, context):
+        prev_notes = []
+        for new_note in request_iterator:
+            for prev_note in prev_notes:
+                if prev_note.location == new_note.location:
+                    yield prev_note
+            prev_notes.append(new_note)
 
 
 def serve():
-  server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
-  helloworld_pb2_grpc.add_GreeterServicer_to_server(_GreeterServicer(), server)
-  route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
-      _RouteGuideServicer(), server)
-  server.add_insecure_port('[::]:50051')
-  server.start()
-  try:
-    while True:
-      time.sleep(_ONE_DAY_IN_SECONDS)
-  except KeyboardInterrupt:
-    server.stop(0)
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    helloworld_pb2_grpc.add_GreeterServicer_to_server(_GreeterServicer(),
+                                                      server)
+    route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
+        _RouteGuideServicer(), server)
+    server.add_insecure_port('[::]:50051')
+    server.start()
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
 
 
 if __name__ == '__main__':
-  serve()
+    serve()
diff --git a/examples/python/multiplex/route_guide_resources.py b/examples/python/multiplex/route_guide_resources.py
index 0887863..ace85d6 100644
--- a/examples/python/multiplex/route_guide_resources.py
+++ b/examples/python/multiplex/route_guide_resources.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Common resources used in the gRPC route guide example."""
 
 import json
@@ -20,19 +19,19 @@
 
 
 def read_route_guide_database():
-  """Reads the route guide database.
+    """Reads the route guide database.
 
   Returns:
     The full contents of the route guide database as a sequence of
       route_guide_pb2.Features.
   """
-  feature_list = []
-  with open("route_guide_db.json") as route_guide_db_file:
-    for item in json.load(route_guide_db_file):
-      feature = route_guide_pb2.Feature(
-          name=item["name"],
-          location=route_guide_pb2.Point(
-              latitude=item["location"]["latitude"],
-              longitude=item["location"]["longitude"]))
-      feature_list.append(feature)
-  return feature_list
+    feature_list = []
+    with open("route_guide_db.json") as route_guide_db_file:
+        for item in json.load(route_guide_db_file):
+            feature = route_guide_pb2.Feature(
+                name=item["name"],
+                location=route_guide_pb2.Point(
+                    latitude=item["location"]["latitude"],
+                    longitude=item["location"]["longitude"]))
+            feature_list.append(feature)
+    return feature_list
diff --git a/examples/python/multiplex/run_codegen.py b/examples/python/multiplex/run_codegen.py
index f38d86c..be8915f 100644
--- a/examples/python/multiplex/run_codegen.py
+++ b/examples/python/multiplex/run_codegen.py
@@ -11,26 +11,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Generates protocol messages and gRPC stubs."""
 
 from grpc_tools import protoc
 
-protoc.main(
-    (
-        '',
-        '-I../../protos',
-        '--python_out=.',
-        '--grpc_python_out=.',
-        '../../protos/helloworld.proto',
-    )
-)
-protoc.main(
-    (
-        '',
-        '-I../../protos',
-        '--python_out=.',
-        '--grpc_python_out=.',
-        '../../protos/route_guide.proto',
-    )
-)
+protoc.main((
+    '',
+    '-I../../protos',
+    '--python_out=.',
+    '--grpc_python_out=.',
+    '../../protos/helloworld.proto',
+))
+protoc.main((
+    '',
+    '-I../../protos',
+    '--python_out=.',
+    '--grpc_python_out=.',
+    '../../protos/route_guide.proto',
+))
diff --git a/examples/python/route_guide/route_guide_client.py b/examples/python/route_guide/route_guide_client.py
index a0e32fb..f2d4317 100644
--- a/examples/python/route_guide/route_guide_client.py
+++ b/examples/python/route_guide/route_guide_client.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """The Python implementation of the gRPC route guide client."""
 
 from __future__ import print_function
@@ -26,89 +25,92 @@
 
 
 def make_route_note(message, latitude, longitude):
-  return route_guide_pb2.RouteNote(
-      message=message,
-      location=route_guide_pb2.Point(latitude=latitude, longitude=longitude))
+    return route_guide_pb2.RouteNote(
+        message=message,
+        location=route_guide_pb2.Point(latitude=latitude, longitude=longitude))
 
 
 def guide_get_one_feature(stub, point):
-  feature = stub.GetFeature(point)
-  if not feature.location:
-    print("Server returned incomplete feature")
-    return
+    feature = stub.GetFeature(point)
+    if not feature.location:
+        print("Server returned incomplete feature")
+        return
 
-  if feature.name:
-    print("Feature called %s at %s" % (feature.name, feature.location))
-  else:
-    print("Found no feature at %s" % feature.location)
+    if feature.name:
+        print("Feature called %s at %s" % (feature.name, feature.location))
+    else:
+        print("Found no feature at %s" % feature.location)
 
 
 def guide_get_feature(stub):
-  guide_get_one_feature(stub, route_guide_pb2.Point(latitude=409146138, longitude=-746188906))
-  guide_get_one_feature(stub, route_guide_pb2.Point(latitude=0, longitude=0))
+    guide_get_one_feature(stub,
+                          route_guide_pb2.Point(
+                              latitude=409146138, longitude=-746188906))
+    guide_get_one_feature(stub, route_guide_pb2.Point(latitude=0, longitude=0))
 
 
 def guide_list_features(stub):
-  rectangle = route_guide_pb2.Rectangle(
-      lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
-      hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
-  print("Looking for features between 40, -75 and 42, -73")
+    rectangle = route_guide_pb2.Rectangle(
+        lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
+        hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
+    print("Looking for features between 40, -75 and 42, -73")
 
-  features = stub.ListFeatures(rectangle)
+    features = stub.ListFeatures(rectangle)
 
-  for feature in features:
-    print("Feature called %s at %s" % (feature.name, feature.location))
+    for feature in features:
+        print("Feature called %s at %s" % (feature.name, feature.location))
 
 
 def generate_route(feature_list):
-  for _ in range(0, 10):
-    random_feature = feature_list[random.randint(0, len(feature_list) - 1)]
-    print("Visiting point %s" % random_feature.location)
-    yield random_feature.location
+    for _ in range(0, 10):
+        random_feature = feature_list[random.randint(0, len(feature_list) - 1)]
+        print("Visiting point %s" % random_feature.location)
+        yield random_feature.location
 
 
 def guide_record_route(stub):
-  feature_list = route_guide_resources.read_route_guide_database()
+    feature_list = route_guide_resources.read_route_guide_database()
 
-  route_iterator = generate_route(feature_list)
-  route_summary = stub.RecordRoute(route_iterator)
-  print("Finished trip with %s points " % route_summary.point_count)
-  print("Passed %s features " % route_summary.feature_count)
-  print("Travelled %s meters " % route_summary.distance)
-  print("It took %s seconds " % route_summary.elapsed_time)
+    route_iterator = generate_route(feature_list)
+    route_summary = stub.RecordRoute(route_iterator)
+    print("Finished trip with %s points " % route_summary.point_count)
+    print("Passed %s features " % route_summary.feature_count)
+    print("Travelled %s meters " % route_summary.distance)
+    print("It took %s seconds " % route_summary.elapsed_time)
 
 
 def generate_messages():
-  messages = [
-      make_route_note("First message", 0, 0),
-      make_route_note("Second message", 0, 1),
-      make_route_note("Third message", 1, 0),
-      make_route_note("Fourth message", 0, 0),
-      make_route_note("Fifth message", 1, 0),
-  ]
-  for msg in messages:
-    print("Sending %s at %s" % (msg.message, msg.location))
-    yield msg
+    messages = [
+        make_route_note("First message", 0, 0),
+        make_route_note("Second message", 0, 1),
+        make_route_note("Third message", 1, 0),
+        make_route_note("Fourth message", 0, 0),
+        make_route_note("Fifth message", 1, 0),
+    ]
+    for msg in messages:
+        print("Sending %s at %s" % (msg.message, msg.location))
+        yield msg
 
 
 def guide_route_chat(stub):
-  responses = stub.RouteChat(generate_messages())
-  for response in responses:
-    print("Received message %s at %s" % (response.message, response.location))
+    responses = stub.RouteChat(generate_messages())
+    for response in responses:
+        print("Received message %s at %s" % (response.message,
+                                             response.location))
 
 
 def run():
-  channel = grpc.insecure_channel('localhost:50051')
-  stub = route_guide_pb2_grpc.RouteGuideStub(channel)
-  print("-------------- GetFeature --------------")
-  guide_get_feature(stub)
-  print("-------------- ListFeatures --------------")
-  guide_list_features(stub)
-  print("-------------- RecordRoute --------------")
-  guide_record_route(stub)
-  print("-------------- RouteChat --------------")
-  guide_route_chat(stub)
+    channel = grpc.insecure_channel('localhost:50051')
+    stub = route_guide_pb2_grpc.RouteGuideStub(channel)
+    print("-------------- GetFeature --------------")
+    guide_get_feature(stub)
+    print("-------------- ListFeatures --------------")
+    guide_list_features(stub)
+    print("-------------- RecordRoute --------------")
+    guide_record_route(stub)
+    print("-------------- RouteChat --------------")
+    guide_route_chat(stub)
 
 
 if __name__ == '__main__':
-  run()
+    run()
diff --git a/examples/python/route_guide/route_guide_resources.py b/examples/python/route_guide/route_guide_resources.py
index 0887863..ace85d6 100644
--- a/examples/python/route_guide/route_guide_resources.py
+++ b/examples/python/route_guide/route_guide_resources.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Common resources used in the gRPC route guide example."""
 
 import json
@@ -20,19 +19,19 @@
 
 
 def read_route_guide_database():
-  """Reads the route guide database.
+    """Reads the route guide database.
 
   Returns:
     The full contents of the route guide database as a sequence of
       route_guide_pb2.Features.
   """
-  feature_list = []
-  with open("route_guide_db.json") as route_guide_db_file:
-    for item in json.load(route_guide_db_file):
-      feature = route_guide_pb2.Feature(
-          name=item["name"],
-          location=route_guide_pb2.Point(
-              latitude=item["location"]["latitude"],
-              longitude=item["location"]["longitude"]))
-      feature_list.append(feature)
-  return feature_list
+    feature_list = []
+    with open("route_guide_db.json") as route_guide_db_file:
+        for item in json.load(route_guide_db_file):
+            feature = route_guide_pb2.Feature(
+                name=item["name"],
+                location=route_guide_pb2.Point(
+                    latitude=item["location"]["latitude"],
+                    longitude=item["location"]["longitude"]))
+            feature_list.append(feature)
+    return feature_list
diff --git a/examples/python/route_guide/route_guide_server.py b/examples/python/route_guide/route_guide_server.py
index a0aa5fd..f10008f 100644
--- a/examples/python/route_guide/route_guide_server.py
+++ b/examples/python/route_guide/route_guide_server.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """The Python implementation of the gRPC route guide server."""
 
 from concurrent import futures
@@ -28,98 +27,102 @@
 
 
 def get_feature(feature_db, point):
-  """Returns Feature at given location or None."""
-  for feature in feature_db:
-    if feature.location == point:
-      return feature
-  return None
+    """Returns Feature at given location or None."""
+    for feature in feature_db:
+        if feature.location == point:
+            return feature
+    return None
 
 
 def get_distance(start, end):
-  """Distance between two points."""
-  coord_factor = 10000000.0
-  lat_1 = start.latitude / coord_factor
-  lat_2 = end.latitude / coord_factor
-  lon_1 = start.longitude / coord_factor
-  lon_2 = end.longitude / coord_factor
-  lat_rad_1 = math.radians(lat_1)
-  lat_rad_2 = math.radians(lat_2)
-  delta_lat_rad = math.radians(lat_2 - lat_1)
-  delta_lon_rad = math.radians(lon_2 - lon_1)
+    """Distance between two points."""
+    coord_factor = 10000000.0
+    lat_1 = start.latitude / coord_factor
+    lat_2 = end.latitude / coord_factor
+    lon_1 = start.longitude / coord_factor
+    lon_2 = end.longitude / coord_factor
+    lat_rad_1 = math.radians(lat_1)
+    lat_rad_2 = math.radians(lat_2)
+    delta_lat_rad = math.radians(lat_2 - lat_1)
+    delta_lon_rad = math.radians(lon_2 - lon_1)
 
-  a = (pow(math.sin(delta_lat_rad / 2), 2) +
-       (math.cos(lat_rad_1) * math.cos(lat_rad_2) *
-        pow(math.sin(delta_lon_rad / 2), 2)))
-  c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
-  R = 6371000; # metres
-  return R * c;
+    a = (pow(math.sin(delta_lat_rad / 2), 2) +
+         (math.cos(lat_rad_1) * math.cos(lat_rad_2) * pow(
+             math.sin(delta_lon_rad / 2), 2)))
+    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
+    R = 6371000
+    # metres
+    return R * c
+
 
 class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
-  """Provides methods that implement functionality of route guide server."""
+    """Provides methods that implement functionality of route guide server."""
 
-  def __init__(self):
-    self.db = route_guide_resources.read_route_guide_database()
+    def __init__(self):
+        self.db = route_guide_resources.read_route_guide_database()
 
-  def GetFeature(self, request, context):
-    feature = get_feature(self.db, request)
-    if feature is None:
-      return route_guide_pb2.Feature(name="", location=request)
-    else:
-      return feature
+    def GetFeature(self, request, context):
+        feature = get_feature(self.db, request)
+        if feature is None:
+            return route_guide_pb2.Feature(name="", location=request)
+        else:
+            return feature
 
-  def ListFeatures(self, request, context):
-    left = min(request.lo.longitude, request.hi.longitude)
-    right = max(request.lo.longitude, request.hi.longitude)
-    top = max(request.lo.latitude, request.hi.latitude)
-    bottom = min(request.lo.latitude, request.hi.latitude)
-    for feature in self.db:
-      if (feature.location.longitude >= left and
-          feature.location.longitude <= right and
-          feature.location.latitude >= bottom and
-          feature.location.latitude <= top):
-        yield feature
+    def ListFeatures(self, request, context):
+        left = min(request.lo.longitude, request.hi.longitude)
+        right = max(request.lo.longitude, request.hi.longitude)
+        top = max(request.lo.latitude, request.hi.latitude)
+        bottom = min(request.lo.latitude, request.hi.latitude)
+        for feature in self.db:
+            if (feature.location.longitude >= left and
+                    feature.location.longitude <= right and
+                    feature.location.latitude >= bottom and
+                    feature.location.latitude <= top):
+                yield feature
 
-  def RecordRoute(self, request_iterator, context):
-    point_count = 0
-    feature_count = 0
-    distance = 0.0
-    prev_point = None
+    def RecordRoute(self, request_iterator, context):
+        point_count = 0
+        feature_count = 0
+        distance = 0.0
+        prev_point = None
 
-    start_time = time.time()
-    for point in request_iterator:
-      point_count += 1
-      if get_feature(self.db, point):
-        feature_count += 1
-      if prev_point:
-        distance += get_distance(prev_point, point)
-      prev_point = point
+        start_time = time.time()
+        for point in request_iterator:
+            point_count += 1
+            if get_feature(self.db, point):
+                feature_count += 1
+            if prev_point:
+                distance += get_distance(prev_point, point)
+            prev_point = point
 
-    elapsed_time = time.time() - start_time
-    return route_guide_pb2.RouteSummary(point_count=point_count,
-                                        feature_count=feature_count,
-                                        distance=int(distance),
-                                        elapsed_time=int(elapsed_time))
+        elapsed_time = time.time() - start_time
+        return route_guide_pb2.RouteSummary(
+            point_count=point_count,
+            feature_count=feature_count,
+            distance=int(distance),
+            elapsed_time=int(elapsed_time))
 
-  def RouteChat(self, request_iterator, context):
-    prev_notes = []
-    for new_note in request_iterator:
-      for prev_note in prev_notes:
-        if prev_note.location == new_note.location:
-          yield prev_note
-      prev_notes.append(new_note)
+    def RouteChat(self, request_iterator, context):
+        prev_notes = []
+        for new_note in request_iterator:
+            for prev_note in prev_notes:
+                if prev_note.location == new_note.location:
+                    yield prev_note
+            prev_notes.append(new_note)
 
 
 def serve():
-  server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
-  route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
-      RouteGuideServicer(), server)
-  server.add_insecure_port('[::]:50051')
-  server.start()
-  try:
-    while True:
-      time.sleep(_ONE_DAY_IN_SECONDS)
-  except KeyboardInterrupt:
-    server.stop(0)
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
+        RouteGuideServicer(), server)
+    server.add_insecure_port('[::]:50051')
+    server.start()
+    try:
+        while True:
+            time.sleep(_ONE_DAY_IN_SECONDS)
+    except KeyboardInterrupt:
+        server.stop(0)
+
 
 if __name__ == '__main__':
-  serve()
+    serve()
diff --git a/examples/python/route_guide/run_codegen.py b/examples/python/route_guide/run_codegen.py
index 4b61cf4..8df562d 100644
--- a/examples/python/route_guide/run_codegen.py
+++ b/examples/python/route_guide/run_codegen.py
@@ -11,17 +11,14 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Runs protoc with the gRPC plugin to generate messages and gRPC stubs."""
 
 from grpc_tools import protoc
 
-protoc.main(
-    (
-	'',
-	'-I../../protos',
-	'--python_out=.',
-	'--grpc_python_out=.',
-	'../../protos/route_guide.proto',
-    )
-)
+protoc.main((
+    '',
+    '-I../../protos',
+    '--python_out=.',
+    '--grpc_python_out=.',
+    '../../protos/route_guide.proto',
+))
diff --git a/gRPC-Core.podspec b/gRPC-Core.podspec
index 030fcee..c127660 100644
--- a/gRPC-Core.podspec
+++ b/gRPC-Core.podspec
@@ -22,7 +22,7 @@
 
 Pod::Spec.new do |s|
   s.name     = 'gRPC-Core'
-  version = '1.8.3'
+  version = '1.9.0-dev'
   s.version  = version
   s.summary  = 'Core cross-platform gRPC library, written in C'
   s.homepage = 'https://grpc.io'
@@ -34,6 +34,10 @@
     :tag => "v#{version}",
   }
 
+  # gRPC podspecs depend on fix for https://github.com/CocoaPods/CocoaPods/issues/6024,
+  # which was released in CocoaPods v1.2.0.
+  s.cocoapods_version = '>= 1.2.0'
+
   s.ios.deployment_target = '7.0'
   s.osx.deployment_target = '10.9'
   s.requires_arc = false
@@ -113,7 +117,6 @@
                       'include/grpc/support/avl.h',
                       'include/grpc/support/cmdline.h',
                       'include/grpc/support/cpu.h',
-                      'include/grpc/support/histogram.h',
                       'include/grpc/support/host_port.h',
                       'include/grpc/support/log.h',
                       'include/grpc/support/log_windows.h',
@@ -223,7 +226,6 @@
                       'src/core/lib/support/env_posix.cc',
                       'src/core/lib/support/env_windows.cc',
                       'src/core/lib/support/fork.cc',
-                      'src/core/lib/support/histogram.cc',
                       'src/core/lib/support/host_port.cc',
                       'src/core/lib/support/log.cc',
                       'src/core/lib/support/log_android.cc',
@@ -418,6 +420,9 @@
                       'src/core/lib/slice/slice_hash_table.h',
                       'src/core/lib/slice/slice_internal.h',
                       'src/core/lib/slice/slice_string_helpers.h',
+                      'src/core/lib/support/debug_location.h',
+                      'src/core/lib/support/ref_counted.h',
+                      'src/core/lib/support/ref_counted_ptr.h',
                       'src/core/lib/surface/alarm_internal.h',
                       'src/core/lib/surface/api_trace.h',
                       'src/core/lib/surface/call.h',
@@ -895,6 +900,9 @@
                               'src/core/lib/slice/slice_hash_table.h',
                               'src/core/lib/slice/slice_internal.h',
                               'src/core/lib/slice/slice_string_helpers.h',
+                              'src/core/lib/support/debug_location.h',
+                              'src/core/lib/support/ref_counted.h',
+                              'src/core/lib/support/ref_counted_ptr.h',
                               'src/core/lib/surface/alarm_internal.h',
                               'src/core/lib/surface/api_trace.h',
                               'src/core/lib/surface/call.h',
@@ -955,7 +963,8 @@
     ss.dependency "#{s.name}/Cronet-Interface", version
 
     ss.source_files = 'src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc',
-                      'src/core/ext/transport/cronet/transport/cronet_transport.{cc,h}',
+                      'src/core/ext/transport/cronet/transport/cronet_transport.cc',
+                      'src/core/ext/transport/cronet/transport/cronet_transport.h',
                       'third_party/objective_c/Cronet/bidirectional_stream_c.h'
   end
 
@@ -965,17 +974,112 @@
     ss.dependency "#{s.name}/Interface", version
     ss.dependency "#{s.name}/Implementation", version
 
-    ss.source_files = 'test/core/end2end/cq_verifier.{cc,h}',
-                      'test/core/end2end/end2end_tests.{cc,h}',
-                      'test/core/end2end/end2end_test_utils.cc',
-                      'test/core/end2end/tests/*.{cc,h}',
-                      'test/core/end2end/fixtures/*.h',
-                      'test/core/end2end/data/*.{cc,h}',
-                      'test/core/util/debugger_macros.{cc,h}',
-                      'test/core/util/test_config.{cc,h}',
-                      'test/core/util/port.h',
+    ss.source_files = 'test/core/util/test_config.cc',
+                      'test/core/util/test_config.h',
+                      'test/core/end2end/data/client_certs.cc',
+                      'test/core/end2end/data/server1_cert.cc',
+                      'test/core/end2end/data/server1_key.cc',
+                      'test/core/end2end/data/test_root_cert.cc',
+                      'test/core/security/oauth2_utils.cc',
+                      'test/core/end2end/cq_verifier.cc',
+                      'test/core/end2end/fixtures/http_proxy_fixture.cc',
+                      'test/core/end2end/fixtures/proxy.cc',
+                      'test/core/iomgr/endpoint_tests.cc',
+                      'test/core/util/debugger_macros.cc',
+                      'test/core/util/grpc_profiler.cc',
+                      'test/core/util/histogram.cc',
+                      'test/core/util/memory_counters.cc',
+                      'test/core/util/mock_endpoint.cc',
+                      'test/core/util/parse_hexstring.cc',
+                      'test/core/util/passthru_endpoint.cc',
                       'test/core/util/port.cc',
-                      'test/core/util/port_server_client.{cc,h}'
+                      'test/core/util/port_isolated_runtime_environment.cc',
+                      'test/core/util/port_server_client.cc',
+                      'test/core/util/slice_splitter.cc',
+                      'test/core/util/tracer_util.cc',
+                      'test/core/util/trickle_endpoint.cc',
+                      'test/core/end2end/data/ssl_test_data.h',
+                      'test/core/security/oauth2_utils.h',
+                      'test/core/end2end/cq_verifier.h',
+                      'test/core/end2end/fixtures/http_proxy_fixture.h',
+                      'test/core/end2end/fixtures/proxy.h',
+                      'test/core/iomgr/endpoint_tests.h',
+                      'test/core/util/debugger_macros.h',
+                      'test/core/util/grpc_profiler.h',
+                      'test/core/util/histogram.h',
+                      'test/core/util/memory_counters.h',
+                      'test/core/util/mock_endpoint.h',
+                      'test/core/util/parse_hexstring.h',
+                      'test/core/util/passthru_endpoint.h',
+                      'test/core/util/port.h',
+                      'test/core/util/port_server_client.h',
+                      'test/core/util/slice_splitter.h',
+                      'test/core/util/tracer_util.h',
+                      'test/core/util/trickle_endpoint.h',
+                      'test/core/end2end/end2end_tests.cc',
+                      'test/core/end2end/end2end_test_utils.cc',
+                      'test/core/end2end/tests/authority_not_supported.cc',
+                      'test/core/end2end/tests/bad_hostname.cc',
+                      'test/core/end2end/tests/bad_ping.cc',
+                      'test/core/end2end/tests/binary_metadata.cc',
+                      'test/core/end2end/tests/call_creds.cc',
+                      'test/core/end2end/tests/cancel_after_accept.cc',
+                      'test/core/end2end/tests/cancel_after_client_done.cc',
+                      'test/core/end2end/tests/cancel_after_invoke.cc',
+                      'test/core/end2end/tests/cancel_after_round_trip.cc',
+                      'test/core/end2end/tests/cancel_before_invoke.cc',
+                      'test/core/end2end/tests/cancel_in_a_vacuum.cc',
+                      'test/core/end2end/tests/cancel_with_status.cc',
+                      'test/core/end2end/tests/compressed_payload.cc',
+                      'test/core/end2end/tests/connectivity.cc',
+                      'test/core/end2end/tests/default_host.cc',
+                      'test/core/end2end/tests/disappearing_server.cc',
+                      'test/core/end2end/tests/empty_batch.cc',
+                      'test/core/end2end/tests/filter_call_init_fails.cc',
+                      'test/core/end2end/tests/filter_causes_close.cc',
+                      'test/core/end2end/tests/filter_latency.cc',
+                      'test/core/end2end/tests/filter_status_code.cc',
+                      'test/core/end2end/tests/graceful_server_shutdown.cc',
+                      'test/core/end2end/tests/high_initial_seqno.cc',
+                      'test/core/end2end/tests/hpack_size.cc',
+                      'test/core/end2end/tests/idempotent_request.cc',
+                      'test/core/end2end/tests/invoke_large_request.cc',
+                      'test/core/end2end/tests/keepalive_timeout.cc',
+                      'test/core/end2end/tests/large_metadata.cc',
+                      'test/core/end2end/tests/load_reporting_hook.cc',
+                      'test/core/end2end/tests/max_concurrent_streams.cc',
+                      'test/core/end2end/tests/max_connection_age.cc',
+                      'test/core/end2end/tests/max_connection_idle.cc',
+                      'test/core/end2end/tests/max_message_length.cc',
+                      'test/core/end2end/tests/negative_deadline.cc',
+                      'test/core/end2end/tests/network_status_change.cc',
+                      'test/core/end2end/tests/no_logging.cc',
+                      'test/core/end2end/tests/no_op.cc',
+                      'test/core/end2end/tests/payload.cc',
+                      'test/core/end2end/tests/ping.cc',
+                      'test/core/end2end/tests/ping_pong_streaming.cc',
+                      'test/core/end2end/tests/proxy_auth.cc',
+                      'test/core/end2end/tests/registered_call.cc',
+                      'test/core/end2end/tests/request_with_flags.cc',
+                      'test/core/end2end/tests/request_with_payload.cc',
+                      'test/core/end2end/tests/resource_quota_server.cc',
+                      'test/core/end2end/tests/server_finishes_request.cc',
+                      'test/core/end2end/tests/shutdown_finishes_calls.cc',
+                      'test/core/end2end/tests/shutdown_finishes_tags.cc',
+                      'test/core/end2end/tests/simple_cacheable_request.cc',
+                      'test/core/end2end/tests/simple_delayed_request.cc',
+                      'test/core/end2end/tests/simple_metadata.cc',
+                      'test/core/end2end/tests/simple_request.cc',
+                      'test/core/end2end/tests/stream_compression_compressed_payload.cc',
+                      'test/core/end2end/tests/stream_compression_payload.cc',
+                      'test/core/end2end/tests/stream_compression_ping_pong_streaming.cc',
+                      'test/core/end2end/tests/streaming_error_response.cc',
+                      'test/core/end2end/tests/trailing_metadata.cc',
+                      'test/core/end2end/tests/workaround_cronet_compression.cc',
+                      'test/core/end2end/tests/write_buffering.cc',
+                      'test/core/end2end/tests/write_buffering_at_end.cc',
+                      'test/core/end2end/tests/cancel_test_helpers.h',
+                      'test/core/end2end/end2end_tests.h'
   end
 
   # TODO (mxyan): Instead of this hack, add include path "third_party" to C core's include path?
diff --git a/gRPC-ProtoRPC.podspec b/gRPC-ProtoRPC.podspec
index d8bfae0..cb1c548 100644
--- a/gRPC-ProtoRPC.podspec
+++ b/gRPC-ProtoRPC.podspec
@@ -21,7 +21,7 @@
 
 Pod::Spec.new do |s|
   s.name     = 'gRPC-ProtoRPC'
-  version = '1.8.3'
+  version = '1.9.0-dev'
   s.version  = version
   s.summary  = 'RPC library for Protocol Buffers, based on gRPC'
   s.homepage = 'https://grpc.io'
diff --git a/gRPC-RxLibrary.podspec b/gRPC-RxLibrary.podspec
index d7182ab..0f9abb6 100644
--- a/gRPC-RxLibrary.podspec
+++ b/gRPC-RxLibrary.podspec
@@ -21,7 +21,7 @@
 
 Pod::Spec.new do |s|
   s.name     = 'gRPC-RxLibrary'
-  version = '1.8.3'
+  version = '1.9.0-dev'
   s.version  = version
   s.summary  = 'Reactive Extensions library for iOS/OSX.'
   s.homepage = 'https://grpc.io'
diff --git a/gRPC.podspec b/gRPC.podspec
index a784ea8..1f3a0a9 100644
--- a/gRPC.podspec
+++ b/gRPC.podspec
@@ -20,7 +20,7 @@
 
 Pod::Spec.new do |s|
   s.name     = 'gRPC'
-  version = '1.8.3'
+  version = '1.9.0-dev'
   s.version  = version
   s.summary  = 'gRPC client library for iOS/OSX'
   s.homepage = 'https://grpc.io'
diff --git a/grpc.def b/grpc.def
index 07c0b3e..d4a18cc 100644
--- a/grpc.def
+++ b/grpc.def
@@ -200,21 +200,6 @@
     gpr_cmdline_usage_string
     gpr_cpu_num_cores
     gpr_cpu_current_cpu
-    gpr_histogram_create
-    gpr_histogram_destroy
-    gpr_histogram_add
-    gpr_histogram_merge
-    gpr_histogram_percentile
-    gpr_histogram_mean
-    gpr_histogram_stddev
-    gpr_histogram_variance
-    gpr_histogram_maximum
-    gpr_histogram_minimum
-    gpr_histogram_count
-    gpr_histogram_sum
-    gpr_histogram_sum_of_squares
-    gpr_histogram_get_contents
-    gpr_histogram_merge_contents
     gpr_join_host_port
     gpr_split_host_port
     gpr_log_severity_string
diff --git a/grpc.gemspec b/grpc.gemspec
index 7a850a5..d185995 100644
--- a/grpc.gemspec
+++ b/grpc.gemspec
@@ -52,7 +52,6 @@
   s.files += %w( include/grpc/support/avl.h )
   s.files += %w( include/grpc/support/cmdline.h )
   s.files += %w( include/grpc/support/cpu.h )
-  s.files += %w( include/grpc/support/histogram.h )
   s.files += %w( include/grpc/support/host_port.h )
   s.files += %w( include/grpc/support/log.h )
   s.files += %w( include/grpc/support/log_windows.h )
@@ -117,7 +116,6 @@
   s.files += %w( src/core/lib/support/env_posix.cc )
   s.files += %w( src/core/lib/support/env_windows.cc )
   s.files += %w( src/core/lib/support/fork.cc )
-  s.files += %w( src/core/lib/support/histogram.cc )
   s.files += %w( src/core/lib/support/host_port.cc )
   s.files += %w( src/core/lib/support/log.cc )
   s.files += %w( src/core/lib/support/log_android.cc )
@@ -348,6 +346,9 @@
   s.files += %w( src/core/lib/slice/slice_hash_table.h )
   s.files += %w( src/core/lib/slice/slice_internal.h )
   s.files += %w( src/core/lib/slice/slice_string_helpers.h )
+  s.files += %w( src/core/lib/support/debug_location.h )
+  s.files += %w( src/core/lib/support/ref_counted.h )
+  s.files += %w( src/core/lib/support/ref_counted_ptr.h )
   s.files += %w( src/core/lib/surface/alarm_internal.h )
   s.files += %w( src/core/lib/surface/api_trace.h )
   s.files += %w( src/core/lib/surface/call.h )
diff --git a/grpc.gyp b/grpc.gyp
index f2033b4..9f6cd52 100644
--- a/grpc.gyp
+++ b/grpc.gyp
@@ -173,7 +173,6 @@
         'src/core/lib/support/env_posix.cc',
         'src/core/lib/support/env_windows.cc',
         'src/core/lib/support/fork.cc',
-        'src/core/lib/support/histogram.cc',
         'src/core/lib/support/host_port.cc',
         'src/core/lib/support/log.cc',
         'src/core/lib/support/log_android.cc',
@@ -506,11 +505,13 @@
         'test/core/iomgr/endpoint_tests.cc',
         'test/core/util/debugger_macros.cc',
         'test/core/util/grpc_profiler.cc',
+        'test/core/util/histogram.cc',
         'test/core/util/memory_counters.cc',
         'test/core/util/mock_endpoint.cc',
         'test/core/util/parse_hexstring.cc',
         'test/core/util/passthru_endpoint.cc',
         'test/core/util/port.cc',
+        'test/core/util/port_isolated_runtime_environment.cc',
         'test/core/util/port_server_client.cc',
         'test/core/util/slice_splitter.cc',
         'test/core/util/tracer_util.cc',
@@ -716,11 +717,13 @@
         'test/core/iomgr/endpoint_tests.cc',
         'test/core/util/debugger_macros.cc',
         'test/core/util/grpc_profiler.cc',
+        'test/core/util/histogram.cc',
         'test/core/util/memory_counters.cc',
         'test/core/util/mock_endpoint.cc',
         'test/core/util/parse_hexstring.cc',
         'test/core/util/passthru_endpoint.cc',
         'test/core/util/port.cc',
+        'test/core/util/port_isolated_runtime_environment.cc',
         'test/core/util/port_server_client.cc',
         'test/core/util/slice_splitter.cc',
         'test/core/util/tracer_util.cc',
@@ -2375,6 +2378,7 @@
         'test/core/end2end/tests/filter_call_init_fails.cc',
         'test/core/end2end/tests/filter_causes_close.cc',
         'test/core/end2end/tests/filter_latency.cc',
+        'test/core/end2end/tests/filter_status_code.cc',
         'test/core/end2end/tests/graceful_server_shutdown.cc',
         'test/core/end2end/tests/high_initial_seqno.cc',
         'test/core/end2end/tests/hpack_size.cc',
@@ -2447,6 +2451,7 @@
         'test/core/end2end/tests/filter_call_init_fails.cc',
         'test/core/end2end/tests/filter_causes_close.cc',
         'test/core/end2end/tests/filter_latency.cc',
+        'test/core/end2end/tests/filter_status_code.cc',
         'test/core/end2end/tests/graceful_server_shutdown.cc',
         'test/core/end2end/tests/high_initial_seqno.cc',
         'test/core/end2end/tests/hpack_size.cc',
diff --git a/include/grpc++/generic/async_generic_service.h b/include/grpc++/generic/async_generic_service.h
index cd9a65e..b1ea4f3 100644
--- a/include/grpc++/generic/async_generic_service.h
+++ b/include/grpc++/generic/async_generic_service.h
@@ -42,6 +42,23 @@
   grpc::string host_;
 };
 
+// A generic service on the server side accepts all RPC methods and hosts. It
+// is typically used in proxies. A generic service can be registered with a
+// server that also hosts other, non-generic services.
+// Sample usage:
+//   ServerBuilder builder;
+//   auto cq = builder.AddCompletionQueue();
+//   AsyncGenericService generic_service;
+//   builder.RegisterAsyncGenericService(&generic_service);
+//   auto server = builder.BuildAndStart();
+//
+//   // request a new call
+//   GenericServerContext context;
+//   GenericServerAsyncReaderWriter stream(&context);
+//   generic_service.RequestCall(&context, &stream, cq.get(), cq.get(), tag);
+//
+// When tag is retrieved from cq->Next(), context.method() can be used to look
+// at the method and the RPC can be handled accordingly.
 class AsyncGenericService final {
  public:
   AsyncGenericService() : server_(nullptr) {}
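
A hedged illustration of the usage documented in the comment above: the sketch below requests and identifies a single generic call from a completion-queue loop. The function name and the literal tag value are illustrative, not part of this patch.

    #include <grpc++/generic/async_generic_service.h>
    #include <grpc++/grpc++.h>
    #include <grpc/support/log.h>

    // Minimal sketch: request one incoming call, whatever its method or host,
    // and report which method was invoked.
    void HandleOneGenericCall(grpc::AsyncGenericService* generic_service,
                              grpc::ServerCompletionQueue* cq) {
      grpc::GenericServerContext context;
      grpc::GenericServerAsyncReaderWriter stream(&context);
      generic_service->RequestCall(&context, &stream, cq, cq, (void*)1);
      void* tag;
      bool ok;
      if (cq->Next(&tag, &ok) && ok) {
        // context.method() identifies the RPC; dispatch on it as needed.
        gpr_log(GPR_INFO, "generic call for %s", context.method().c_str());
      }
    }
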
diff --git a/include/grpc++/impl/codegen/async_unary_call.h b/include/grpc++/impl/codegen/async_unary_call.h
index b9ea5fd..fb57300 100644
--- a/include/grpc++/impl/codegen/async_unary_call.h
+++ b/include/grpc++/impl/codegen/async_unary_call.h
@@ -103,6 +103,13 @@
     assert(size == sizeof(ClientAsyncResponseReader));
   }
 
+  // This operator should never be called as the memory should be freed as part
+  // of the arena destruction. It only exists to provide a matching operator
+  // delete to the operator new so that some compilers will not complain (see
+  // https://github.com/grpc/grpc/issues/11301). Note that at the time of
+  // adding this there are no tests catching the compiler warning.
+  static void operator delete(void*, void*) { assert(0); }
+
   void StartCall() override {
     assert(!started_);
     started_ = true;
diff --git a/include/grpc++/impl/codegen/call.h b/include/grpc++/impl/codegen/call.h
index af2c2b5..e581049 100644
--- a/include/grpc++/impl/codegen/call.h
+++ b/include/grpc++/impl/codegen/call.h
@@ -558,10 +558,12 @@
 
 class CallOpClientRecvStatus {
  public:
-  CallOpClientRecvStatus() : recv_status_(nullptr) {}
+  CallOpClientRecvStatus()
+      : recv_status_(nullptr), debug_error_string_(nullptr) {}
 
   void ClientRecvStatus(ClientContext* context, Status* status) {
-    metadata_map_ = &context->trailing_metadata_;
+    client_context_ = context;
+    metadata_map_ = &client_context_->trailing_metadata_;
     recv_status_ = status;
     error_message_ = g_core_codegen_interface->grpc_empty_slice();
   }
@@ -574,7 +576,7 @@
     op->data.recv_status_on_client.trailing_metadata = metadata_map_->arr();
     op->data.recv_status_on_client.status = &status_code_;
     op->data.recv_status_on_client.status_details = &error_message_;
-    op->data.recv_status_on_client.error_string = nullptr;
+    op->data.recv_status_on_client.error_string = &debug_error_string_;
     op->flags = 0;
     op->reserved = NULL;
   }
@@ -592,13 +594,20 @@
                            grpc::string(GRPC_SLICE_START_PTR(error_message_),
                                         GRPC_SLICE_END_PTR(error_message_)),
                            binary_error_details);
+    client_context_->set_debug_error_string(
+        debug_error_string_ != nullptr ? debug_error_string_ : "");
     g_core_codegen_interface->grpc_slice_unref(error_message_);
+    if (debug_error_string_ != nullptr) {
+      g_core_codegen_interface->gpr_free((void*)debug_error_string_);
+    }
     recv_status_ = nullptr;
   }
 
  private:
+  ClientContext* client_context_;
   MetadataMap* metadata_map_;
   Status* recv_status_;
+  const char* debug_error_string_;
   grpc_status_code status_code_;
   grpc_slice error_message_;
 };
diff --git a/include/grpc++/impl/codegen/client_context.h b/include/grpc++/impl/codegen/client_context.h
index 22b581c..61d97ce 100644
--- a/include/grpc++/impl/codegen/client_context.h
+++ b/include/grpc++/impl/codegen/client_context.h
@@ -348,6 +348,13 @@
   /// Applications never need to call this method.
   grpc_call* c_call() { return call_; }
 
+  /// EXPERIMENTAL debugging API
+  ///
+  /// If the status of an RPC is not ok(), this will return a detailed string
+  /// of the gRPC Core error that led to the failure. It should not be relied
+  /// upon for anything other than gaining more debug data in failure cases.
+  grpc::string debug_error_string() const { return debug_error_string_; }
+
  private:
   // Disallow copy and assign.
   ClientContext(const ClientContext&);
@@ -374,6 +381,11 @@
   template <class InputMessage, class OutputMessage>
   friend class ::grpc::internal::BlockingUnaryCallImpl;
 
+  // Used by friend class CallOpClientRecvStatus
+  void set_debug_error_string(const grpc::string& debug_error_string) {
+    debug_error_string_ = debug_error_string;
+  }
+
   grpc_call* call() const { return call_; }
   void set_call(grpc_call* call, const std::shared_ptr<Channel>& channel);
 
@@ -412,6 +424,8 @@
 
   grpc_compression_algorithm compression_algorithm_;
   bool initial_metadata_corked_;
+
+  grpc::string debug_error_string_;
 };
 
 }  // namespace grpc
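
A hedged sketch of how a client might consume the experimental accessor added above; EchoService::Stub, EchoRequest, and EchoResponse are hypothetical generated types used only for illustration.

    #include <iostream>
    #include <grpc++/grpc++.h>

    // EchoService/EchoRequest/EchoResponse are hypothetical generated types.
    void ReportFailure(EchoService::Stub* stub) {
      grpc::ClientContext ctx;
      EchoRequest request;
      EchoResponse response;
      grpc::Status status = stub->Echo(&ctx, request, &response);
      if (!status.ok()) {
        // EXPERIMENTAL: extra detail from gRPC Core about why the RPC failed.
        // Useful for debugging only, never for program logic.
        std::cerr << "RPC failed: " << status.error_message() << "\n"
                  << "debug: " << ctx.debug_error_string() << std::endl;
      }
    }
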
diff --git a/include/grpc++/impl/codegen/client_unary_call.h b/include/grpc++/impl/codegen/client_unary_call.h
index 256dd85..543e54b 100644
--- a/include/grpc++/impl/codegen/client_unary_call.h
+++ b/include/grpc++/impl/codegen/client_unary_call.h
@@ -65,6 +65,7 @@
                             context->initial_metadata_flags());
     ops.RecvInitialMetadata(context);
     ops.RecvMessage(result);
+    ops.AllowNoMessage();
     ops.ClientSendClose();
     ops.ClientRecvStatus(context, &status_);
     call.PerformOps(&ops);
diff --git a/include/grpc++/impl/codegen/core_codegen_interface.h b/include/grpc++/impl/codegen/core_codegen_interface.h
index 1949cda..d7ad7a4 100644
--- a/include/grpc++/impl/codegen/core_codegen_interface.h
+++ b/include/grpc++/impl/codegen/core_codegen_interface.h
@@ -25,10 +25,6 @@
 #include <grpc/impl/codegen/grpc_types.h>
 #include <grpc/impl/codegen/sync.h>
 
-extern "C" {
-struct grpc_byte_buffer;
-}
-
 namespace grpc {
 
 /// Interface between the codegen library and the minimal subset of core
diff --git a/include/grpc++/support/channel_arguments.h b/include/grpc++/support/channel_arguments.h
index 9dc505f..c9879d8 100644
--- a/include/grpc++/support/channel_arguments.h
+++ b/include/grpc++/support/channel_arguments.h
@@ -122,7 +122,7 @@
   /// Default pointer argument operations.
   struct PointerVtableMembers {
     static void* Copy(void* in) { return in; }
-    static void Destroy(grpc_exec_ctx* exec_ctx, void* in) {}
+    static void Destroy(void* in) {}
     static int Compare(void* a, void* b) {
       if (a < b) return -1;
       if (a > b) return 1;
diff --git a/include/grpc/impl/codegen/grpc_types.h b/include/grpc/impl/codegen/grpc_types.h
index 77844aa..fcbc8ac 100644
--- a/include/grpc/impl/codegen/grpc_types.h
+++ b/include/grpc/impl/codegen/grpc_types.h
@@ -85,7 +85,7 @@
 
 typedef struct grpc_arg_pointer_vtable {
   void* (*copy)(void* p);
-  void (*destroy)(grpc_exec_ctx* exec_ctx, void* p);
+  void (*destroy)(void* p);
   int (*cmp)(void* p, void* q);
 } grpc_arg_pointer_vtable;
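
A hedged sketch of a pointer channel argument written against the new vtable signature (destroy no longer takes a grpc_exec_ctx*); the key name and the no-op copy/destroy semantics are illustrative only.

    #include <grpc/grpc.h>

    static void* my_arg_copy(void* p) { return p; }
    static void my_arg_destroy(void* p) { /* nothing owned in this sketch */ }
    static int my_arg_cmp(void* p, void* q) {
      return p < q ? -1 : (p > q ? 1 : 0);
    }

    static const grpc_arg_pointer_vtable my_arg_vtable = {
        my_arg_copy, my_arg_destroy, my_arg_cmp};

    // Builds a GRPC_ARG_POINTER channel arg; "example.pointer_arg" is an
    // illustrative key, not one defined by gRPC.
    static grpc_arg MakePointerArg(void* payload) {
      grpc_arg arg;
      arg.type = GRPC_ARG_POINTER;
      arg.key = const_cast<char*>("example.pointer_arg");
      arg.value.pointer.p = payload;
      arg.value.pointer.vtable = &my_arg_vtable;
      return arg;
    }
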
 
diff --git a/include/grpc/impl/codegen/port_platform.h b/include/grpc/impl/codegen/port_platform.h
index 1906886..f4bc3eb 100644
--- a/include/grpc/impl/codegen/port_platform.h
+++ b/include/grpc/impl/codegen/port_platform.h
@@ -173,6 +173,7 @@
 #endif /* _LP64 */
 #ifdef __GLIBC__
 #define GPR_POSIX_CRASH_HANDLER 1
+#define GPR_LINUX_PTHREAD_NAME 1
 #else /* musl libc */
 #define GPR_MUSL_LIBC_COMPAT 1
 #endif
@@ -195,6 +196,7 @@
 #else /* __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_7 */
 #define GPR_CPU_POSIX 1
 #define GPR_GCC_TLS 1
+#define GPR_APPLE_PTHREAD_NAME 1
 #endif
 #else /* __MAC_OS_X_VERSION_MIN_REQUIRED */
 #define GPR_CPU_POSIX 1
@@ -303,20 +305,23 @@
  * This is primarily because of linker problems and toolchain misconfiguration:
  * TLS isn't supported until NDK r12b per
  * https://developer.android.com/ndk/downloads/revision_history.html
+ * TLS also does not work with Android NDK if GCC is being used as the compiler
+ * instead of Clang.
  * Since NDK r16, `__NDK_MAJOR__` and `__NDK_MINOR__` are defined in
  * <android/ndk-version.h>. For NDK < r16, users should define these macros,
  * e.g. `-D__NDK_MAJOR__=11 -D__NKD_MINOR__=0` for NDK r11. */
-#if defined(__ANDROID__) && defined(__clang__) && defined(GPR_GCC_TLS)
+#if defined(__ANDROID__) && defined(GPR_GCC_TLS)
 #if __has_include(<android/ndk-version.h>)
 #include <android/ndk-version.h>
 #endif /* __has_include(<android/ndk-version.h>) */
-#if defined(__ANDROID__) && defined(__clang__) && defined(__NDK_MAJOR__) && \
-    defined(__NDK_MINOR__) &&                                               \
-    ((__NDK_MAJOR__ < 12) || ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))
+#if (defined(__clang__) && defined(__NDK_MAJOR__) && defined(__NDK_MINOR__) && \
+     ((__NDK_MAJOR__ < 12) ||                                                  \
+      ((__NDK_MAJOR__ == 12) && (__NDK_MINOR__ < 1)))) ||                      \
+    (defined(__GNUC__) && !defined(__clang__))
 #undef GPR_GCC_TLS
 #define GPR_PTHREAD_TLS 1
 #endif
-#endif /*defined(__ANDROID__) && defined(__clang__) && defined(GPR_GCC_TLS) */
+#endif /*defined(__ANDROID__) && defined(GPR_GCC_TLS) */
 
 #if defined(__has_include)
 #if __has_include(<atomic>)
diff --git a/include/grpc/impl/codegen/slice.h b/include/grpc/impl/codegen/slice.h
index 11997fc..ad026b6 100644
--- a/include/grpc/impl/codegen/slice.h
+++ b/include/grpc/impl/codegen/slice.h
@@ -43,7 +43,7 @@
 
 typedef struct grpc_slice_refcount_vtable {
   void (*ref)(void*);
-  void (*unref)(grpc_exec_ctx* exec_ctx, void*);
+  void (*unref)(void*);
   int (*eq)(grpc_slice a, grpc_slice b);
   uint32_t (*hash)(grpc_slice slice);
 } grpc_slice_refcount_vtable;
diff --git a/include/grpc/module.modulemap b/include/grpc/module.modulemap
index 0faa448..67136cb 100644
--- a/include/grpc/module.modulemap
+++ b/include/grpc/module.modulemap
@@ -7,7 +7,6 @@
   header "support/avl.h"
   header "support/cmdline.h"
   header "support/cpu.h"
-  header "support/histogram.h"
   header "support/host_port.h"
   header "support/log.h"
   header "support/log_windows.h"
diff --git a/include/grpc/slice_buffer.h b/include/grpc/slice_buffer.h
index 6510c15..30833d0 100644
--- a/include/grpc/slice_buffer.h
+++ b/include/grpc/slice_buffer.h
@@ -67,8 +67,7 @@
                                                 size_t n,
                                                 grpc_slice_buffer* dst);
 /** move the first n bytes of src into dst (copying them) */
-GPRAPI void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx* exec_ctx,
-                                                     grpc_slice_buffer* src,
+GPRAPI void grpc_slice_buffer_move_first_into_buffer(grpc_slice_buffer* src,
                                                      size_t n, void* dst);
 /** take the first slice in the slice buffer */
 GPRAPI grpc_slice grpc_slice_buffer_take_first(grpc_slice_buffer* src);
diff --git a/include/grpc/support/histogram.h b/include/grpc/support/histogram.h
deleted file mode 100644
index d2794d8..0000000
--- a/include/grpc/support/histogram.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#ifndef GRPC_SUPPORT_HISTOGRAM_H
-#define GRPC_SUPPORT_HISTOGRAM_H
-
-#include <grpc/support/port_platform.h>
-#include <stddef.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct gpr_histogram gpr_histogram;
-
-GPRAPI gpr_histogram* gpr_histogram_create(double resolution,
-                                           double max_bucket_start);
-GPRAPI void gpr_histogram_destroy(gpr_histogram* h);
-GPRAPI void gpr_histogram_add(gpr_histogram* h, double x);
-
-/** The following merges the second histogram into the first. It only works
-   if they have the same buckets and resolution. Returns 0 on failure, 1
-   on success */
-GPRAPI int gpr_histogram_merge(gpr_histogram* dst, const gpr_histogram* src);
-
-GPRAPI double gpr_histogram_percentile(gpr_histogram* histogram,
-                                       double percentile);
-GPRAPI double gpr_histogram_mean(gpr_histogram* histogram);
-GPRAPI double gpr_histogram_stddev(gpr_histogram* histogram);
-GPRAPI double gpr_histogram_variance(gpr_histogram* histogram);
-GPRAPI double gpr_histogram_maximum(gpr_histogram* histogram);
-GPRAPI double gpr_histogram_minimum(gpr_histogram* histogram);
-GPRAPI double gpr_histogram_count(gpr_histogram* histogram);
-GPRAPI double gpr_histogram_sum(gpr_histogram* histogram);
-GPRAPI double gpr_histogram_sum_of_squares(gpr_histogram* histogram);
-
-GPRAPI const uint32_t* gpr_histogram_get_contents(gpr_histogram* histogram,
-                                                  size_t* count);
-GPRAPI void gpr_histogram_merge_contents(gpr_histogram* histogram,
-                                         const uint32_t* data,
-                                         size_t data_count, double min_seen,
-                                         double max_seen, double sum,
-                                         double sum_of_squares, double count);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* GRPC_SUPPORT_HISTOGRAM_H */
diff --git a/include/grpc/support/log.h b/include/grpc/support/log.h
index 9cce4b1..a8371cb 100644
--- a/include/grpc/support/log.h
+++ b/include/grpc/support/log.h
@@ -73,12 +73,14 @@
 /** Log overrides: applications can use this API to intercept logging calls
    and use their own implementations */
 
-typedef struct {
+struct gpr_log_func_args {
   const char* file;
   int line;
   gpr_log_severity severity;
   const char* message;
-} gpr_log_func_args;
+};
+
+typedef struct gpr_log_func_args gpr_log_func_args;
 
 typedef void (*gpr_log_func)(gpr_log_func_args* args);
 GPRAPI void gpr_set_log_function(gpr_log_func func);
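
The struct-plus-typedef change above leaves the log-override API itself intact; a hedged sketch of installing a custom handler with it:

    #include <stdio.h>
    #include <grpc/support/log.h>

    // Routes gRPC's log output through an application-owned sink.
    static void my_log_handler(gpr_log_func_args* args) {
      fprintf(stderr, "[%s] %s:%d %s\n",
              gpr_log_severity_string(args->severity), args->file, args->line,
              args->message);
    }

    // Call once during process startup, before gRPC emits any logs.
    void InstallLogHandler() { gpr_set_log_function(my_log_handler); }
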
diff --git a/include/grpc/support/thd.h b/include/grpc/support/thd.h
index 225d9d6..e9444e8 100644
--- a/include/grpc/support/thd.h
+++ b/include/grpc/support/thd.h
@@ -42,9 +42,12 @@
 
 /** Create a new thread running (*thd_body)(arg) and place its thread identifier
    in *t, and return true.  If there are insufficient resources, return false.
+   thd_name is the name of the thread for identification purposes on platforms
+   that support thread naming.
    If options==NULL, default options are used.
    The thread is immediately runnable, and exits when (*thd_body)() returns.  */
-GPRAPI int gpr_thd_new(gpr_thd_id* t, void (*thd_body)(void* arg), void* arg,
+GPRAPI int gpr_thd_new(gpr_thd_id* t, const char* thd_name,
+                       void (*thd_body)(void* arg), void* arg,
                        const gpr_thd_options* options);
 
 /** Return a gpr_thd_options struct with all fields set to defaults. */
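
A hedged sketch of a caller updated for the new gpr_thd_new signature; the thread name is only consulted on platforms with pthread-name support, and "example_worker" is an illustrative name, not one used in this patch.

    #include <grpc/support/log.h>
    #include <grpc/support/thd.h>

    static void worker_body(void* arg) { /* ... do work ... */ }

    void StartWorker() {
      gpr_thd_id tid;
      gpr_thd_options options = gpr_thd_options_default();
      // The new second argument names the thread where the platform allows it.
      if (!gpr_thd_new(&tid, "example_worker", worker_body, nullptr, &options)) {
        gpr_log(GPR_ERROR, "could not create worker thread");
      }
    }
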
diff --git a/include/grpc/support/tls.h b/include/grpc/support/tls.h
index 8519a83..4c9e79b 100644
--- a/include/grpc/support/tls.h
+++ b/include/grpc/support/tls.h
@@ -32,6 +32,12 @@
      GPR_TLS_DECL(foo);
    Thread locals always have static scope.
 
+   Declaring a thread local class variable 'foo':
+     GPR_TLS_CLASS_DECL(foo);
+
+   Defining the thread local class variable:
+     GPR_TLS_CLASS_DEF(foo);
+
    Initializing a thread local (must be done at library initialization
    time):
      gpr_tls_init(&foo);
diff --git a/include/grpc/support/tls_gcc.h b/include/grpc/support/tls_gcc.h
index 019acdf..b44f0f1 100644
--- a/include/grpc/support/tls_gcc.h
+++ b/include/grpc/support/tls_gcc.h
@@ -26,44 +26,6 @@
 /** Thread local storage based on gcc compiler primitives.
    #include tls.h to use this - and see that file for documentation */
 
-#ifndef NDEBUG
-
-struct gpr_gcc_thread_local {
-  intptr_t value;
-  bool* inited;
-};
-
-#define GPR_TLS_DECL(name)           \
-  static bool name##_inited = false; \
-  static __thread struct gpr_gcc_thread_local name = {0, &(name##_inited)}
-
-#define gpr_tls_init(tls)                  \
-  do {                                     \
-    GPR_ASSERT(*((tls)->inited) == false); \
-    *((tls)->inited) = true;               \
-  } while (0)
-
-/** It is allowed to call gpr_tls_init after gpr_tls_destroy is called. */
-#define gpr_tls_destroy(tls)      \
-  do {                            \
-    GPR_ASSERT(*((tls)->inited)); \
-    *((tls)->inited) = false;     \
-  } while (0)
-
-#define gpr_tls_set(tls, new_value) \
-  do {                              \
-    GPR_ASSERT(*((tls)->inited));   \
-    (tls)->value = (new_value);     \
-  } while (0)
-
-#define gpr_tls_get(tls)          \
-  ({                              \
-    GPR_ASSERT(*((tls)->inited)); \
-    (tls)->value;                 \
-  })
-
-#else /* NDEBUG */
-
 struct gpr_gcc_thread_local {
   intptr_t value;
 };
@@ -71,6 +33,11 @@
 #define GPR_TLS_DECL(name) \
   static __thread struct gpr_gcc_thread_local name = {0}
 
+#define GPR_TLS_CLASS_DECL(name) \
+  static __thread struct gpr_gcc_thread_local name
+
+#define GPR_TLS_CLASS_DEF(name) __thread struct gpr_gcc_thread_local name = {0}
+
 #define gpr_tls_init(tls) \
   do {                    \
   } while (0)
@@ -80,6 +47,4 @@
 #define gpr_tls_set(tls, new_value) (((tls)->value) = (new_value))
 #define gpr_tls_get(tls) ((tls)->value)
 
-#endif /* NDEBUG */
-
 #endif /* GRPC_SUPPORT_TLS_GCC_H */
diff --git a/include/grpc/support/tls_msvc.h b/include/grpc/support/tls_msvc.h
index e5f2205..68a411f 100644
--- a/include/grpc/support/tls_msvc.h
+++ b/include/grpc/support/tls_msvc.h
@@ -26,9 +26,18 @@
   intptr_t value;
 };
 
+/** Use GPR_TLS_DECL to declare tls static variables outside a class */
 #define GPR_TLS_DECL(name) \
   static __declspec(thread) struct gpr_msvc_thread_local name = {0}
 
+/** Use GPR_TLS_CLASS_DECL to declare tls static variable members of a class.
+ *  GPR_TLS_CLASS_DEF needs to be called to define this member. */
+#define GPR_TLS_CLASS_DECL(name) \
+  static __declspec(thread) struct gpr_msvc_thread_local name
+
+#define GPR_TLS_CLASS_DEF(name) \
+  __declspec(thread) struct gpr_msvc_thread_local name = {0}
+
 #define gpr_tls_init(tls) \
   do {                    \
   } while (0)
diff --git a/include/grpc/support/tls_pthread.h b/include/grpc/support/tls_pthread.h
index fb0edd8..249c8b1 100644
--- a/include/grpc/support/tls_pthread.h
+++ b/include/grpc/support/tls_pthread.h
@@ -29,8 +29,17 @@
   pthread_key_t key;
 };
 
+/** Use GPR_TLS_DECL to declare tls static variables outside a class */
 #define GPR_TLS_DECL(name) static struct gpr_pthread_thread_local name = {0}
 
+/** Use GPR_TLS_CLASS_DECL to declare tls static variable members of a class.
+ *  GPR_TLS_CLASS_DEF needs to be called to define this member. */
+#define GPR_TLS_CLASS_DECL(name) static struct gpr_pthread_thread_local name
+
+/** Use GPR_TLS_CLASS_DEF to define tls static variable members of a class
+ *  that were declared with GPR_TLS_CLASS_DECL. */
+#define GPR_TLS_CLASS_DEF(name) struct gpr_pthread_thread_local name = {0}
+
 #define gpr_tls_init(tls) GPR_ASSERT(0 == pthread_key_create(&(tls)->key, NULL))
 #define gpr_tls_destroy(tls) pthread_key_delete((tls)->key)
 #define gpr_tls_get(tls) ((intptr_t)pthread_getspecific((tls)->key))
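
A hedged sketch of how the new class-scoped TLS macros pair up; ThreadState is a hypothetical class, but the declare-in-class / define-once split mirrors the documentation added to tls.h above.

    #include <grpc/support/log.h>
    #include <grpc/support/tls.h>

    class ThreadState {
     public:
      static void Init() { gpr_tls_init(&current_); }
      static void Set(intptr_t value) { gpr_tls_set(&current_, value); }
      static intptr_t Get() { return gpr_tls_get(&current_); }

     private:
      // Declared inside the class with GPR_TLS_CLASS_DECL...
      GPR_TLS_CLASS_DECL(current_);
    };

    // ...and defined exactly once in a source file with GPR_TLS_CLASS_DEF.
    GPR_TLS_CLASS_DEF(ThreadState::current_);
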
diff --git a/package.xml b/package.xml
index 67dff8f..b4d8c88 100644
--- a/package.xml
+++ b/package.xml
@@ -13,8 +13,8 @@
  <date>2017-08-24</date>
  <time>16:06:07</time>
  <version>
-  <release>1.8.3</release>
-  <api>1.8.3</api>
+  <release>1.9.0dev</release>
+  <api>1.9.0dev</api>
  </version>
  <stability>
   <release>beta</release>
@@ -64,7 +64,6 @@
     <file baseinstalldir="/" name="include/grpc/support/avl.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/support/cmdline.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/support/cpu.h" role="src" />
-    <file baseinstalldir="/" name="include/grpc/support/histogram.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/support/host_port.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/support/log.h" role="src" />
     <file baseinstalldir="/" name="include/grpc/support/log_windows.h" role="src" />
@@ -129,7 +128,6 @@
     <file baseinstalldir="/" name="src/core/lib/support/env_posix.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/support/env_windows.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/support/fork.cc" role="src" />
-    <file baseinstalldir="/" name="src/core/lib/support/histogram.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/support/host_port.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/support/log.cc" role="src" />
     <file baseinstalldir="/" name="src/core/lib/support/log_android.cc" role="src" />
@@ -360,6 +358,9 @@
     <file baseinstalldir="/" name="src/core/lib/slice/slice_hash_table.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/slice/slice_internal.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/slice/slice_string_helpers.h" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/support/debug_location.h" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/support/ref_counted.h" role="src" />
+    <file baseinstalldir="/" name="src/core/lib/support/ref_counted_ptr.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/surface/alarm_internal.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/surface/api_trace.h" role="src" />
     <file baseinstalldir="/" name="src/core/lib/surface/call.h" role="src" />
diff --git a/setup.py b/setup.py
index 821fda5..73af9eb 100644
--- a/setup.py
+++ b/setup.py
@@ -181,6 +181,7 @@
   pymodinit_type = 'PyObject*' if PY3 else 'void'
   pymodinit = '__attribute__((visibility ("default"))) {}'.format(pymodinit_type)
   DEFINE_MACROS += (('PyMODINIT_FUNC', pymodinit),)
+  DEFINE_MACROS += (('GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK', 1),)
 
 # By default, Python3 distutils enforces compatibility of
 # c plugins (.so files) with the OSX version Python3 was built with.
diff --git a/src/compiler/cpp_generator.cc b/src/compiler/cpp_generator.cc
index dec8cd0..f35bfd9 100644
--- a/src/compiler/cpp_generator.cc
+++ b/src/compiler/cpp_generator.cc
@@ -1383,6 +1383,7 @@
                  "std::unique_ptr< $ns$$Service$::Stub> $ns$$Service$::NewStub("
                  "const std::shared_ptr< ::grpc::ChannelInterface>& channel, "
                  "const ::grpc::StubOptions& options) {\n"
+                 "  (void)options;\n"
                  "  std::unique_ptr< $ns$$Service$::Stub> stub(new "
                  "$ns$$Service$::Stub(channel));\n"
                  "  return stub;\n"
@@ -1567,11 +1568,24 @@
     static const char* headers_strs[] = {
         "grpc++/impl/codegen/async_stream.h",
         "grpc++/impl/codegen/sync_stream.h",
-        "gmock/gmock.h",
     };
     std::vector<grpc::string> headers(headers_strs, array_end(headers_strs));
     PrintIncludes(printer.get(), headers, params);
 
+    std::vector<grpc::string> gmock_header;
+    if (params.gmock_search_path.empty()) {
+      gmock_header.push_back("gmock/gmock.h");
+      PrintIncludes(printer.get(), gmock_header, params);
+    } else {
+      gmock_header.push_back("gmock.h");
+      // Copy the parameters to generate the gmock header.
+      Parameters gmock_params(params);
+      // We use local includes when a gmock_search_path is given.
+      gmock_params.use_system_headers = false;
+      gmock_params.grpc_search_path = params.gmock_search_path;
+      PrintIncludes(printer.get(), gmock_header, gmock_params);
+    }
+
     if (!file->package().empty()) {
       std::vector<grpc::string> parts = file->package_parts();
 
diff --git a/src/compiler/cpp_generator.h b/src/compiler/cpp_generator.h
index a93376a..300a27c 100644
--- a/src/compiler/cpp_generator.h
+++ b/src/compiler/cpp_generator.h
@@ -50,8 +50,10 @@
   bool use_system_headers;
   // Prefix to any grpc include
   grpc::string grpc_search_path;
-  // Generate GMOCK code to facilitate unit testing.
+  // Generate Google Mock code to facilitate unit testing.
   bool generate_mock_code;
+  // Google Mock search path; when non-empty, local includes will be used.
+  grpc::string gmock_search_path;
 };
 
 // Return the prologue of the generated header file.
diff --git a/src/compiler/cpp_plugin.cc b/src/compiler/cpp_plugin.cc
index adac0e2..661282f 100644
--- a/src/compiler/cpp_plugin.cc
+++ b/src/compiler/cpp_plugin.cc
@@ -78,6 +78,8 @@
             *error = grpc::string("Invalid parameter: ") + *parameter_string;
             return false;
           }
+        } else if (param[0] == "gmock_search_path") {
+          generator_parameters.gmock_search_path = param[1];
         } else {
           *error = grpc::string("Unknown parameter: ") + *parameter_string;
           return false;
diff --git a/src/compiler/csharp_generator.cc b/src/compiler/csharp_generator.cc
index 40fe0b0..7c97056 100644
--- a/src/compiler/csharp_generator.cc
+++ b/src/compiler/csharp_generator.cc
@@ -659,8 +659,11 @@
     }
 
     // Write out a file header.
-    out.Print("// Generated by the protocol buffer compiler.  DO NOT EDIT!\n");
-    out.Print("// source: $filename$\n", "filename", file->name());
+    out.Print("// <auto-generated>\n");
+    out.Print(
+        "//     Generated by the protocol buffer compiler.  DO NOT EDIT!\n");
+    out.Print("//     source: $filename$\n", "filename", file->name());
+    out.Print("// </auto-generated>\n");
 
     // use C++ style as there are no file-level XML comments in .NET
     grpc::string leading_comments = GetCsharpComments(file, true);
diff --git a/src/core/ext/filters/client_channel/backup_poller.cc b/src/core/ext/filters/client_channel/backup_poller.cc
index ed437d2..bfc549e 100644
--- a/src/core/ext/filters/client_channel/backup_poller.cc
+++ b/src/core/ext/filters/client_channel/backup_poller.cc
@@ -69,61 +69,62 @@
   gpr_free(env);
 }
 
-static void backup_poller_shutdown_unref(grpc_exec_ctx* exec_ctx,
-                                         backup_poller* p) {
+static void backup_poller_shutdown_unref(backup_poller* p) {
   if (gpr_unref(&p->shutdown_refs)) {
-    grpc_pollset_destroy(exec_ctx, p->pollset);
+    grpc_pollset_destroy(p->pollset);
     gpr_free(p->pollset);
     gpr_free(p);
   }
 }
 
-static void done_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
-  backup_poller_shutdown_unref(exec_ctx, (backup_poller*)arg);
+static void done_poller(void* arg, grpc_error* error) {
+  backup_poller_shutdown_unref((backup_poller*)arg);
 }
 
-static void g_poller_unref(grpc_exec_ctx* exec_ctx) {
+static void g_poller_unref() {
+  gpr_mu_lock(&g_poller_mu);
   if (gpr_unref(&g_poller->refs)) {
-    gpr_mu_lock(&g_poller_mu);
     backup_poller* p = g_poller;
     g_poller = nullptr;
     gpr_mu_unlock(&g_poller_mu);
     gpr_mu_lock(p->pollset_mu);
     p->shutting_down = true;
-    grpc_pollset_shutdown(exec_ctx, p->pollset,
-                          GRPC_CLOSURE_INIT(&p->shutdown_closure, done_poller,
-                                            p, grpc_schedule_on_exec_ctx));
+    grpc_pollset_shutdown(
+        p->pollset, GRPC_CLOSURE_INIT(&p->shutdown_closure, done_poller, p,
+                                      grpc_schedule_on_exec_ctx));
     gpr_mu_unlock(p->pollset_mu);
-    grpc_timer_cancel(exec_ctx, &p->polling_timer);
+    grpc_timer_cancel(&p->polling_timer);
+  } else {
+    gpr_mu_unlock(&g_poller_mu);
   }
 }
 
-static void run_poller(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void run_poller(void* arg, grpc_error* error) {
   backup_poller* p = (backup_poller*)arg;
   if (error != GRPC_ERROR_NONE) {
     if (error != GRPC_ERROR_CANCELLED) {
       GRPC_LOG_IF_ERROR("run_poller", GRPC_ERROR_REF(error));
     }
-    backup_poller_shutdown_unref(exec_ctx, p);
+    backup_poller_shutdown_unref(p);
     return;
   }
   gpr_mu_lock(p->pollset_mu);
   if (p->shutting_down) {
     gpr_mu_unlock(p->pollset_mu);
-    backup_poller_shutdown_unref(exec_ctx, p);
+    backup_poller_shutdown_unref(p);
     return;
   }
-  grpc_error* err = grpc_pollset_work(exec_ctx, p->pollset, nullptr,
-                                      grpc_exec_ctx_now(exec_ctx));
+  grpc_error* err =
+      grpc_pollset_work(p->pollset, nullptr, grpc_core::ExecCtx::Get()->Now());
   gpr_mu_unlock(p->pollset_mu);
   GRPC_LOG_IF_ERROR("Run client channel backup poller", err);
-  grpc_timer_init(exec_ctx, &p->polling_timer,
-                  grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
+  grpc_timer_init(&p->polling_timer,
+                  grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms,
                   &p->run_poller_closure);
 }
 
 void grpc_client_channel_start_backup_polling(
-    grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
+    grpc_pollset_set* interested_parties) {
   gpr_once_init(&g_once, init_globals);
   if (g_poll_interval_ms == 0) {
     return;
@@ -139,8 +140,8 @@
     gpr_ref_init(&g_poller->shutdown_refs, 2);
     GRPC_CLOSURE_INIT(&g_poller->run_poller_closure, run_poller, g_poller,
                       grpc_schedule_on_exec_ctx);
-    grpc_timer_init(exec_ctx, &g_poller->polling_timer,
-                    grpc_exec_ctx_now(exec_ctx) + g_poll_interval_ms,
+    grpc_timer_init(&g_poller->polling_timer,
+                    grpc_core::ExecCtx::Get()->Now() + g_poll_interval_ms,
                     &g_poller->run_poller_closure);
   }
 
@@ -152,14 +153,14 @@
   grpc_pollset* pollset = g_poller->pollset;
   gpr_mu_unlock(&g_poller_mu);
 
-  grpc_pollset_set_add_pollset(exec_ctx, interested_parties, pollset);
+  grpc_pollset_set_add_pollset(interested_parties, pollset);
 }
 
 void grpc_client_channel_stop_backup_polling(
-    grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties) {
+    grpc_pollset_set* interested_parties) {
   if (g_poll_interval_ms == 0) {
     return;
   }
-  grpc_pollset_set_del_pollset(exec_ctx, interested_parties, g_poller->pollset);
-  g_poller_unref(exec_ctx);
+  grpc_pollset_set_del_pollset(interested_parties, g_poller->pollset);
+  g_poller_unref();
 }
diff --git a/src/core/ext/filters/client_channel/backup_poller.h b/src/core/ext/filters/client_channel/backup_poller.h
index e993d50..551e033 100644
--- a/src/core/ext/filters/client_channel/backup_poller.h
+++ b/src/core/ext/filters/client_channel/backup_poller.h
@@ -25,10 +25,10 @@
 
 /* Start polling \a interested_parties periodically in the timer thread  */
 void grpc_client_channel_start_backup_polling(
-    grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);
+    grpc_pollset_set* interested_parties);
 
 /* Stop polling \a interested_parties */
 void grpc_client_channel_stop_backup_polling(
-    grpc_exec_ctx* exec_ctx, grpc_pollset_set* interested_parties);
+    grpc_pollset_set* interested_parties);
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_BACKUP_POLLER_H */
diff --git a/src/core/ext/filters/client_channel/channel_connectivity.cc b/src/core/ext/filters/client_channel/channel_connectivity.cc
index 7eaf5d9..20693ba 100644
--- a/src/core/ext/filters/client_channel/channel_connectivity.cc
+++ b/src/core/ext/filters/client_channel/channel_connectivity.cc
@@ -33,22 +33,22 @@
   /* forward through to the underlying client channel */
   grpc_channel_element* client_channel_elem =
       grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_connectivity_state state;
   GRPC_API_TRACE(
       "grpc_channel_check_connectivity_state(channel=%p, try_to_connect=%d)", 2,
       (channel, try_to_connect));
   if (client_channel_elem->filter == &grpc_client_channel_filter) {
-    state = grpc_client_channel_check_connectivity_state(
-        &exec_ctx, client_channel_elem, try_to_connect);
-    grpc_exec_ctx_finish(&exec_ctx);
+    state = grpc_client_channel_check_connectivity_state(client_channel_elem,
+                                                         try_to_connect);
+
     return state;
   }
   gpr_log(GPR_ERROR,
           "grpc_channel_check_connectivity_state called on something that is "
           "not a client channel, but '%s'",
           client_channel_elem->filter->name);
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return GRPC_CHANNEL_SHUTDOWN;
 }
 
@@ -73,12 +73,11 @@
   void* tag;
 } state_watcher;
 
-static void delete_state_watcher(grpc_exec_ctx* exec_ctx, state_watcher* w) {
+static void delete_state_watcher(state_watcher* w) {
   grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
       grpc_channel_get_channel_stack(w->channel));
   if (client_channel_elem->filter == &grpc_client_channel_filter) {
-    GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, w->channel,
-                                "watch_channel_connectivity");
+    GRPC_CHANNEL_INTERNAL_UNREF(w->channel, "watch_channel_connectivity");
   } else {
     abort();
   }
@@ -86,8 +85,7 @@
   gpr_free(w);
 }
 
-static void finished_completion(grpc_exec_ctx* exec_ctx, void* pw,
-                                grpc_cq_completion* ignored) {
+static void finished_completion(void* pw, grpc_cq_completion* ignored) {
   bool should_delete = false;
   state_watcher* w = (state_watcher*)pw;
   gpr_mu_lock(&w->mu);
@@ -102,19 +100,19 @@
   gpr_mu_unlock(&w->mu);
 
   if (should_delete) {
-    delete_state_watcher(exec_ctx, w);
+    delete_state_watcher(w);
   }
 }
 
-static void partly_done(grpc_exec_ctx* exec_ctx, state_watcher* w,
-                        bool due_to_completion, grpc_error* error) {
+static void partly_done(state_watcher* w, bool due_to_completion,
+                        grpc_error* error) {
   if (due_to_completion) {
-    grpc_timer_cancel(exec_ctx, &w->alarm);
+    grpc_timer_cancel(&w->alarm);
   } else {
     grpc_channel_element* client_channel_elem = grpc_channel_stack_last_element(
         grpc_channel_get_channel_stack(w->channel));
     grpc_client_channel_watch_connectivity_state(
-        exec_ctx, client_channel_elem,
+        client_channel_elem,
         grpc_polling_entity_create_from_pollset(grpc_cq_pollset(w->cq)),
         nullptr, &w->on_complete, nullptr);
   }
@@ -149,7 +147,7 @@
         w->error = error;
       }
       w->phase = CALLING_BACK_AND_FINISHED;
-      grpc_cq_end_op(exec_ctx, w->cq, w->tag, w->error, finished_completion, w,
+      grpc_cq_end_op(w->cq, w->tag, w->error, finished_completion, w,
                      &w->completion_storage);
       break;
     case CALLING_BACK_AND_FINISHED:
@@ -161,14 +159,12 @@
   GRPC_ERROR_UNREF(error);
 }
 
-static void watch_complete(grpc_exec_ctx* exec_ctx, void* pw,
-                           grpc_error* error) {
-  partly_done(exec_ctx, (state_watcher*)pw, true, GRPC_ERROR_REF(error));
+static void watch_complete(void* pw, grpc_error* error) {
+  partly_done((state_watcher*)pw, true, GRPC_ERROR_REF(error));
 }
 
-static void timeout_complete(grpc_exec_ctx* exec_ctx, void* pw,
-                             grpc_error* error) {
-  partly_done(exec_ctx, (state_watcher*)pw, false, GRPC_ERROR_REF(error));
+static void timeout_complete(void* pw, grpc_error* error) {
+  partly_done((state_watcher*)pw, false, GRPC_ERROR_REF(error));
 }
 
 int grpc_channel_num_external_connectivity_watchers(grpc_channel* channel) {
@@ -183,12 +179,10 @@
   gpr_timespec deadline;
 } watcher_timer_init_arg;
 
-static void watcher_timer_init(grpc_exec_ctx* exec_ctx, void* arg,
-                               grpc_error* error_ignored) {
+static void watcher_timer_init(void* arg, grpc_error* error_ignored) {
   watcher_timer_init_arg* wa = (watcher_timer_init_arg*)arg;
 
-  grpc_timer_init(exec_ctx, &wa->w->alarm,
-                  grpc_timespec_to_millis_round_up(wa->deadline),
+  grpc_timer_init(&wa->w->alarm, grpc_timespec_to_millis_round_up(wa->deadline),
                   &wa->w->on_timeout);
   gpr_free(wa);
 }
@@ -204,7 +198,7 @@
     gpr_timespec deadline, grpc_completion_queue* cq, void* tag) {
   grpc_channel_element* client_channel_elem =
       grpc_channel_stack_last_element(grpc_channel_get_channel_stack(channel));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   state_watcher* w = (state_watcher*)gpr_malloc(sizeof(*w));
 
   GRPC_API_TRACE(
@@ -241,12 +235,10 @@
   if (client_channel_elem->filter == &grpc_client_channel_filter) {
     GRPC_CHANNEL_INTERNAL_REF(channel, "watch_channel_connectivity");
     grpc_client_channel_watch_connectivity_state(
-        &exec_ctx, client_channel_elem,
+        client_channel_elem,
         grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq)), &w->state,
         &w->on_complete, &w->watcher_timer_init);
   } else {
     abort();
   }
-
-  grpc_exec_ctx_finish(&exec_ctx);
 }
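
A hedged sketch of the exec-ctx migration pattern this file (and backup_poller.cc above) follows: a stack-scoped grpc_core::ExecCtx replaces the GRPC_EXEC_CTX_INIT / grpc_exec_ctx_finish pair, and callees reach the current context through ExecCtx::Get() instead of a threaded grpc_exec_ctx* parameter.

    #include "src/core/lib/iomgr/exec_ctx.h"

    void SomeSurfaceApiEntryPoint() {
      // Established for the duration of this call; pending work is flushed
      // when exec_ctx is destroyed at the end of the scope.
      grpc_core::ExecCtx exec_ctx;

      // Core code no longer threads a grpc_exec_ctx* argument; when it needs
      // the current context (e.g. for the clock) it asks for it directly.
      grpc_millis now = grpc_core::ExecCtx::Get()->Now();
      (void)now;
    }
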
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 03c1b6f..e99022a 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -91,8 +91,7 @@
 static void* method_parameters_ref_wrapper(void* value) {
   return method_parameters_ref((method_parameters*)value);
 }
-static void method_parameters_unref_wrapper(grpc_exec_ctx* exec_ctx,
-                                            void* value) {
+static void method_parameters_unref_wrapper(void* value) {
   method_parameters_unref((method_parameters*)value);
 }
 
@@ -210,6 +209,14 @@
   char* info_service_config_json;
 } channel_data;
 
+typedef struct {
+  channel_data* chand;
+  /** Used only as an identifier; do not dereference it, because the LB policy
+   * may no longer exist when the callback is run. */
+  grpc_lb_policy* lb_policy;
+  grpc_closure closure;
+} reresolution_request_args;
+
 /** We create one watcher for each new lb_policy that is returned from a
     resolver, to watch for state changes from the lb_policy. When a state
     change is seen, we update the channel, and create a new watcher. */
@@ -220,12 +227,11 @@
   grpc_lb_policy* lb_policy;
 } lb_policy_connectivity_watcher;
 
-static void watch_lb_policy_locked(grpc_exec_ctx* exec_ctx, channel_data* chand,
+static void watch_lb_policy_locked(channel_data* chand,
                                    grpc_lb_policy* lb_policy,
                                    grpc_connectivity_state current_state);
 
-static void set_channel_connectivity_state_locked(grpc_exec_ctx* exec_ctx,
-                                                  channel_data* chand,
+static void set_channel_connectivity_state_locked(channel_data* chand,
                                                   grpc_connectivity_state state,
                                                   grpc_error* error,
                                                   const char* reason) {
@@ -237,12 +243,12 @@
     if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
       /* cancel picks with wait_for_ready=false */
       grpc_lb_policy_cancel_picks_locked(
-          exec_ctx, chand->lb_policy,
+          chand->lb_policy,
           /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
           /* check= */ 0, GRPC_ERROR_REF(error));
     } else if (state == GRPC_CHANNEL_SHUTDOWN) {
       /* cancel all picks */
-      grpc_lb_policy_cancel_picks_locked(exec_ctx, chand->lb_policy,
+      grpc_lb_policy_cancel_picks_locked(chand->lb_policy,
                                          /* mask= */ 0, /* check= */ 0,
                                          GRPC_ERROR_REF(error));
     }
@@ -251,38 +257,28 @@
     gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
             grpc_connectivity_state_name(state));
   }
-  grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error,
-                              reason);
+  grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
 }
 
-static void on_lb_policy_state_changed_locked(grpc_exec_ctx* exec_ctx,
-                                              void* arg, grpc_error* error) {
+static void on_lb_policy_state_changed_locked(void* arg, grpc_error* error) {
   lb_policy_connectivity_watcher* w = (lb_policy_connectivity_watcher*)arg;
-  grpc_connectivity_state publish_state = w->state;
   /* check if the notification is for the latest policy */
   if (w->lb_policy == w->chand->lb_policy) {
     if (grpc_client_channel_trace.enabled()) {
       gpr_log(GPR_DEBUG, "chand=%p: lb_policy=%p state changed to %s", w->chand,
               w->lb_policy, grpc_connectivity_state_name(w->state));
     }
-    if (publish_state == GRPC_CHANNEL_SHUTDOWN &&
-        w->chand->resolver != nullptr) {
-      publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
-      grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver);
-      GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
-      w->chand->lb_policy = nullptr;
-    }
-    set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
+    set_channel_connectivity_state_locked(w->chand, w->state,
                                           GRPC_ERROR_REF(error), "lb_changed");
     if (w->state != GRPC_CHANNEL_SHUTDOWN) {
-      watch_lb_policy_locked(exec_ctx, w->chand, w->lb_policy, w->state);
+      watch_lb_policy_locked(w->chand, w->lb_policy, w->state);
     }
   }
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
+  GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, "watch_lb_policy");
   gpr_free(w);
 }
 
-static void watch_lb_policy_locked(grpc_exec_ctx* exec_ctx, channel_data* chand,
+static void watch_lb_policy_locked(channel_data* chand,
                                    grpc_lb_policy* lb_policy,
                                    grpc_connectivity_state current_state) {
   lb_policy_connectivity_watcher* w =
@@ -293,19 +289,18 @@
                     grpc_combiner_scheduler(chand->combiner));
   w->state = current_state;
   w->lb_policy = lb_policy;
-  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, lb_policy, &w->state,
+  grpc_lb_policy_notify_on_state_change_locked(lb_policy, &w->state,
                                                &w->on_changed);
 }
 
-static void start_resolving_locked(grpc_exec_ctx* exec_ctx,
-                                   channel_data* chand) {
+static void start_resolving_locked(channel_data* chand) {
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
   }
   GPR_ASSERT(!chand->started_resolving);
   chand->started_resolving = true;
   GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
-  grpc_resolver_next_locked(exec_ctx, chand->resolver, &chand->resolver_result,
+  grpc_resolver_next_locked(chand->resolver, &chand->resolver_result,
                             &chand->on_resolver_result_changed);
 }
 
@@ -369,8 +364,26 @@
   }
 }
 
-static void on_resolver_result_changed_locked(grpc_exec_ctx* exec_ctx,
-                                              void* arg, grpc_error* error) {
+static void request_reresolution_locked(void* arg, grpc_error* error) {
+  reresolution_request_args* args = (reresolution_request_args*)arg;
+  channel_data* chand = args->chand;
+  // If this invocation is for a stale LB policy, treat it as an LB shutdown
+  // signal.
+  if (args->lb_policy != chand->lb_policy || error != GRPC_ERROR_NONE ||
+      chand->resolver == nullptr) {
+    GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "re-resolution");
+    gpr_free(args);
+    return;
+  }
+  if (grpc_client_channel_trace.enabled()) {
+    gpr_log(GPR_DEBUG, "chand=%p: started name re-resolving", chand);
+  }
+  grpc_resolver_channel_saw_error_locked(chand->resolver);
+  // Give back the closure to the LB policy.
+  grpc_lb_policy_set_reresolve_closure_locked(chand->lb_policy, &args->closure);
+}
+
+static void on_resolver_result_changed_locked(void* arg, grpc_error* error) {
   channel_data* chand = (channel_data*)arg;
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
@@ -385,101 +398,112 @@
   grpc_server_retry_throttle_data* retry_throttle_data = nullptr;
   grpc_slice_hash_table* method_params_table = nullptr;
   if (chand->resolver_result != nullptr) {
-    // Find LB policy name.
-    const char* lb_policy_name = nullptr;
-    const grpc_arg* channel_arg =
-        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
-    if (channel_arg != nullptr) {
-      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
-      lb_policy_name = channel_arg->value.string;
-    }
-    // Special case: If at least one balancer address is present, we use
-    // the grpclb policy, regardless of what the resolver actually specified.
-    channel_arg =
-        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
-    if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
-      grpc_lb_addresses* addresses =
-          (grpc_lb_addresses*)channel_arg->value.pointer.p;
-      bool found_balancer_address = false;
-      for (size_t i = 0; i < addresses->num_addresses; ++i) {
-        if (addresses->addresses[i].is_balancer) {
-          found_balancer_address = true;
-          break;
-        }
-      }
-      if (found_balancer_address) {
-        if (lb_policy_name != nullptr &&
-            strcmp(lb_policy_name, "grpclb") != 0) {
-          gpr_log(GPR_INFO,
-                  "resolver requested LB policy %s but provided at least one "
-                  "balancer address -- forcing use of grpclb LB policy",
-                  lb_policy_name);
-        }
-        lb_policy_name = "grpclb";
-      }
-    }
-    // Use pick_first if nothing was specified and we didn't select grpclb
-    // above.
-    if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
-    grpc_lb_policy_args lb_policy_args;
-    lb_policy_args.args = chand->resolver_result;
-    lb_policy_args.client_channel_factory = chand->client_channel_factory;
-    lb_policy_args.combiner = chand->combiner;
-    // Check to see if we're already using the right LB policy.
-    // Note: It's safe to use chand->info_lb_policy_name here without
-    // taking a lock on chand->info_mu, because this function is the
-    // only thing that modifies its value, and it can only be invoked
-    // once at any given time.
-    lb_policy_name_changed =
-        chand->info_lb_policy_name == nullptr ||
-        strcmp(chand->info_lb_policy_name, lb_policy_name) != 0;
-    if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
-      // Continue using the same LB policy.  Update with new addresses.
-      lb_policy_updated = true;
-      grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy, &lb_policy_args);
-    } else {
-      // Instantiate new LB policy.
-      new_lb_policy =
-          grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
-      if (new_lb_policy == nullptr) {
-        gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
-      }
-    }
-    // Find service config.
-    channel_arg =
-        grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVICE_CONFIG);
-    if (channel_arg != nullptr) {
-      GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
-      service_config_json = gpr_strdup(channel_arg->value.string);
-      grpc_service_config* service_config =
-          grpc_service_config_create(service_config_json);
-      if (service_config != nullptr) {
-        channel_arg =
-            grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
-        GPR_ASSERT(channel_arg != nullptr);
+    if (chand->resolver != nullptr) {
+      // Find LB policy name.
+      const char* lb_policy_name = nullptr;
+      const grpc_arg* channel_arg = grpc_channel_args_find(
+          chand->resolver_result, GRPC_ARG_LB_POLICY_NAME);
+      if (channel_arg != nullptr) {
         GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
-        grpc_uri* uri =
-            grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
-        GPR_ASSERT(uri->path[0] != '\0');
-        service_config_parsing_state parsing_state;
-        memset(&parsing_state, 0, sizeof(parsing_state));
-        parsing_state.server_name =
-            uri->path[0] == '/' ? uri->path + 1 : uri->path;
-        grpc_service_config_parse_global_params(
-            service_config, parse_retry_throttle_params, &parsing_state);
-        grpc_uri_destroy(uri);
-        retry_throttle_data = parsing_state.retry_throttle_data;
-        method_params_table = grpc_service_config_create_method_config_table(
-            exec_ctx, service_config, method_parameters_create_from_json,
-            method_parameters_ref_wrapper, method_parameters_unref_wrapper);
-        grpc_service_config_destroy(service_config);
+        lb_policy_name = channel_arg->value.string;
       }
+      // Special case: If at least one balancer address is present, we use
+      // the grpclb policy, regardless of what the resolver actually specified.
+      channel_arg =
+          grpc_channel_args_find(chand->resolver_result, GRPC_ARG_LB_ADDRESSES);
+      if (channel_arg != nullptr && channel_arg->type == GRPC_ARG_POINTER) {
+        grpc_lb_addresses* addresses =
+            (grpc_lb_addresses*)channel_arg->value.pointer.p;
+        bool found_balancer_address = false;
+        for (size_t i = 0; i < addresses->num_addresses; ++i) {
+          if (addresses->addresses[i].is_balancer) {
+            found_balancer_address = true;
+            break;
+          }
+        }
+        if (found_balancer_address) {
+          if (lb_policy_name != nullptr &&
+              strcmp(lb_policy_name, "grpclb") != 0) {
+            gpr_log(GPR_INFO,
+                    "resolver requested LB policy %s but provided at least one "
+                    "balancer address -- forcing use of grpclb LB policy",
+                    lb_policy_name);
+          }
+          lb_policy_name = "grpclb";
+        }
+      }
+      // Use pick_first if nothing was specified and we didn't select grpclb
+      // above.
+      if (lb_policy_name == nullptr) lb_policy_name = "pick_first";
+      grpc_lb_policy_args lb_policy_args;
+      lb_policy_args.args = chand->resolver_result;
+      lb_policy_args.client_channel_factory = chand->client_channel_factory;
+      lb_policy_args.combiner = chand->combiner;
+      // Check to see if we're already using the right LB policy.
+      // Note: It's safe to use chand->info_lb_policy_name here without
+      // taking a lock on chand->info_mu, because this function is the
+      // only thing that modifies its value, and it can only be invoked
+      // once at any given time.
+      lb_policy_name_changed =
+          chand->info_lb_policy_name == nullptr ||
+          gpr_stricmp(chand->info_lb_policy_name, lb_policy_name) != 0;
+      if (chand->lb_policy != nullptr && !lb_policy_name_changed) {
+        // Continue using the same LB policy.  Update with new addresses.
+        lb_policy_updated = true;
+        grpc_lb_policy_update_locked(chand->lb_policy, &lb_policy_args);
+      } else {
+        // Instantiate new LB policy.
+        new_lb_policy = grpc_lb_policy_create(lb_policy_name, &lb_policy_args);
+        if (new_lb_policy == nullptr) {
+          gpr_log(GPR_ERROR, "could not create LB policy \"%s\"",
+                  lb_policy_name);
+        } else {
+          reresolution_request_args* args =
+              (reresolution_request_args*)gpr_zalloc(sizeof(*args));
+          args->chand = chand;
+          args->lb_policy = new_lb_policy;
+          GRPC_CLOSURE_INIT(&args->closure, request_reresolution_locked, args,
+                            grpc_combiner_scheduler(chand->combiner));
+          GRPC_CHANNEL_STACK_REF(chand->owning_stack, "re-resolution");
+          grpc_lb_policy_set_reresolve_closure_locked(new_lb_policy,
+                                                      &args->closure);
+        }
+      }
+      // Find service config.
+      channel_arg = grpc_channel_args_find(chand->resolver_result,
+                                           GRPC_ARG_SERVICE_CONFIG);
+      if (channel_arg != nullptr) {
+        GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+        service_config_json = gpr_strdup(channel_arg->value.string);
+        grpc_service_config* service_config =
+            grpc_service_config_create(service_config_json);
+        if (service_config != nullptr) {
+          channel_arg = grpc_channel_args_find(chand->resolver_result,
+                                               GRPC_ARG_SERVER_URI);
+          GPR_ASSERT(channel_arg != nullptr);
+          GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
+          grpc_uri* uri = grpc_uri_parse(channel_arg->value.string, true);
+          GPR_ASSERT(uri->path[0] != '\0');
+          service_config_parsing_state parsing_state;
+          memset(&parsing_state, 0, sizeof(parsing_state));
+          parsing_state.server_name =
+              uri->path[0] == '/' ? uri->path + 1 : uri->path;
+          grpc_service_config_parse_global_params(
+              service_config, parse_retry_throttle_params, &parsing_state);
+          grpc_uri_destroy(uri);
+          retry_throttle_data = parsing_state.retry_throttle_data;
+          method_params_table = grpc_service_config_create_method_config_table(
+              service_config, method_parameters_create_from_json,
+              method_parameters_ref_wrapper, method_parameters_unref_wrapper);
+          grpc_service_config_destroy(service_config);
+        }
+      }
+      // Before we clean up, save a copy of lb_policy_name, since it might
+      // be pointing to data inside chand->resolver_result.
+      // The copy will be saved in chand->lb_policy_name below.
+      lb_policy_name_dup = gpr_strdup(lb_policy_name);
     }
-    // Before we clean up, save a copy of lb_policy_name, since it might
-    // be pointing to data inside chand->resolver_result.
-    // The copy will be saved in chand->lb_policy_name below.
-    lb_policy_name_dup = gpr_strdup(lb_policy_name);
-    grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
+    grpc_channel_args_destroy(chand->resolver_result);
     chand->resolver_result = nullptr;
   }
   if (grpc_client_channel_trace.enabled()) {
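The hunk above chooses the LB policy name in three steps: take whatever the resolver specified, override it with "grpclb" as soon as any resolved address is marked as a balancer address, and fall back to "pick_first" when nothing was specified. A minimal sketch of that decision as a standalone helper, assuming the includes already present in this file; the helper name is hypothetical, while grpc_lb_addresses and its is_balancer flag are the types used in the hunk:

static const char* choose_lb_policy_name(const char* requested_name,
                                         const grpc_lb_addresses* addresses) {
  // Any balancer address forces grpclb, regardless of the resolver's request.
  if (addresses != nullptr) {
    for (size_t i = 0; i < addresses->num_addresses; ++i) {
      if (addresses->addresses[i].is_balancer) return "grpclb";
    }
  }
  // Otherwise use the requested policy, defaulting to pick_first.
  return requested_name != nullptr ? requested_name : "pick_first";
}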
@@ -511,15 +535,15 @@
   chand->retry_throttle_data = retry_throttle_data;
   // Swap out the method params table.
   if (chand->method_params_table != nullptr) {
-    grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
+    grpc_slice_hash_table_unref(chand->method_params_table);
   }
   chand->method_params_table = method_params_table;
   // If we have a new LB policy or are shutting down (in which case
-  // new_lb_policy will be NULL), swap out the LB policy, unreffing the
-  // old one and removing its fds from chand->interested_parties.
-  // Note that we do NOT do this if either (a) we updated the existing
-  // LB policy above or (b) we failed to create the new LB policy (in
-  // which case we want to continue using the most recent one we had).
+  // new_lb_policy will be NULL), swap out the LB policy, unreffing the old one
+  // and removing its fds from chand->interested_parties. Note that we do NOT do
+  // this if either (a) we updated the existing LB policy above or (b) we failed
+  // to create the new LB policy (in which case we want to continue using the
+  // most recent one we had).
   if (new_lb_policy != nullptr || error != GRPC_ERROR_NONE ||
       chand->resolver == nullptr) {
     if (chand->lb_policy != nullptr) {
@@ -527,10 +551,9 @@
         gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
                 chand->lb_policy);
       }
-      grpc_pollset_set_del_pollset_set(exec_ctx,
-                                       chand->lb_policy->interested_parties,
+      grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
                                        chand->interested_parties);
-      GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+      GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
     }
     chand->lb_policy = new_lb_policy;
   }
@@ -544,21 +567,20 @@
       if (grpc_client_channel_trace.enabled()) {
         gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
       }
-      grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
-      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
+      grpc_resolver_shutdown_locked(chand->resolver);
+      GRPC_RESOLVER_UNREF(chand->resolver, "channel");
       chand->resolver = nullptr;
     }
     set_channel_connectivity_state_locked(
-        exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
+        chand, GRPC_CHANNEL_SHUTDOWN,
         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
             "Got resolver result after disconnection", &error, 1),
         "resolver_gone");
-    GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
+    GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
     grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                                GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                    "Channel disconnected", &error, 1));
-    GRPC_CLOSURE_LIST_SCHED(exec_ctx,
-                            &chand->waiting_for_resolver_result_closures);
+    GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
   } else {  // Not shutting down.
     grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
     grpc_error* state_error =
@@ -568,33 +590,28 @@
         gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
       }
       GRPC_ERROR_UNREF(state_error);
-      state = grpc_lb_policy_check_connectivity_locked(exec_ctx, new_lb_policy,
-                                                       &state_error);
-      grpc_pollset_set_add_pollset_set(exec_ctx,
-                                       new_lb_policy->interested_parties,
+      state =
+          grpc_lb_policy_check_connectivity_locked(new_lb_policy, &state_error);
+      grpc_pollset_set_add_pollset_set(new_lb_policy->interested_parties,
                                        chand->interested_parties);
-      GRPC_CLOSURE_LIST_SCHED(exec_ctx,
-                              &chand->waiting_for_resolver_result_closures);
+      GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
       if (chand->exit_idle_when_lb_policy_arrives) {
-        grpc_lb_policy_exit_idle_locked(exec_ctx, new_lb_policy);
+        grpc_lb_policy_exit_idle_locked(new_lb_policy);
         chand->exit_idle_when_lb_policy_arrives = false;
       }
-      watch_lb_policy_locked(exec_ctx, chand, new_lb_policy, state);
+      watch_lb_policy_locked(chand, new_lb_policy, state);
     }
     if (!lb_policy_updated) {
-      set_channel_connectivity_state_locked(exec_ctx, chand, state,
-                                            GRPC_ERROR_REF(state_error),
-                                            "new_lb+resolver");
+      set_channel_connectivity_state_locked(
+          chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
     }
-    grpc_resolver_next_locked(exec_ctx, chand->resolver,
-                              &chand->resolver_result,
+    grpc_resolver_next_locked(chand->resolver, &chand->resolver_result,
                               &chand->on_resolver_result_changed);
     GRPC_ERROR_UNREF(state_error);
   }
 }
 
-static void start_transport_op_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                      grpc_error* error_ignored) {
+static void start_transport_op_locked(void* arg, grpc_error* error_ignored) {
   grpc_transport_op* op = (grpc_transport_op*)arg;
   grpc_channel_element* elem =
       (grpc_channel_element*)op->handler_private.extra_arg;
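Every closure callback in this file now follows the two-argument shape seen above: the grpc_exec_ctx* parameter is gone and only (void* arg, grpc_error* error) remains, while GRPC_CLOSURE_INIT, GRPC_CLOSURE_SCHED, and the combiner scheduler keep their other arguments. A minimal sketch of the pattern, with hypothetical names and assuming the includes already present in this file:

typedef struct {
  grpc_closure closure;
  grpc_combiner* combiner;
} example_state;

// Callback body: 'error' is borrowed; take GRPC_ERROR_REF(error) if it must
// outlive this call.
static void example_callback(void* arg, grpc_error* error) {
  example_state* st = (example_state*)arg;
  (void)st;
  (void)error;
}

static void example_schedule(example_state* st) {
  GRPC_CLOSURE_INIT(&st->closure, example_callback, st,
                    grpc_combiner_scheduler(st->combiner));
  GRPC_CLOSURE_SCHED(&st->closure, GRPC_ERROR_NONE);
}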
@@ -602,75 +619,74 @@
 
   if (op->on_connectivity_state_change != nullptr) {
     grpc_connectivity_state_notify_on_state_change(
-        exec_ctx, &chand->state_tracker, op->connectivity_state,
+        &chand->state_tracker, op->connectivity_state,
         op->on_connectivity_state_change);
     op->on_connectivity_state_change = nullptr;
     op->connectivity_state = nullptr;
   }
 
-  if (op->send_ping != nullptr) {
+  if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
     if (chand->lb_policy == nullptr) {
       GRPC_CLOSURE_SCHED(
-          exec_ctx, op->send_ping,
+          op->send_ping.on_initiate,
+          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
+      GRPC_CLOSURE_SCHED(
+          op->send_ping.on_ack,
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
     } else {
-      grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
+      grpc_lb_policy_ping_one_locked(
+          chand->lb_policy, op->send_ping.on_initiate, op->send_ping.on_ack);
       op->bind_pollset = nullptr;
     }
-    op->send_ping = nullptr;
+    op->send_ping.on_initiate = nullptr;
+    op->send_ping.on_ack = nullptr;
   }
 
   if (op->disconnect_with_error != GRPC_ERROR_NONE) {
     if (chand->resolver != nullptr) {
       set_channel_connectivity_state_locked(
-          exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
+          chand, GRPC_CHANNEL_SHUTDOWN,
           GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
-      grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
-      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
+      grpc_resolver_shutdown_locked(chand->resolver);
+      GRPC_RESOLVER_UNREF(chand->resolver, "channel");
       chand->resolver = nullptr;
       if (!chand->started_resolving) {
         grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                                    GRPC_ERROR_REF(op->disconnect_with_error));
-        GRPC_CLOSURE_LIST_SCHED(exec_ctx,
-                                &chand->waiting_for_resolver_result_closures);
+        GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
       }
       if (chand->lb_policy != nullptr) {
-        grpc_pollset_set_del_pollset_set(exec_ctx,
-                                         chand->lb_policy->interested_parties,
+        grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
                                          chand->interested_parties);
-        GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+        GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
         chand->lb_policy = nullptr;
       }
     }
     GRPC_ERROR_UNREF(op->disconnect_with_error);
   }
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "start_transport_op");
+  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op");
 
-  GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
 }
 
-static void cc_start_transport_op(grpc_exec_ctx* exec_ctx,
-                                  grpc_channel_element* elem,
+static void cc_start_transport_op(grpc_channel_element* elem,
                                   grpc_transport_op* op) {
   channel_data* chand = (channel_data*)elem->channel_data;
 
   GPR_ASSERT(op->set_accept_stream == false);
   if (op->bind_pollset != nullptr) {
-    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
-                                 op->bind_pollset);
+    grpc_pollset_set_add_pollset(chand->interested_parties, op->bind_pollset);
   }
 
   op->handler_private.extra_arg = elem;
   GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op");
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked,
                         op, grpc_combiner_scheduler(chand->combiner)),
       GRPC_ERROR_NONE);
 }
 
-static void cc_get_channel_info(grpc_exec_ctx* exec_ctx,
-                                grpc_channel_element* elem,
+static void cc_get_channel_info(grpc_channel_element* elem,
                                 const grpc_channel_info* info) {
   channel_data* chand = (channel_data*)elem->channel_data;
   gpr_mu_lock(&chand->info_mu);
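The ping path in the hunk above reflects a transport-op change: op->send_ping is no longer a single closure but a pair of closures, on_initiate and on_ack, each of which is scheduled (or failed) independently. A rough caller-side sketch under that assumption; the callback names and wrapper are hypothetical, and grpc_make_transport_op is assumed to be the usual transport helper:

static void on_ping_initiated(void* arg, grpc_error* error) { /* ping sent */ }
static void on_ping_acked(void* arg, grpc_error* error) { /* ping acked */ }

static void example_send_ping(grpc_channel_element* elem) {
  grpc_transport_op* op = grpc_make_transport_op(nullptr);
  op->send_ping.on_initiate = GRPC_CLOSURE_CREATE(
      on_ping_initiated, nullptr, grpc_schedule_on_exec_ctx);
  op->send_ping.on_ack = GRPC_CLOSURE_CREATE(
      on_ping_acked, nullptr, grpc_schedule_on_exec_ctx);
  elem->filter->start_transport_op(elem, op);
}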
@@ -689,8 +705,7 @@
 }
 
 /* Constructor for channel_data */
-static grpc_error* cc_init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                        grpc_channel_element* elem,
+static grpc_error* cc_init_channel_elem(grpc_channel_element* elem,
                                         grpc_channel_element_args* args) {
   channel_data* chand = (channel_data*)elem->channel_data;
   GPR_ASSERT(args->is_last);
@@ -711,7 +726,7 @@
   chand->interested_parties = grpc_pollset_set_create();
   grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
                                "client_channel");
-  grpc_client_channel_start_backup_polling(exec_ctx, chand->interested_parties);
+  grpc_client_channel_start_backup_polling(chand->interested_parties);
   // Record client channel factory.
   const grpc_arg* arg = grpc_channel_args_find(args->channel_args,
                                                GRPC_ARG_CLIENT_CHANNEL_FACTORY);
@@ -739,15 +754,15 @@
   }
   char* proxy_name = nullptr;
   grpc_channel_args* new_args = nullptr;
-  grpc_proxy_mappers_map_name(exec_ctx, arg->value.string, args->channel_args,
+  grpc_proxy_mappers_map_name(arg->value.string, args->channel_args,
                               &proxy_name, &new_args);
   // Instantiate resolver.
   chand->resolver = grpc_resolver_create(
-      exec_ctx, proxy_name != nullptr ? proxy_name : arg->value.string,
+      proxy_name != nullptr ? proxy_name : arg->value.string,
       new_args != nullptr ? new_args : args->channel_args,
       chand->interested_parties, chand->combiner);
   if (proxy_name != nullptr) gpr_free(proxy_name);
-  if (new_args != nullptr) grpc_channel_args_destroy(exec_ctx, new_args);
+  if (new_args != nullptr) grpc_channel_args_destroy(new_args);
   if (chand->resolver == nullptr) {
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
   }
@@ -756,32 +771,28 @@
   return GRPC_ERROR_NONE;
 }
 
-static void shutdown_resolver_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                     grpc_error* error) {
+static void shutdown_resolver_locked(void* arg, grpc_error* error) {
   grpc_resolver* resolver = (grpc_resolver*)arg;
-  grpc_resolver_shutdown_locked(exec_ctx, resolver);
-  GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
+  grpc_resolver_shutdown_locked(resolver);
+  GRPC_RESOLVER_UNREF(resolver, "channel");
 }
 
 /* Destructor for channel_data */
-static void cc_destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                    grpc_channel_element* elem) {
+static void cc_destroy_channel_elem(grpc_channel_element* elem) {
   channel_data* chand = (channel_data*)elem->channel_data;
   if (chand->resolver != nullptr) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
                             grpc_combiner_scheduler(chand->combiner)),
         GRPC_ERROR_NONE);
   }
   if (chand->client_channel_factory != nullptr) {
-    grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory);
+    grpc_client_channel_factory_unref(chand->client_channel_factory);
   }
   if (chand->lb_policy != nullptr) {
-    grpc_pollset_set_del_pollset_set(exec_ctx,
-                                     chand->lb_policy->interested_parties,
+    grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
                                      chand->interested_parties);
-    GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+    GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
   }
   gpr_free(chand->info_lb_policy_name);
   gpr_free(chand->info_service_config_json);
@@ -789,12 +800,12 @@
     grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
   }
   if (chand->method_params_table != nullptr) {
-    grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
+    grpc_slice_hash_table_unref(chand->method_params_table);
   }
-  grpc_client_channel_stop_backup_polling(exec_ctx, chand->interested_parties);
-  grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
-  grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
-  GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel");
+  grpc_client_channel_stop_backup_polling(chand->interested_parties);
+  grpc_connectivity_state_destroy(&chand->state_tracker);
+  grpc_pollset_set_destroy(chand->interested_parties);
+  GRPC_COMBINER_UNREF(chand->combiner, "client_channel");
   gpr_mu_destroy(&chand->info_mu);
   gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu);
 }
@@ -881,21 +892,18 @@
 }
 
 // This is called via the call combiner, so access to calld is synchronized.
-static void fail_pending_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
-                                                void* arg, grpc_error* error) {
+static void fail_pending_batch_in_call_combiner(void* arg, grpc_error* error) {
   call_data* calld = (call_data*)arg;
   if (calld->waiting_for_pick_batches_count > 0) {
     --calld->waiting_for_pick_batches_count;
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx,
         calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
         GRPC_ERROR_REF(error), calld->call_combiner);
   }
 }
 
 // This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_fail(grpc_exec_ctx* exec_ctx,
-                                          grpc_call_element* elem,
+static void waiting_for_pick_batches_fail(grpc_call_element* elem,
                                           grpc_error* error) {
   call_data* calld = (call_data*)elem->call_data;
   if (grpc_client_channel_trace.enabled()) {
@@ -908,37 +916,34 @@
     GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                       fail_pending_batch_in_call_combiner, calld,
                       grpc_schedule_on_exec_ctx);
-    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
-                             &calld->handle_pending_batch_in_call_combiner[i],
-                             GRPC_ERROR_REF(error),
-                             "waiting_for_pick_batches_fail");
+    GRPC_CALL_COMBINER_START(
+        calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
+        GRPC_ERROR_REF(error), "waiting_for_pick_batches_fail");
   }
   if (calld->initial_metadata_batch != nullptr) {
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
+        calld->initial_metadata_batch, GRPC_ERROR_REF(error),
         calld->call_combiner);
   } else {
-    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                             "waiting_for_pick_batches_fail");
   }
   GRPC_ERROR_UNREF(error);
 }
 
 // This is called via the call combiner, so access to calld is synchronized.
-static void run_pending_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
-                                               void* arg, grpc_error* ignored) {
+static void run_pending_batch_in_call_combiner(void* arg, grpc_error* ignored) {
   call_data* calld = (call_data*)arg;
   if (calld->waiting_for_pick_batches_count > 0) {
     --calld->waiting_for_pick_batches_count;
     grpc_subchannel_call_process_op(
-        exec_ctx, calld->subchannel_call,
+        calld->subchannel_call,
         calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
   }
 }
 
 // This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_resume(grpc_exec_ctx* exec_ctx,
-                                            grpc_call_element* elem) {
+static void waiting_for_pick_batches_resume(grpc_call_element* elem) {
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
   if (grpc_client_channel_trace.enabled()) {
@@ -952,20 +957,18 @@
     GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                       run_pending_batch_in_call_combiner, calld,
                       grpc_schedule_on_exec_ctx);
-    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
-                             &calld->handle_pending_batch_in_call_combiner[i],
-                             GRPC_ERROR_NONE,
-                             "waiting_for_pick_batches_resume");
+    GRPC_CALL_COMBINER_START(
+        calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
+        GRPC_ERROR_NONE, "waiting_for_pick_batches_resume");
   }
   GPR_ASSERT(calld->initial_metadata_batch != nullptr);
-  grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
+  grpc_subchannel_call_process_op(calld->subchannel_call,
                                   calld->initial_metadata_batch);
 }
 
 // Applies service config to the call.  Must be invoked once we know
 // that the resolver has returned results to the channel.
-static void apply_service_config_to_call_locked(grpc_exec_ctx* exec_ctx,
-                                                grpc_call_element* elem) {
+static void apply_service_config_to_call_locked(grpc_call_element* elem) {
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
   if (grpc_client_channel_trace.enabled()) {
@@ -978,7 +981,7 @@
   }
   if (chand->method_params_table != nullptr) {
     calld->method_params = (method_parameters*)grpc_method_config_table_get(
-        exec_ctx, chand->method_params_table, calld->path);
+        chand->method_params_table, calld->path);
     if (calld->method_params != nullptr) {
       method_parameters_ref(calld->method_params);
       // If the deadline from the service config is shorter than the one
@@ -990,15 +993,14 @@
             calld->method_params->timeout;
         if (per_method_deadline < calld->deadline) {
           calld->deadline = per_method_deadline;
-          grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+          grpc_deadline_state_reset(elem, calld->deadline);
         }
       }
     }
   }
 }
 
-static void create_subchannel_call_locked(grpc_exec_ctx* exec_ctx,
-                                          grpc_call_element* elem,
+static void create_subchannel_call_locked(grpc_call_element* elem,
                                           grpc_error* error) {
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
@@ -1012,24 +1014,22 @@
       calld->call_combiner             // call_combiner
   };
   grpc_error* new_error = grpc_connected_subchannel_create_call(
-      exec_ctx, calld->connected_subchannel, &call_args,
-      &calld->subchannel_call);
+      calld->connected_subchannel, &call_args, &calld->subchannel_call);
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
             chand, calld, calld->subchannel_call, grpc_error_string(new_error));
   }
   if (new_error != GRPC_ERROR_NONE) {
     new_error = grpc_error_add_child(new_error, error);
-    waiting_for_pick_batches_fail(exec_ctx, elem, new_error);
+    waiting_for_pick_batches_fail(elem, new_error);
   } else {
-    waiting_for_pick_batches_resume(exec_ctx, elem);
+    waiting_for_pick_batches_resume(elem);
   }
   GRPC_ERROR_UNREF(error);
 }
 
 // Invoked when a pick is completed, on both success and failure.
-static void pick_done_locked(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-                             grpc_error* error) {
+static void pick_done_locked(grpc_call_element* elem, grpc_error* error) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
   if (calld->connected_subchannel == nullptr) {
@@ -1045,10 +1045,10 @@
               "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
               calld, grpc_error_string(calld->error));
     }
-    waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error));
+    waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
   } else {
     /* Create call on subchannel. */
-    create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+    create_subchannel_call_locked(elem, GRPC_ERROR_REF(error));
   }
   GRPC_ERROR_UNREF(error);
 }
@@ -1057,19 +1057,17 @@
 // either (a) the pick was deferred pending a resolver result or (b) the
 // pick was done asynchronously.  Removes the call's polling entity from
 // chand->interested_parties before invoking pick_done_locked().
-static void async_pick_done_locked(grpc_exec_ctx* exec_ctx,
-                                   grpc_call_element* elem, grpc_error* error) {
+static void async_pick_done_locked(grpc_call_element* elem, grpc_error* error) {
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
-  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
+  grpc_polling_entity_del_from_pollset_set(calld->pollent,
                                            chand->interested_parties);
-  pick_done_locked(exec_ctx, elem, error);
+  pick_done_locked(elem, error);
 }
 
 // Note: This runs under the client_channel combiner, but will NOT be
 // holding the call combiner.
-static void pick_callback_cancel_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void pick_callback_cancel_locked(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
@@ -1078,17 +1076,15 @@
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
               chand, calld, calld->lb_policy);
     }
-    grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
-                                      &calld->connected_subchannel,
-                                      GRPC_ERROR_REF(error));
+    grpc_lb_policy_cancel_pick_locked(
+        calld->lb_policy, &calld->connected_subchannel, GRPC_ERROR_REF(error));
   }
-  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
+  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
 }
 
 // Callback invoked by grpc_lb_policy_pick_locked() for async picks.
 // Unrefs the LB policy and invokes async_pick_done_locked().
-static void pick_callback_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                      grpc_error* error) {
+static void pick_callback_done_locked(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
@@ -1097,23 +1093,22 @@
             chand, calld);
   }
   GPR_ASSERT(calld->lb_policy != nullptr);
-  GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
+  GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
   calld->lb_policy = nullptr;
-  async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+  async_pick_done_locked(elem, GRPC_ERROR_REF(error));
 }
 
 // Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
 // If the pick was completed synchronously, unrefs the LB policy and
 // returns true.
-static bool pick_callback_start_locked(grpc_exec_ctx* exec_ctx,
-                                       grpc_call_element* elem) {
+static bool pick_callback_start_locked(grpc_call_element* elem) {
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
   if (grpc_client_channel_trace.enabled()) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
             chand, calld, chand->lb_policy);
   }
-  apply_service_config_to_call_locked(exec_ctx, elem);
+  apply_service_config_to_call_locked(elem);
   // If the application explicitly set wait_for_ready, use that.
   // Otherwise, if the service config specified a value for this
   // method, use that.
@@ -1143,7 +1138,7 @@
   GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
                     grpc_combiner_scheduler(chand->combiner));
   const bool pick_done = grpc_lb_policy_pick_locked(
-      exec_ctx, chand->lb_policy, &inputs, &calld->connected_subchannel,
+      chand->lb_policy, &inputs, &calld->connected_subchannel,
       calld->subchannel_call_context, nullptr, &calld->lb_pick_closure);
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
@@ -1151,12 +1146,12 @@
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
               chand, calld);
     }
-    GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
+    GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
     calld->lb_policy = nullptr;
   } else {
     GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
     grpc_call_combiner_set_notify_on_cancel(
-        exec_ctx, calld->call_combiner,
+        calld->call_combiner,
         GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
                           pick_callback_cancel_locked, elem,
                           grpc_combiner_scheduler(chand->combiner)));
@@ -1173,8 +1168,7 @@
 
 // Note: This runs under the client_channel combiner, but will NOT be
 // holding the call combiner.
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx* exec_ctx,
-                                                     void* arg,
+static void pick_after_resolver_result_cancel_locked(void* arg,
                                                      grpc_error* error) {
   pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
   if (args->finished) {
@@ -1202,16 +1196,13 @@
   // it's safe to call async_pick_done_locked() here -- we are
   // essentially calling it here instead of calling it in
   // pick_after_resolver_result_done_locked().
-  async_pick_done_locked(exec_ctx, elem,
-                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                             "Pick cancelled", &error, 1));
+  async_pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                                   "Pick cancelled", &error, 1));
 }
 
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx* exec_ctx,
-                                                    grpc_call_element* elem);
+static void pick_after_resolver_result_start_locked(grpc_call_element* elem);
 
-static void pick_after_resolver_result_done_locked(grpc_exec_ctx* exec_ctx,
-                                                   void* arg,
+static void pick_after_resolver_result_done_locked(void* arg,
                                                    grpc_error* error) {
   pick_after_resolver_result_args* args = (pick_after_resolver_result_args*)arg;
   if (args->finished) {
@@ -1231,19 +1222,19 @@
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
               chand, calld);
     }
-    async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+    async_pick_done_locked(elem, GRPC_ERROR_REF(error));
   } else if (chand->lb_policy != nullptr) {
     if (grpc_client_channel_trace.enabled()) {
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
               chand, calld);
     }
-    if (pick_callback_start_locked(exec_ctx, elem)) {
+    if (pick_callback_start_locked(elem)) {
       // Even if the LB policy returns a result synchronously, we have
       // already added our polling entity to chand->interested_parties
       // in order to wait for the resolver result, so we need to
       // remove it here.  Therefore, we call async_pick_done_locked()
       // instead of pick_done_locked().
-      async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
+      async_pick_done_locked(elem, GRPC_ERROR_NONE);
     }
   }
   // TODO(roth): It should be impossible for chand->lb_policy to be NULL
@@ -1261,19 +1252,18 @@
               "trying again",
               chand, calld);
     }
-    pick_after_resolver_result_start_locked(exec_ctx, elem);
+    pick_after_resolver_result_start_locked(elem);
   } else {
     if (grpc_client_channel_trace.enabled()) {
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver disconnected", chand,
               calld);
     }
     async_pick_done_locked(
-        exec_ctx, elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
+        elem, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
   }
 }
 
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx* exec_ctx,
-                                                    grpc_call_element* elem) {
+static void pick_after_resolver_result_start_locked(grpc_call_element* elem) {
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
   if (grpc_client_channel_trace.enabled()) {
@@ -1289,47 +1279,46 @@
   grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
                            &args->closure, GRPC_ERROR_NONE);
   grpc_call_combiner_set_notify_on_cancel(
-      exec_ctx, calld->call_combiner,
+      calld->call_combiner,
       GRPC_CLOSURE_INIT(&args->cancel_closure,
                         pick_after_resolver_result_cancel_locked, args,
                         grpc_combiner_scheduler(chand->combiner)));
 }
 
-static void start_pick_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                              grpc_error* ignored) {
+static void start_pick_locked(void* arg, grpc_error* ignored) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
   GPR_ASSERT(calld->connected_subchannel == nullptr);
   if (chand->lb_policy != nullptr) {
     // We already have an LB policy, so ask it for a pick.
-    if (pick_callback_start_locked(exec_ctx, elem)) {
+    if (pick_callback_start_locked(elem)) {
       // Pick completed synchronously.
-      pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
+      pick_done_locked(elem, GRPC_ERROR_NONE);
       return;
     }
   } else {
     // We do not yet have an LB policy, so wait for a resolver result.
     if (chand->resolver == nullptr) {
-      pick_done_locked(exec_ctx, elem,
+      pick_done_locked(elem,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
       return;
     }
     if (!chand->started_resolving) {
-      start_resolving_locked(exec_ctx, chand);
+      start_resolving_locked(chand);
     }
-    pick_after_resolver_result_start_locked(exec_ctx, elem);
+    pick_after_resolver_result_start_locked(elem);
   }
   // We need to wait for either a resolver result or for an async result
   // from the LB policy.  Add the polling entity from call_data to the
   // channel_data's interested_parties, so that the I/O of the LB policy
   // and resolver can be done under it.  The polling entity will be
   // removed in async_pick_done_locked().
-  grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+  grpc_polling_entity_add_to_pollset_set(calld->pollent,
                                          chand->interested_parties);
 }
 
-static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_complete(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
   if (calld->retry_throttle_data != nullptr) {
@@ -1345,18 +1334,15 @@
           calld->retry_throttle_data);
     }
   }
-  GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete,
-                   GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(calld->original_on_complete, GRPC_ERROR_REF(error));
 }
 
 static void cc_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* batch) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
   if (chand->deadline_checking_enabled) {
-    grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
-                                                               batch);
+    grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
   }
   GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
   // If we've previously been cancelled, immediately fail any new batches.
@@ -1366,7 +1352,7 @@
               chand, calld, grpc_error_string(calld->error));
     }
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
+        batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
     goto done;
   }
   if (batch->cancel_stream) {
@@ -1384,11 +1370,10 @@
     // If we have a subchannel call, send the cancellation batch down.
     // Otherwise, fail all pending batches.
     if (calld->subchannel_call != nullptr) {
-      grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+      grpc_subchannel_call_process_op(calld->subchannel_call, batch);
     } else {
       waiting_for_pick_batches_add(calld, batch);
-      waiting_for_pick_batches_fail(exec_ctx, elem,
-                                    GRPC_ERROR_REF(calld->error));
+      waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
     }
     goto done;
   }
@@ -1411,7 +1396,7 @@
               "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
               calld, calld->subchannel_call);
     }
-    grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+    grpc_subchannel_call_process_op(calld->subchannel_call, batch);
     goto done;
   }
   // We do not yet have a subchannel call.
@@ -1425,7 +1410,6 @@
               chand, calld);
     }
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
                           elem, grpc_combiner_scheduler(chand->combiner)),
         GRPC_ERROR_NONE);
@@ -1436,7 +1420,7 @@
               "chand=%p calld=%p: saved batch, yeilding call combiner", chand,
               calld);
     }
-    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                             "batch does not include send_initial_metadata");
   }
 done:
@@ -1444,8 +1428,7 @@
 }
 
 /* Constructor for call_data */
-static grpc_error* cc_init_call_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_call_element* elem,
+static grpc_error* cc_init_call_elem(grpc_call_element* elem,
                                      const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
@@ -1457,23 +1440,22 @@
   calld->owning_call = args->call_stack;
   calld->call_combiner = args->call_combiner;
   if (chand->deadline_checking_enabled) {
-    grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
-                             args->call_combiner, calld->deadline);
+    grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
+                             calld->deadline);
   }
   return GRPC_ERROR_NONE;
 }
 
 /* Destructor for call_data */
-static void cc_destroy_call_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_call_element* elem,
+static void cc_destroy_call_elem(grpc_call_element* elem,
                                  const grpc_call_final_info* final_info,
                                  grpc_closure* then_schedule_closure) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
   if (chand->deadline_checking_enabled) {
-    grpc_deadline_state_destroy(exec_ctx, elem);
+    grpc_deadline_state_destroy(elem);
   }
-  grpc_slice_unref_internal(exec_ctx, calld->path);
+  grpc_slice_unref_internal(calld->path);
   if (calld->method_params != nullptr) {
     method_parameters_unref(calld->method_params);
   }
@@ -1482,14 +1464,13 @@
     grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
                                              then_schedule_closure);
     then_schedule_closure = nullptr;
-    GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
+    GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
                                "client_channel_destroy_call");
   }
   GPR_ASSERT(calld->lb_policy == nullptr);
   GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
   if (calld->connected_subchannel != nullptr) {
-    GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
-                                    "picked");
+    GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->connected_subchannel, "picked");
   }
   for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
     if (calld->subchannel_call_context[i].value != nullptr) {
@@ -1497,11 +1478,10 @@
           calld->subchannel_call_context[i].value);
     }
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
 }
 
-static void cc_set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
-                                          grpc_call_element* elem,
+static void cc_set_pollset_or_pollset_set(grpc_call_element* elem,
                                           grpc_polling_entity* pollent) {
   call_data* calld = (call_data*)elem->call_data;
   calld->pollent = pollent;
@@ -1525,29 +1505,27 @@
     "client-channel",
 };
 
-static void try_to_connect_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                  grpc_error* error_ignored) {
+static void try_to_connect_locked(void* arg, grpc_error* error_ignored) {
   channel_data* chand = (channel_data*)arg;
   if (chand->lb_policy != nullptr) {
-    grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
+    grpc_lb_policy_exit_idle_locked(chand->lb_policy);
   } else {
     chand->exit_idle_when_lb_policy_arrives = true;
     if (!chand->started_resolving && chand->resolver != nullptr) {
-      start_resolving_locked(exec_ctx, chand);
+      start_resolving_locked(chand);
     }
   }
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect");
+  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
 }
 
 grpc_connectivity_state grpc_client_channel_check_connectivity_state(
-    grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, int try_to_connect) {
+    grpc_channel_element* elem, int try_to_connect) {
   channel_data* chand = (channel_data*)elem->channel_data;
   grpc_connectivity_state out =
       grpc_connectivity_state_check(&chand->state_tracker);
   if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
     GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
                             grpc_combiner_scheduler(chand->combiner)),
         GRPC_ERROR_NONE);
@@ -1628,50 +1606,49 @@
   return count;
 }
 
-static void on_external_watch_complete_locked(grpc_exec_ctx* exec_ctx,
-                                              void* arg, grpc_error* error) {
+static void on_external_watch_complete_locked(void* arg, grpc_error* error) {
   external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
   grpc_closure* follow_up = w->on_complete;
-  grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
+  grpc_polling_entity_del_from_pollset_set(&w->pollent,
                                            w->chand->interested_parties);
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
+  GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack,
                            "external_connectivity_watcher");
   external_connectivity_watcher_list_remove(w->chand, w);
   gpr_free(w);
-  GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(follow_up, GRPC_ERROR_REF(error));
 }
 
-static void watch_connectivity_state_locked(grpc_exec_ctx* exec_ctx, void* arg,
+static void watch_connectivity_state_locked(void* arg,
                                             grpc_error* error_ignored) {
   external_connectivity_watcher* w = (external_connectivity_watcher*)arg;
   external_connectivity_watcher* found = nullptr;
   if (w->state != nullptr) {
     external_connectivity_watcher_list_append(w->chand, w);
-    GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
     GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete_locked, w,
                       grpc_combiner_scheduler(w->chand->combiner));
-    grpc_connectivity_state_notify_on_state_change(
-        exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
+    grpc_connectivity_state_notify_on_state_change(&w->chand->state_tracker,
+                                                   w->state, &w->my_closure);
   } else {
     GPR_ASSERT(w->watcher_timer_init == nullptr);
     found = lookup_external_connectivity_watcher(w->chand, w->on_complete);
     if (found) {
       GPR_ASSERT(found->on_complete == w->on_complete);
       grpc_connectivity_state_notify_on_state_change(
-          exec_ctx, &found->chand->state_tracker, nullptr, &found->my_closure);
+          &found->chand->state_tracker, nullptr, &found->my_closure);
     }
-    grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
+    grpc_polling_entity_del_from_pollset_set(&w->pollent,
                                              w->chand->interested_parties);
-    GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
+    GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack,
                              "external_connectivity_watcher");
     gpr_free(w);
   }
 }
 
 void grpc_client_channel_watch_connectivity_state(
-    grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
-    grpc_polling_entity pollent, grpc_connectivity_state* state,
-    grpc_closure* closure, grpc_closure* watcher_timer_init) {
+    grpc_channel_element* elem, grpc_polling_entity pollent,
+    grpc_connectivity_state* state, grpc_closure* closure,
+    grpc_closure* watcher_timer_init) {
   channel_data* chand = (channel_data*)elem->channel_data;
   external_connectivity_watcher* w =
       (external_connectivity_watcher*)gpr_zalloc(sizeof(*w));
@@ -1680,12 +1657,11 @@
   w->on_complete = closure;
   w->state = state;
   w->watcher_timer_init = watcher_timer_init;
-  grpc_polling_entity_add_to_pollset_set(exec_ctx, &w->pollent,
+  grpc_polling_entity_add_to_pollset_set(&w->pollent,
                                          chand->interested_parties);
   GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
                          "external_connectivity_watcher");
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       GRPC_CLOSURE_INIT(&w->my_closure, watch_connectivity_state_locked, w,
                         grpc_combiner_scheduler(chand->combiner)),
       GRPC_ERROR_NONE);
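Taken together, the client_channel.cc changes above drop grpc_exec_ctx* from every signature; callers are expected to establish the execution context on the stack at API boundaries instead of threading a pointer through each call. A sketch of what a call site looks like under that model, assuming the thread-local grpc_core::ExecCtx helper in src/core/lib/iomgr/exec_ctx.h that these signatures rely on; the wrapper function itself is hypothetical:

#include "src/core/ext/filters/client_channel/client_channel.h"
#include "src/core/lib/iomgr/exec_ctx.h"

grpc_connectivity_state example_check_state(grpc_channel_element* elem) {
  // Establishes the per-thread context; code that needs it reaches it via
  // grpc_core::ExecCtx::Get() instead of an explicit parameter, and its
  // destructor flushes any closures scheduled during the call.
  grpc_core::ExecCtx exec_ctx;
  return grpc_client_channel_check_connectivity_state(elem,
                                                      /*try_to_connect=*/0);
}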
diff --git a/src/core/ext/filters/client_channel/client_channel.h b/src/core/ext/filters/client_channel/client_channel.h
index f58a8c1..9670405 100644
--- a/src/core/ext/filters/client_channel/client_channel.h
+++ b/src/core/ext/filters/client_channel/client_channel.h
@@ -28,10 +28,6 @@
 // Channel arg key for server URI string.
 #define GRPC_ARG_SERVER_URI "grpc.server_uri"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* A client channel is a channel that begins disconnected, and can connect
    to some endpoint on demand. If that endpoint disconnects, it will be
    connected to again later.
@@ -42,22 +38,18 @@
 extern const grpc_channel_filter grpc_client_channel_filter;
 
 grpc_connectivity_state grpc_client_channel_check_connectivity_state(
-    grpc_exec_ctx* exec_ctx, grpc_channel_element* elem, int try_to_connect);
+    grpc_channel_element* elem, int try_to_connect);
 
 int grpc_client_channel_num_external_connectivity_watchers(
     grpc_channel_element* elem);
 
 void grpc_client_channel_watch_connectivity_state(
-    grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
-    grpc_polling_entity pollent, grpc_connectivity_state* state,
-    grpc_closure* on_complete, grpc_closure* watcher_timer_init);
+    grpc_channel_element* elem, grpc_polling_entity pollent,
+    grpc_connectivity_state* state, grpc_closure* on_complete,
+    grpc_closure* watcher_timer_init);
 
 /* Debug helper: pull the subchannel call from a call stack element */
 grpc_subchannel_call* grpc_client_channel_get_subchannel_call(
     grpc_call_element* elem);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_H */
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.cc b/src/core/ext/filters/client_channel/client_channel_factory.cc
index 57eac8f..60c95d7 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.cc
+++ b/src/core/ext/filters/client_channel/client_channel_factory.cc
@@ -23,23 +23,19 @@
   factory->vtable->ref(factory);
 }
 
-void grpc_client_channel_factory_unref(grpc_exec_ctx* exec_ctx,
-                                       grpc_client_channel_factory* factory) {
-  factory->vtable->unref(exec_ctx, factory);
+void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory) {
+  factory->vtable->unref(factory);
 }
 
 grpc_subchannel* grpc_client_channel_factory_create_subchannel(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
-    const grpc_subchannel_args* args) {
-  return factory->vtable->create_subchannel(exec_ctx, factory, args);
+    grpc_client_channel_factory* factory, const grpc_subchannel_args* args) {
+  return factory->vtable->create_subchannel(factory, args);
 }
 
 grpc_channel* grpc_client_channel_factory_create_channel(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
-    const char* target, grpc_client_channel_type type,
-    const grpc_channel_args* args) {
-  return factory->vtable->create_client_channel(exec_ctx, factory, target, type,
-                                                args);
+    grpc_client_channel_factory* factory, const char* target,
+    grpc_client_channel_type type, const grpc_channel_args* args) {
+  return factory->vtable->create_client_channel(factory, target, type, args);
 }
 
 static void* factory_arg_copy(void* factory) {
@@ -47,9 +43,8 @@
   return factory;
 }
 
-static void factory_arg_destroy(grpc_exec_ctx* exec_ctx, void* factory) {
-  grpc_client_channel_factory_unref(exec_ctx,
-                                    (grpc_client_channel_factory*)factory);
+static void factory_arg_destroy(void* factory) {
+  grpc_client_channel_factory_unref((grpc_client_channel_factory*)factory);
 }
 
 static int factory_arg_cmp(void* factory1, void* factory2) {
diff --git a/src/core/ext/filters/client_channel/client_channel_factory.h b/src/core/ext/filters/client_channel/client_channel_factory.h
index db8645c..766ebb9 100644
--- a/src/core/ext/filters/client_channel/client_channel_factory.h
+++ b/src/core/ext/filters/client_channel/client_channel_factory.h
@@ -27,10 +27,6 @@
 // Channel arg key for client channel factory.
 #define GRPC_ARG_CLIENT_CHANNEL_FACTORY "grpc.client_channel_factory"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_client_channel_factory grpc_client_channel_factory;
 typedef struct grpc_client_channel_factory_vtable
     grpc_client_channel_factory_vtable;
@@ -49,37 +45,28 @@
 
 struct grpc_client_channel_factory_vtable {
   void (*ref)(grpc_client_channel_factory* factory);
-  void (*unref)(grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory);
-  grpc_subchannel* (*create_subchannel)(grpc_exec_ctx* exec_ctx,
-                                        grpc_client_channel_factory* factory,
+  void (*unref)(grpc_client_channel_factory* factory);
+  grpc_subchannel* (*create_subchannel)(grpc_client_channel_factory* factory,
                                         const grpc_subchannel_args* args);
-  grpc_channel* (*create_client_channel)(grpc_exec_ctx* exec_ctx,
-                                         grpc_client_channel_factory* factory,
+  grpc_channel* (*create_client_channel)(grpc_client_channel_factory* factory,
                                          const char* target,
                                          grpc_client_channel_type type,
                                          const grpc_channel_args* args);
 };
 
 void grpc_client_channel_factory_ref(grpc_client_channel_factory* factory);
-void grpc_client_channel_factory_unref(grpc_exec_ctx* exec_ctx,
-                                       grpc_client_channel_factory* factory);
+void grpc_client_channel_factory_unref(grpc_client_channel_factory* factory);
 
 /** Create a new grpc_subchannel */
 grpc_subchannel* grpc_client_channel_factory_create_subchannel(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
-    const grpc_subchannel_args* args);
+    grpc_client_channel_factory* factory, const grpc_subchannel_args* args);
 
 /** Create a new grpc_channel */
 grpc_channel* grpc_client_channel_factory_create_channel(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* factory,
-    const char* target, grpc_client_channel_type type,
-    const grpc_channel_args* args);
+    grpc_client_channel_factory* factory, const char* target,
+    grpc_client_channel_type type, const grpc_channel_args* args);
 
 grpc_arg grpc_client_channel_factory_create_channel_arg(
     grpc_client_channel_factory* factory);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CLIENT_CHANNEL_FACTORY_H */
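For code that implements this interface, the change is mechanical: every vtable entry loses its leading grpc_exec_ctx* parameter. A skeleton factory written against the vtable above; the example_* names are hypothetical and the bodies are placeholders:

static void example_factory_ref(grpc_client_channel_factory* factory) {}
static void example_factory_unref(grpc_client_channel_factory* factory) {}

static grpc_subchannel* example_factory_create_subchannel(
    grpc_client_channel_factory* factory, const grpc_subchannel_args* args) {
  return nullptr;  // placeholder: would create and return a subchannel
}

static grpc_channel* example_factory_create_channel(
    grpc_client_channel_factory* factory, const char* target,
    grpc_client_channel_type type, const grpc_channel_args* args) {
  return nullptr;  // placeholder: would create and return a channel
}

static const grpc_client_channel_factory_vtable example_factory_vtable = {
    example_factory_ref, example_factory_unref,
    example_factory_create_subchannel, example_factory_create_channel};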
diff --git a/src/core/ext/filters/client_channel/client_channel_plugin.cc b/src/core/ext/filters/client_channel/client_channel_plugin.cc
index c1b57d0..ea630d2 100644
--- a/src/core/ext/filters/client_channel/client_channel_plugin.cc
+++ b/src/core/ext/filters/client_channel/client_channel_plugin.cc
@@ -34,14 +34,12 @@
 #include "src/core/ext/filters/client_channel/subchannel_index.h"
 #include "src/core/lib/surface/channel_init.h"
 
-static bool append_filter(grpc_exec_ctx* exec_ctx,
-                          grpc_channel_stack_builder* builder, void* arg) {
+static bool append_filter(grpc_channel_stack_builder* builder, void* arg) {
   return grpc_channel_stack_builder_append_filter(
       builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
 }
 
-static bool set_default_host_if_unset(grpc_exec_ctx* exec_ctx,
-                                      grpc_channel_stack_builder* builder,
+static bool set_default_host_if_unset(grpc_channel_stack_builder* builder,
                                       void* unused) {
   const grpc_channel_args* args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
@@ -52,20 +50,19 @@
     }
   }
   char* default_authority = grpc_get_default_authority(
-      exec_ctx, grpc_channel_stack_builder_get_target(builder));
+      grpc_channel_stack_builder_get_target(builder));
   if (default_authority != nullptr) {
     grpc_arg arg = grpc_channel_arg_string_create(
         (char*)GRPC_ARG_DEFAULT_AUTHORITY, default_authority);
     grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
-    grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
-                                                     new_args);
+    grpc_channel_stack_builder_set_channel_arguments(builder, new_args);
     gpr_free(default_authority);
-    grpc_channel_args_destroy(exec_ctx, new_args);
+    grpc_channel_args_destroy(new_args);
   }
   return true;
 }
 
-extern "C" void grpc_client_channel_init(void) {
+void grpc_client_channel_init(void) {
   grpc_lb_policy_registry_init();
   grpc_resolver_registry_init();
   grpc_retry_throttle_map_init();
@@ -80,7 +77,7 @@
   grpc_http_connect_register_handshaker_factory();
 }
 
-extern "C" void grpc_client_channel_shutdown(void) {
+void grpc_client_channel_shutdown(void) {
   grpc_subchannel_index_shutdown();
   grpc_channel_init_shutdown();
   grpc_proxy_mapper_registry_shutdown();
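The builder stages here (append_filter, set_default_host_if_unset) now use the exec_ctx-free stage shape: bool (*)(grpc_channel_stack_builder*, void*). A sketch of a custom stage in the same shape, appending a hypothetical filter passed through the stage argument; only grpc_channel_stack_builder_append_filter, already used above, is assumed:

static bool append_example_filter(grpc_channel_stack_builder* builder,
                                  void* arg) {
  // 'arg' carries the filter to install, exactly as in append_filter above.
  const grpc_channel_filter* filter = (const grpc_channel_filter*)arg;
  return grpc_channel_stack_builder_append_filter(builder, filter, nullptr,
                                                  nullptr);
}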
diff --git a/src/core/ext/filters/client_channel/connector.cc b/src/core/ext/filters/client_channel/connector.cc
index c258468..c8bf2f3 100644
--- a/src/core/ext/filters/client_channel/connector.cc
+++ b/src/core/ext/filters/client_channel/connector.cc
@@ -23,18 +23,17 @@
   return connector;
 }
 
-void grpc_connector_unref(grpc_exec_ctx* exec_ctx, grpc_connector* connector) {
-  connector->vtable->unref(exec_ctx, connector);
+void grpc_connector_unref(grpc_connector* connector) {
+  connector->vtable->unref(connector);
 }
 
-void grpc_connector_connect(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+void grpc_connector_connect(grpc_connector* connector,
                             const grpc_connect_in_args* in_args,
                             grpc_connect_out_args* out_args,
                             grpc_closure* notify) {
-  connector->vtable->connect(exec_ctx, connector, in_args, out_args, notify);
+  connector->vtable->connect(connector, in_args, out_args, notify);
 }
 
-void grpc_connector_shutdown(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
-                             grpc_error* why) {
-  connector->vtable->shutdown(exec_ctx, connector, why);
+void grpc_connector_shutdown(grpc_connector* connector, grpc_error* why) {
+  connector->vtable->shutdown(connector, why);
 }
diff --git a/src/core/ext/filters/client_channel/connector.h b/src/core/ext/filters/client_channel/connector.h
index 12dc59b..d657658 100644
--- a/src/core/ext/filters/client_channel/connector.h
+++ b/src/core/ext/filters/client_channel/connector.h
@@ -23,10 +23,6 @@
 #include "src/core/lib/iomgr/resolve_address.h"
 #include "src/core/lib/transport/transport.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_connector grpc_connector;
 typedef struct grpc_connector_vtable grpc_connector_vtable;
 
@@ -53,29 +49,23 @@
 
 struct grpc_connector_vtable {
   void (*ref)(grpc_connector* connector);
-  void (*unref)(grpc_exec_ctx* exec_ctx, grpc_connector* connector);
+  void (*unref)(grpc_connector* connector);
   /** Implementation of grpc_connector_shutdown */
-  void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
-                   grpc_error* why);
+  void (*shutdown)(grpc_connector* connector, grpc_error* why);
   /** Implementation of grpc_connector_connect */
-  void (*connect)(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+  void (*connect)(grpc_connector* connector,
                   const grpc_connect_in_args* in_args,
                   grpc_connect_out_args* out_args, grpc_closure* notify);
 };
 
 grpc_connector* grpc_connector_ref(grpc_connector* connector);
-void grpc_connector_unref(grpc_exec_ctx* exec_ctx, grpc_connector* connector);
+void grpc_connector_unref(grpc_connector* connector);
 /** Connect using the connector: max one outstanding call at a time */
-void grpc_connector_connect(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
+void grpc_connector_connect(grpc_connector* connector,
                             const grpc_connect_in_args* in_args,
                             grpc_connect_out_args* out_args,
                             grpc_closure* notify);
 /** Cancel any pending connection */
-void grpc_connector_shutdown(grpc_exec_ctx* exec_ctx, grpc_connector* connector,
-                             grpc_error* why);
-
-#ifdef __cplusplus
-}
-#endif
+void grpc_connector_shutdown(grpc_connector* connector, grpc_error* why);
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_CONNECTOR_H */
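The connector keeps the same ref/connect/shutdown life cycle; only the signatures lose the exec_ctx argument. A hedged sketch of a connect attempt against the vtable wrappers declared above (closure setup and ref handling are illustrative only):

    #include "src/core/ext/filters/client_channel/connector.h"

    // Completion callback now takes only (arg, error), matching the new
    // grpc_closure convention used throughout this merge.
    static void on_connected(void* arg, grpc_error* error) {
      grpc_connector* connector = (grpc_connector*)arg;
      // ... inspect the grpc_connect_out_args filled in by the connector ...
      grpc_connector_unref(connector);  // drop the ref taken for this attempt
    }

    static void start_connect(grpc_connector* connector,
                              const grpc_connect_in_args* in_args,
                              grpc_connect_out_args* out_args,
                              grpc_closure* notify_storage) {
      grpc_connector_ref(connector);  // keep the connector alive until on_connected
      GRPC_CLOSURE_INIT(notify_storage, on_connected, connector,
                        grpc_schedule_on_exec_ctx);
      grpc_connector_connect(connector, in_args, out_args, notify_storage);
    }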
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.cc b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
index b7cb2e3..556a3bc 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.cc
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.cc
@@ -61,41 +61,38 @@
 } http_connect_handshaker;
 
 // Unref and clean up handshaker.
-static void http_connect_handshaker_unref(grpc_exec_ctx* exec_ctx,
-                                          http_connect_handshaker* handshaker) {
+static void http_connect_handshaker_unref(http_connect_handshaker* handshaker) {
   if (gpr_unref(&handshaker->refcount)) {
     gpr_mu_destroy(&handshaker->mu);
     if (handshaker->endpoint_to_destroy != nullptr) {
-      grpc_endpoint_destroy(exec_ctx, handshaker->endpoint_to_destroy);
+      grpc_endpoint_destroy(handshaker->endpoint_to_destroy);
     }
     if (handshaker->read_buffer_to_destroy != nullptr) {
-      grpc_slice_buffer_destroy_internal(exec_ctx,
-                                         handshaker->read_buffer_to_destroy);
+      grpc_slice_buffer_destroy_internal(handshaker->read_buffer_to_destroy);
       gpr_free(handshaker->read_buffer_to_destroy);
     }
-    grpc_slice_buffer_destroy_internal(exec_ctx, &handshaker->write_buffer);
+    grpc_slice_buffer_destroy_internal(&handshaker->write_buffer);
     grpc_http_parser_destroy(&handshaker->http_parser);
     grpc_http_response_destroy(&handshaker->http_response);
     gpr_free(handshaker);
   }
 }
 
-// Set args fields to NULL, saving the endpoint and read buffer for
+// Set args fields to nullptr, saving the endpoint and read buffer for
 // later destruction.
 static void cleanup_args_for_failure_locked(
-    grpc_exec_ctx* exec_ctx, http_connect_handshaker* handshaker) {
+    http_connect_handshaker* handshaker) {
   handshaker->endpoint_to_destroy = handshaker->args->endpoint;
   handshaker->args->endpoint = nullptr;
   handshaker->read_buffer_to_destroy = handshaker->args->read_buffer;
   handshaker->args->read_buffer = nullptr;
-  grpc_channel_args_destroy(exec_ctx, handshaker->args->args);
+  grpc_channel_args_destroy(handshaker->args->args);
   handshaker->args->args = nullptr;
 }
 
 // If the handshake failed or we're shutting down, clean up and invoke the
 // callback with the error.
-static void handshake_failed_locked(grpc_exec_ctx* exec_ctx,
-                                    http_connect_handshaker* handshaker,
+static void handshake_failed_locked(http_connect_handshaker* handshaker,
                                     grpc_error* error) {
   if (error == GRPC_ERROR_NONE) {
     // If we were shut down after an endpoint operation succeeded but
@@ -108,34 +105,32 @@
     // before destroying them, even if we know that there are no
     // pending read/write callbacks.  This should be fixed, at which
     // point this can be removed.
-    grpc_endpoint_shutdown(exec_ctx, handshaker->args->endpoint,
-                           GRPC_ERROR_REF(error));
+    grpc_endpoint_shutdown(handshaker->args->endpoint, GRPC_ERROR_REF(error));
     // Not shutting down, so the handshake failed.  Clean up before
     // invoking the callback.
-    cleanup_args_for_failure_locked(exec_ctx, handshaker);
+    cleanup_args_for_failure_locked(handshaker);
     // Set shutdown to true so that subsequent calls to
     // http_connect_handshaker_shutdown() do nothing.
     handshaker->shutdown = true;
   }
   // Invoke callback.
-  GRPC_CLOSURE_SCHED(exec_ctx, handshaker->on_handshake_done, error);
+  GRPC_CLOSURE_SCHED(handshaker->on_handshake_done, error);
 }
 
 // Callback invoked when finished writing HTTP CONNECT request.
-static void on_write_done(grpc_exec_ctx* exec_ctx, void* arg,
-                          grpc_error* error) {
+static void on_write_done(void* arg, grpc_error* error) {
   http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
   gpr_mu_lock(&handshaker->mu);
   if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
     // If the write failed or we're shutting down, clean up and invoke the
     // callback with the error.
-    handshake_failed_locked(exec_ctx, handshaker, GRPC_ERROR_REF(error));
+    handshake_failed_locked(handshaker, GRPC_ERROR_REF(error));
     gpr_mu_unlock(&handshaker->mu);
-    http_connect_handshaker_unref(exec_ctx, handshaker);
+    http_connect_handshaker_unref(handshaker);
   } else {
     // Otherwise, read the response.
     // The read callback inherits our ref to the handshaker.
-    grpc_endpoint_read(exec_ctx, handshaker->args->endpoint,
+    grpc_endpoint_read(handshaker->args->endpoint,
                        handshaker->args->read_buffer,
                        &handshaker->response_read_closure);
     gpr_mu_unlock(&handshaker->mu);
@@ -143,14 +138,13 @@
 }
 
 // Callback invoked for reading HTTP CONNECT response.
-static void on_read_done(grpc_exec_ctx* exec_ctx, void* arg,
-                         grpc_error* error) {
+static void on_read_done(void* arg, grpc_error* error) {
   http_connect_handshaker* handshaker = (http_connect_handshaker*)arg;
   gpr_mu_lock(&handshaker->mu);
   if (error != GRPC_ERROR_NONE || handshaker->shutdown) {
     // If the read failed or we're shutting down, clean up and invoke the
     // callback with the error.
-    handshake_failed_locked(exec_ctx, handshaker, GRPC_ERROR_REF(error));
+    handshake_failed_locked(handshaker, GRPC_ERROR_REF(error));
     goto done;
   }
   // Add buffer to parser.
@@ -161,7 +155,7 @@
                                      handshaker->args->read_buffer->slices[i],
                                      &body_start_offset);
       if (error != GRPC_ERROR_NONE) {
-        handshake_failed_locked(exec_ctx, handshaker, error);
+        handshake_failed_locked(handshaker, error);
         goto done;
       }
       if (handshaker->http_parser.state == GRPC_HTTP_BODY) {
@@ -180,7 +174,7 @@
                                &handshaker->args->read_buffer->slices[i + 1],
                                handshaker->args->read_buffer->count - i - 1);
         grpc_slice_buffer_swap(handshaker->args->read_buffer, &tmp_buffer);
-        grpc_slice_buffer_destroy_internal(exec_ctx, &tmp_buffer);
+        grpc_slice_buffer_destroy_internal(&tmp_buffer);
         break;
       }
     }
@@ -197,9 +191,8 @@
   // complete (e.g., handling chunked transfer encoding or looking
   // at the Content-Length: header).
   if (handshaker->http_parser.state != GRPC_HTTP_BODY) {
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
-                                               handshaker->args->read_buffer);
-    grpc_endpoint_read(exec_ctx, handshaker->args->endpoint,
+    grpc_slice_buffer_reset_and_unref_internal(handshaker->args->read_buffer);
+    grpc_endpoint_read(handshaker->args->endpoint,
                        handshaker->args->read_buffer,
                        &handshaker->response_read_closure);
     gpr_mu_unlock(&handshaker->mu);
@@ -213,48 +206,44 @@
                  handshaker->http_response.status);
     error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
     gpr_free(msg);
-    handshake_failed_locked(exec_ctx, handshaker, error);
+    handshake_failed_locked(handshaker, error);
     goto done;
   }
   // Success.  Invoke handshake-done callback.
-  GRPC_CLOSURE_SCHED(exec_ctx, handshaker->on_handshake_done, error);
+  GRPC_CLOSURE_SCHED(handshaker->on_handshake_done, error);
 done:
   // Set shutdown to true so that subsequent calls to
   // http_connect_handshaker_shutdown() do nothing.
   handshaker->shutdown = true;
   gpr_mu_unlock(&handshaker->mu);
-  http_connect_handshaker_unref(exec_ctx, handshaker);
+  http_connect_handshaker_unref(handshaker);
 }
 
 //
 // Public handshaker methods
 //
 
-static void http_connect_handshaker_destroy(grpc_exec_ctx* exec_ctx,
-                                            grpc_handshaker* handshaker_in) {
+static void http_connect_handshaker_destroy(grpc_handshaker* handshaker_in) {
   http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
-  http_connect_handshaker_unref(exec_ctx, handshaker);
+  http_connect_handshaker_unref(handshaker);
 }
 
-static void http_connect_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
-                                             grpc_handshaker* handshaker_in,
+static void http_connect_handshaker_shutdown(grpc_handshaker* handshaker_in,
                                              grpc_error* why) {
   http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
   gpr_mu_lock(&handshaker->mu);
   if (!handshaker->shutdown) {
     handshaker->shutdown = true;
-    grpc_endpoint_shutdown(exec_ctx, handshaker->args->endpoint,
-                           GRPC_ERROR_REF(why));
-    cleanup_args_for_failure_locked(exec_ctx, handshaker);
+    grpc_endpoint_shutdown(handshaker->args->endpoint, GRPC_ERROR_REF(why));
+    cleanup_args_for_failure_locked(handshaker);
   }
   gpr_mu_unlock(&handshaker->mu);
   GRPC_ERROR_UNREF(why);
 }
 
 static void http_connect_handshaker_do_handshake(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker_in,
-    grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done,
-    grpc_handshaker_args* args) {
+    grpc_handshaker* handshaker_in, grpc_tcp_server_acceptor* acceptor,
+    grpc_closure* on_handshake_done, grpc_handshaker_args* args) {
   http_connect_handshaker* handshaker = (http_connect_handshaker*)handshaker_in;
   // Check for HTTP CONNECT channel arg.
   // If not found, invoke on_handshake_done without doing anything.
@@ -266,7 +255,7 @@
     gpr_mu_lock(&handshaker->mu);
     handshaker->shutdown = true;
     gpr_mu_unlock(&handshaker->mu);
-    GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(on_handshake_done, GRPC_ERROR_NONE);
     return;
   }
   GPR_ASSERT(arg->type == GRPC_ARG_STRING);
@@ -324,7 +313,7 @@
   gpr_free(header_strings);
   // Take a new ref to be held by the write callback.
   gpr_ref(&handshaker->refcount);
-  grpc_endpoint_write(exec_ctx, args->endpoint, &handshaker->write_buffer,
+  grpc_endpoint_write(args->endpoint, &handshaker->write_buffer,
                       &handshaker->request_done_closure);
   gpr_mu_unlock(&handshaker->mu);
 }
@@ -355,14 +344,13 @@
 //
 
 static void handshaker_factory_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* factory,
-    const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) {
+    grpc_handshaker_factory* factory, const grpc_channel_args* args,
+    grpc_handshake_manager* handshake_mgr) {
   grpc_handshake_manager_add(handshake_mgr,
                              grpc_http_connect_handshaker_create());
 }
 
-static void handshaker_factory_destroy(grpc_exec_ctx* exec_ctx,
-                                       grpc_handshaker_factory* factory) {}
+static void handshaker_factory_destroy(grpc_handshaker_factory* factory) {}
 
 static const grpc_handshaker_factory_vtable handshaker_factory_vtable = {
     handshaker_factory_add_handshakers, handshaker_factory_destroy};
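The handshaker-factory vtable keeps the same two entries, minus exec_ctx. For comparison, a hypothetical no-op factory under the new signatures (the grpc_handshaker_factory struct is assumed to carry just a vtable pointer, as the initialization above suggests):

    #include "src/core/lib/channel/handshaker.h"  // assumed header for the factory types

    static void noop_add_handshakers(grpc_handshaker_factory* factory,
                                     const grpc_channel_args* args,
                                     grpc_handshake_manager* handshake_mgr) {
      // Intentionally adds no handshakers.
    }
    static void noop_destroy(grpc_handshaker_factory* factory) {}

    static const grpc_handshaker_factory_vtable noop_vtable = {
        noop_add_handshakers, noop_destroy};
    static grpc_handshaker_factory noop_factory = {&noop_vtable};  // assumption: vtable-only struct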
diff --git a/src/core/ext/filters/client_channel/http_connect_handshaker.h b/src/core/ext/filters/client_channel/http_connect_handshaker.h
index 05a23cd..928a23d 100644
--- a/src/core/ext/filters/client_channel/http_connect_handshaker.h
+++ b/src/core/ext/filters/client_channel/http_connect_handshaker.h
@@ -28,15 +28,7 @@
 /// separated by colons.
 #define GRPC_ARG_HTTP_CONNECT_HEADERS "grpc.http_connect_headers"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /// Registers handshaker factory.
 void grpc_http_connect_register_handshaker_factory();
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_CONNECT_HANDSHAKER_H */
diff --git a/src/core/ext/filters/client_channel/http_proxy.cc b/src/core/ext/filters/client_channel/http_proxy.cc
index 405d8c0..2eafeee 100644
--- a/src/core/ext/filters/client_channel/http_proxy.cc
+++ b/src/core/ext/filters/client_channel/http_proxy.cc
@@ -36,19 +36,18 @@
 
 /**
  * Parses the 'http_proxy' env var and returns the proxy hostname to resolve or
- * NULL on error. Also sets 'user_cred' to user credentials if present in the
+ * nullptr on error. Also sets 'user_cred' to user credentials if present in the
  * 'http_proxy' env var, otherwise leaves it unchanged. It is caller's
  * responsibility to gpr_free user_cred.
  */
-static char* get_http_proxy_server(grpc_exec_ctx* exec_ctx, char** user_cred) {
+static char* get_http_proxy_server(char** user_cred) {
   GPR_ASSERT(user_cred != nullptr);
   char* proxy_name = nullptr;
   char* uri_str = gpr_getenv("http_proxy");
   char** authority_strs = nullptr;
   size_t authority_nstrs;
   if (uri_str == nullptr) return nullptr;
-  grpc_uri* uri =
-      grpc_uri_parse(exec_ctx, uri_str, false /* suppress_errors */);
+  grpc_uri* uri = grpc_uri_parse(uri_str, false /* suppress_errors */);
   if (uri == nullptr || uri->authority == nullptr) {
     gpr_log(GPR_ERROR, "cannot parse value of 'http_proxy' env var");
     goto done;
@@ -82,18 +81,16 @@
   return proxy_name;
 }
 
-static bool proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
-                                  grpc_proxy_mapper* mapper,
+static bool proxy_mapper_map_name(grpc_proxy_mapper* mapper,
                                   const char* server_uri,
                                   const grpc_channel_args* args,
                                   char** name_to_resolve,
                                   grpc_channel_args** new_args) {
   char* user_cred = nullptr;
-  *name_to_resolve = get_http_proxy_server(exec_ctx, &user_cred);
+  *name_to_resolve = get_http_proxy_server(&user_cred);
   if (*name_to_resolve == nullptr) return false;
   char* no_proxy_str = nullptr;
-  grpc_uri* uri =
-      grpc_uri_parse(exec_ctx, server_uri, false /* suppress_errors */);
+  grpc_uri* uri = grpc_uri_parse(server_uri, false /* suppress_errors */);
   if (uri == nullptr || uri->path[0] == '\0') {
     gpr_log(GPR_ERROR,
             "'http_proxy' environment variable set, but cannot "
@@ -174,8 +171,7 @@
   return false;
 }
 
-static bool proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,
-                                     grpc_proxy_mapper* mapper,
+static bool proxy_mapper_map_address(grpc_proxy_mapper* mapper,
                                      const grpc_resolved_address* address,
                                      const grpc_channel_args* args,
                                      grpc_resolved_address** new_address,
diff --git a/src/core/ext/filters/client_channel/http_proxy.h b/src/core/ext/filters/client_channel/http_proxy.h
index bdad03d..3469493 100644
--- a/src/core/ext/filters/client_channel/http_proxy.h
+++ b/src/core/ext/filters/client_channel/http_proxy.h
@@ -19,14 +19,6 @@
 #ifndef GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H
 #define GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void grpc_register_http_proxy_mapper();
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_HTTP_PROXY_H */
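The http_proxy.cc hunk above keeps the same parsing logic, now without exec_ctx: the 'http_proxy' environment variable is read, optional user credentials are split off at '@', and the remaining authority is returned for resolution. A small, hypothetical in-file usage sketch (get_http_proxy_server() is static to http_proxy.cc):

    #include <grpc/support/alloc.h>
    #include <grpc/support/log.h>

    // With http_proxy=http://user:pwd@proxy.example.com:8080 in the environment,
    // get_http_proxy_server() is expected to return "proxy.example.com:8080" and
    // set *user_cred to "user:pwd"; the caller owns (and must gpr_free) both.
    static void example_lookup_proxy(void) {
      char* user_cred = nullptr;
      char* proxy = get_http_proxy_server(&user_cred);  // signature from the hunk above
      if (proxy != nullptr) {
        gpr_log(GPR_INFO, "proxy=%s creds=%s", proxy,
                user_cred == nullptr ? "(none)" : user_cred);
        gpr_free(proxy);
      }
      gpr_free(user_cred);  // freeing nullptr is a no-op
    }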
diff --git a/src/core/ext/filters/client_channel/lb_policy.cc b/src/core/ext/filters/client_channel/lb_policy.cc
index 6276c3e..7a5a8de 100644
--- a/src/core/ext/filters/client_channel/lb_policy.cc
+++ b/src/core/ext/filters/client_channel/lb_policy.cc
@@ -63,15 +63,13 @@
   ref_mutate(policy, 1 << WEAK_REF_BITS, 0 REF_MUTATE_PASS_ARGS("STRONG_REF"));
 }
 
-static void shutdown_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+static void shutdown_locked(void* arg, grpc_error* error) {
   grpc_lb_policy* policy = (grpc_lb_policy*)arg;
-  policy->vtable->shutdown_locked(exec_ctx, policy);
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, policy, "strong-unref");
+  policy->vtable->shutdown_locked(policy);
+  GRPC_LB_POLICY_WEAK_UNREF(policy, "strong-unref");
 }
 
-void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx,
-                          grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
   gpr_atm old_val =
       ref_mutate(policy, (gpr_atm)1 - (gpr_atm)(1 << WEAK_REF_BITS),
                  1 REF_MUTATE_PASS_ARGS("STRONG_UNREF"));
@@ -79,13 +77,11 @@
   gpr_atm check = 1 << WEAK_REF_BITS;
   if ((old_val & mask) == check) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_CREATE(shutdown_locked, policy,
                             grpc_combiner_scheduler(policy->combiner)),
         GRPC_ERROR_NONE);
   } else {
-    grpc_lb_policy_weak_unref(exec_ctx,
-                              policy REF_FUNC_PASS_ARGS("strong-unref"));
+    grpc_lb_policy_weak_unref(policy REF_FUNC_PASS_ARGS("strong-unref"));
   }
 }
 
@@ -93,71 +89,86 @@
   ref_mutate(policy, 1, 0 REF_MUTATE_PASS_ARGS("WEAK_REF"));
 }
 
-void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx,
-                               grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
+void grpc_lb_policy_weak_unref(grpc_lb_policy* policy REF_FUNC_EXTRA_ARGS) {
   gpr_atm old_val =
       ref_mutate(policy, -(gpr_atm)1, 1 REF_MUTATE_PASS_ARGS("WEAK_UNREF"));
   if (old_val == 1) {
-    grpc_pollset_set_destroy(exec_ctx, policy->interested_parties);
+    grpc_pollset_set_destroy(policy->interested_parties);
     grpc_combiner* combiner = policy->combiner;
-    policy->vtable->destroy(exec_ctx, policy);
-    GRPC_COMBINER_UNREF(exec_ctx, combiner, "lb_policy");
+    policy->vtable->destroy(policy);
+    GRPC_COMBINER_UNREF(combiner, "lb_policy");
   }
 }
 
-int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
                                const grpc_lb_policy_pick_args* pick_args,
                                grpc_connected_subchannel** target,
                                grpc_call_context_element* context,
                                void** user_data, grpc_closure* on_complete) {
-  return policy->vtable->pick_locked(exec_ctx, policy, pick_args, target,
-                                     context, user_data, on_complete);
+  return policy->vtable->pick_locked(policy, pick_args, target, context,
+                                     user_data, on_complete);
 }
 
-void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx* exec_ctx,
-                                       grpc_lb_policy* policy,
+void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
                                        grpc_connected_subchannel** target,
                                        grpc_error* error) {
-  policy->vtable->cancel_pick_locked(exec_ctx, policy, target, error);
+  policy->vtable->cancel_pick_locked(policy, target, error);
 }
 
-void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
-                                        grpc_lb_policy* policy,
+void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy,
                                         uint32_t initial_metadata_flags_mask,
                                         uint32_t initial_metadata_flags_eq,
                                         grpc_error* error) {
-  policy->vtable->cancel_picks_locked(exec_ctx, policy,
-                                      initial_metadata_flags_mask,
+  policy->vtable->cancel_picks_locked(policy, initial_metadata_flags_mask,
                                       initial_metadata_flags_eq, error);
 }
 
-void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
-                                     grpc_lb_policy* policy) {
-  policy->vtable->exit_idle_locked(exec_ctx, policy);
+void grpc_lb_policy_exit_idle_locked(grpc_lb_policy* policy) {
+  policy->vtable->exit_idle_locked(policy);
 }
 
-void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
-                                    grpc_lb_policy* policy,
-                                    grpc_closure* closure) {
-  policy->vtable->ping_one_locked(exec_ctx, policy, closure);
+void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
+                                    grpc_closure* on_initiate,
+                                    grpc_closure* on_ack) {
+  policy->vtable->ping_one_locked(policy, on_initiate, on_ack);
 }
 
 void grpc_lb_policy_notify_on_state_change_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
-    grpc_connectivity_state* state, grpc_closure* closure) {
-  policy->vtable->notify_on_state_change_locked(exec_ctx, policy, state,
-                                                closure);
+    grpc_lb_policy* policy, grpc_connectivity_state* state,
+    grpc_closure* closure) {
+  policy->vtable->notify_on_state_change_locked(policy, state, closure);
 }
 
 grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
-    grpc_error** connectivity_error) {
-  return policy->vtable->check_connectivity_locked(exec_ctx, policy,
-                                                   connectivity_error);
+    grpc_lb_policy* policy, grpc_error** connectivity_error) {
+  return policy->vtable->check_connectivity_locked(policy, connectivity_error);
 }
 
-void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
-                                  grpc_lb_policy* policy,
+void grpc_lb_policy_update_locked(grpc_lb_policy* policy,
                                   const grpc_lb_policy_args* lb_policy_args) {
-  policy->vtable->update_locked(exec_ctx, policy, lb_policy_args);
+  policy->vtable->update_locked(policy, lb_policy_args);
+}
+
+void grpc_lb_policy_set_reresolve_closure_locked(
+    grpc_lb_policy* policy, grpc_closure* request_reresolution) {
+  policy->vtable->set_reresolve_closure_locked(policy, request_reresolution);
+}
+
+void grpc_lb_policy_try_reresolve(grpc_lb_policy* policy,
+                                  grpc_core::TraceFlag* grpc_lb_trace,
+                                  grpc_error* error) {
+  if (policy->request_reresolution != nullptr) {
+    GRPC_CLOSURE_SCHED(policy->request_reresolution, error);
+    policy->request_reresolution = nullptr;
+    if (grpc_lb_trace->enabled()) {
+      gpr_log(GPR_DEBUG,
+              "%s %p: scheduling re-resolution closure with error=%s.",
+              grpc_lb_trace->name(), policy, grpc_error_string(error));
+    }
+  } else {
+    if (grpc_lb_trace->enabled() && error == GRPC_ERROR_NONE) {
+      gpr_log(GPR_DEBUG, "%s %p: re-resolution already in progress.",
+              grpc_lb_trace->name(), policy);
+    }
+  }
 }
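grpc_lb_policy_try_reresolve() (added above) gives LB policies a one-shot way to ask the owning channel to re-run name resolution: the channel installs a closure via grpc_lb_policy_set_reresolve_closure_locked(), and the next try_reresolve() consumes it. A hedged sketch of the policy-side call (both functions must run under the policy's combiner; the trace flag is whichever one the policy already owns):

    // Channel side (sketch):
    //   grpc_lb_policy_set_reresolve_closure_locked(policy, &reresolution_closure);
    // Policy side (sketch): request re-resolution, e.g. when every subchannel
    // has reported TRANSIENT_FAILURE.
    static void maybe_request_reresolution(grpc_lb_policy* policy,
                                           grpc_core::TraceFlag* tracer) {
      // GRPC_ERROR_NONE marks a plain "please re-resolve" request; once the
      // closure is consumed, further calls only log until the channel re-arms it.
      grpc_lb_policy_try_reresolve(policy, tracer, GRPC_ERROR_NONE);
    }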
diff --git a/src/core/ext/filters/client_channel/lb_policy.h b/src/core/ext/filters/client_channel/lb_policy.h
index cd40b4d..3572c97 100644
--- a/src/core/ext/filters/client_channel/lb_policy.h
+++ b/src/core/ext/filters/client_channel/lb_policy.h
@@ -23,10 +23,6 @@
 #include "src/core/lib/iomgr/polling_entity.h"
 #include "src/core/lib/transport/connectivity_state.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** A load balancing policy: specified by a vtable and a struct (which
     is expected to be extended to contain some parameters) */
 typedef struct grpc_lb_policy grpc_lb_policy;
@@ -42,6 +38,8 @@
   grpc_pollset_set* interested_parties;
   /* combiner under which lb_policy actions take place */
   grpc_combiner* combiner;
+  /* callback to force a re-resolution */
+  grpc_closure* request_reresolution;
 };
 
 /** Extra arguments for an LB pick */
@@ -57,49 +55,51 @@
 } grpc_lb_policy_pick_args;
 
 struct grpc_lb_policy_vtable {
-  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
-  void (*shutdown_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+  void (*destroy)(grpc_lb_policy* policy);
+  void (*shutdown_locked)(grpc_lb_policy* policy);
 
   /** \see grpc_lb_policy_pick */
-  int (*pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+  int (*pick_locked)(grpc_lb_policy* policy,
                      const grpc_lb_policy_pick_args* pick_args,
                      grpc_connected_subchannel** target,
                      grpc_call_context_element* context, void** user_data,
                      grpc_closure* on_complete);
 
   /** \see grpc_lb_policy_cancel_pick */
-  void (*cancel_pick_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+  void (*cancel_pick_locked)(grpc_lb_policy* policy,
                              grpc_connected_subchannel** target,
                              grpc_error* error);
 
   /** \see grpc_lb_policy_cancel_picks */
-  void (*cancel_picks_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+  void (*cancel_picks_locked)(grpc_lb_policy* policy,
                               uint32_t initial_metadata_flags_mask,
                               uint32_t initial_metadata_flags_eq,
                               grpc_error* error);
 
   /** \see grpc_lb_policy_ping_one */
-  void (*ping_one_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
-                          grpc_closure* closure);
+  void (*ping_one_locked)(grpc_lb_policy* policy, grpc_closure* on_initiate,
+                          grpc_closure* on_ack);
 
   /** Try to enter a READY connectivity state */
-  void (*exit_idle_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+  void (*exit_idle_locked)(grpc_lb_policy* policy);
 
   /** check the current connectivity of the lb_policy */
   grpc_connectivity_state (*check_connectivity_locked)(
-      grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
-      grpc_error** connectivity_error);
+      grpc_lb_policy* policy, grpc_error** connectivity_error);
 
   /** call notify when the connectivity state of a channel changes from *state.
       Updates *state with the new state of the policy. Calling with a NULL \a
       state cancels the subscription.  */
-  void (*notify_on_state_change_locked)(grpc_exec_ctx* exec_ctx,
-                                        grpc_lb_policy* policy,
+  void (*notify_on_state_change_locked)(grpc_lb_policy* policy,
                                         grpc_connectivity_state* state,
                                         grpc_closure* closure);
 
-  void (*update_locked)(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+  void (*update_locked)(grpc_lb_policy* policy,
                         const grpc_lb_policy_args* args);
+
+  /** \see grpc_lb_policy_set_reresolve_closure */
+  void (*set_reresolve_closure_locked)(grpc_lb_policy* policy,
+                                       grpc_closure* request_reresolution);
 };
 
 #ifndef NDEBUG
@@ -107,33 +107,33 @@
 /* Strong references: the policy will shutdown when they reach zero */
 #define GRPC_LB_POLICY_REF(p, r) \
   grpc_lb_policy_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_LB_POLICY_UNREF(exec_ctx, p, r) \
-  grpc_lb_policy_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
+#define GRPC_LB_POLICY_UNREF(p, r) \
+  grpc_lb_policy_unref((p), __FILE__, __LINE__, (r))
 
 /* Weak references: they don't prevent the shutdown of the LB policy. When no
  * strong references are left but there are still weak ones, shutdown is called.
  * Once the weak reference also reaches zero, the LB policy is destroyed. */
 #define GRPC_LB_POLICY_WEAK_REF(p, r) \
   grpc_lb_policy_weak_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, p, r) \
-  grpc_lb_policy_weak_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
+#define GRPC_LB_POLICY_WEAK_UNREF(p, r) \
+  grpc_lb_policy_weak_unref((p), __FILE__, __LINE__, (r))
 void grpc_lb_policy_ref(grpc_lb_policy* policy, const char* file, int line,
                         const char* reason);
-void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
-                          const char* file, int line, const char* reason);
+void grpc_lb_policy_unref(grpc_lb_policy* policy, const char* file, int line,
+                          const char* reason);
 void grpc_lb_policy_weak_ref(grpc_lb_policy* policy, const char* file, int line,
                              const char* reason);
-void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
-                               const char* file, int line, const char* reason);
+void grpc_lb_policy_weak_unref(grpc_lb_policy* policy, const char* file,
+                               int line, const char* reason);
 #else
 #define GRPC_LB_POLICY_REF(p, r) grpc_lb_policy_ref((p))
-#define GRPC_LB_POLICY_UNREF(cl, p, r) grpc_lb_policy_unref((cl), (p))
+#define GRPC_LB_POLICY_UNREF(p, r) grpc_lb_policy_unref((p))
 #define GRPC_LB_POLICY_WEAK_REF(p, r) grpc_lb_policy_weak_ref((p))
-#define GRPC_LB_POLICY_WEAK_UNREF(cl, p, r) grpc_lb_policy_weak_unref((cl), (p))
+#define GRPC_LB_POLICY_WEAK_UNREF(p, r) grpc_lb_policy_weak_unref((p))
 void grpc_lb_policy_ref(grpc_lb_policy* policy);
-void grpc_lb_policy_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+void grpc_lb_policy_unref(grpc_lb_policy* policy);
 void grpc_lb_policy_weak_ref(grpc_lb_policy* policy);
-void grpc_lb_policy_weak_unref(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy);
+void grpc_lb_policy_weak_unref(grpc_lb_policy* policy);
 #endif
 
 /** called by concrete implementations to initialize the base struct */
@@ -158,7 +158,7 @@
 
     Any IO should be done under the \a interested_parties \a grpc_pollset_set
     in the \a grpc_lb_policy struct. */
-int grpc_lb_policy_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+int grpc_lb_policy_pick_locked(grpc_lb_policy* policy,
                                const grpc_lb_policy_pick_args* pick_args,
                                grpc_connected_subchannel** target,
                                grpc_call_context_element* context,
@@ -166,48 +166,49 @@
 
 /** Perform a connected subchannel ping (see \a grpc_connected_subchannel_ping)
     against one of the connected subchannels managed by \a policy. */
-void grpc_lb_policy_ping_one_locked(grpc_exec_ctx* exec_ctx,
-                                    grpc_lb_policy* policy,
-                                    grpc_closure* closure);
+void grpc_lb_policy_ping_one_locked(grpc_lb_policy* policy,
+                                    grpc_closure* on_initiate,
+                                    grpc_closure* on_ack);
 
 /** Cancel picks for \a target.
     The \a on_complete callback of the pending picks will be invoked with \a
     *target set to NULL. */
-void grpc_lb_policy_cancel_pick_locked(grpc_exec_ctx* exec_ctx,
-                                       grpc_lb_policy* policy,
+void grpc_lb_policy_cancel_pick_locked(grpc_lb_policy* policy,
                                        grpc_connected_subchannel** target,
                                        grpc_error* error);
 
 /** Cancel all pending picks for which their \a initial_metadata_flags (as given
     in the call to \a grpc_lb_policy_pick) matches \a initial_metadata_flags_eq
     when AND'd with \a initial_metadata_flags_mask */
-void grpc_lb_policy_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
-                                        grpc_lb_policy* policy,
+void grpc_lb_policy_cancel_picks_locked(grpc_lb_policy* policy,
                                         uint32_t initial_metadata_flags_mask,
                                         uint32_t initial_metadata_flags_eq,
                                         grpc_error* error);
 
 /** Try to enter a READY connectivity state */
-void grpc_lb_policy_exit_idle_locked(grpc_exec_ctx* exec_ctx,
-                                     grpc_lb_policy* policy);
+void grpc_lb_policy_exit_idle_locked(grpc_lb_policy* policy);
 
 /* Call notify when the connectivity state of a channel changes from \a *state.
  * Updates \a *state with the new state of the policy */
 void grpc_lb_policy_notify_on_state_change_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
-    grpc_connectivity_state* state, grpc_closure* closure);
+    grpc_lb_policy* policy, grpc_connectivity_state* state,
+    grpc_closure* closure);
 
 grpc_connectivity_state grpc_lb_policy_check_connectivity_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
-    grpc_error** connectivity_error);
+    grpc_lb_policy* policy, grpc_error** connectivity_error);
 
 /** Update \a policy with \a lb_policy_args. */
-void grpc_lb_policy_update_locked(grpc_exec_ctx* exec_ctx,
-                                  grpc_lb_policy* policy,
+void grpc_lb_policy_update_locked(grpc_lb_policy* policy,
                                   const grpc_lb_policy_args* lb_policy_args);
 
-#ifdef __cplusplus
-}
-#endif
+/** Set the re-resolution closure to \a request_reresolution. */
+void grpc_lb_policy_set_reresolve_closure_locked(
+    grpc_lb_policy* policy, grpc_closure* request_reresolution);
+
+/** Try to request a re-resolution. It's NOT a public API; it's only for use by
+    the LB policy implementations. */
+void grpc_lb_policy_try_reresolve(grpc_lb_policy* policy,
+                                  grpc_core::TraceFlag* grpc_lb_trace,
+                                  grpc_error* error);
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_H */
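grpc_lb_policy_ping_one_locked() now takes separate on_initiate and on_ack closures instead of a single notification. A hedged caller-side sketch against the declarations above (must be invoked under the policy's combiner; closure storage is supplied by the caller):

    static void on_ping_initiated(void* arg, grpc_error* error) {
      gpr_log(GPR_DEBUG, "ping sent: %s", grpc_error_string(error));
    }
    static void on_ping_acked(void* arg, grpc_error* error) {
      gpr_log(GPR_DEBUG, "ping acked: %s", grpc_error_string(error));
    }

    static void example_ping(grpc_lb_policy* policy,
                             grpc_closure* initiate_storage,
                             grpc_closure* ack_storage) {
      GRPC_CLOSURE_INIT(initiate_storage, on_ping_initiated, nullptr,
                        grpc_schedule_on_exec_ctx);
      GRPC_CLOSURE_INIT(ack_storage, on_ping_acked, nullptr,
                        grpc_schedule_on_exec_ctx);
      grpc_lb_policy_ping_one_locked(policy, initiate_storage, ack_storage);
    }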
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
index 6d9fada..3eedb08 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc
@@ -25,14 +25,12 @@
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/profiling/timers.h"
 
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 typedef struct {
   // Stats object to update.
@@ -47,28 +45,24 @@
   bool recv_initial_metadata_succeeded;
 } call_data;
 
-static void on_complete_for_send(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
+static void on_complete_for_send(void* arg, grpc_error* error) {
   call_data* calld = (call_data*)arg;
   if (error == GRPC_ERROR_NONE) {
     calld->send_initial_metadata_succeeded = true;
   }
-  GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete_for_send,
-                   GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(calld->original_on_complete_for_send, GRPC_ERROR_REF(error));
 }
 
-static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
   call_data* calld = (call_data*)arg;
   if (error == GRPC_ERROR_NONE) {
     calld->recv_initial_metadata_succeeded = true;
   }
-  GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready,
+  GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready,
                    GRPC_ERROR_REF(error));
 }
 
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   // Get stats object from context and take a ref.
@@ -81,7 +75,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = (call_data*)elem->call_data;
@@ -96,8 +90,7 @@
 }
 
 static void start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* batch) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   GPR_TIMER_BEGIN("clr_start_transport_stream_op_batch", 0);
   // Intercept send_initial_metadata.
@@ -118,7 +111,7 @@
         &calld->recv_initial_metadata_ready;
   }
   // Chain to next filter.
-  grpc_call_next_op(exec_ctx, elem, batch);
+  grpc_call_next_op(elem, batch);
   GPR_TIMER_END("clr_start_transport_stream_op_batch", 0);
 }
 
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
index abf613a..04de7a0 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.h
@@ -21,15 +21,7 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_client_load_reporting_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_CLIENT_LOAD_REPORTING_FILTER_H \
         */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index 5fb502e..ba4e90d4 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -113,13 +113,13 @@
 #include "src/core/lib/slice/slice_hash_table.h"
 #include "src/core/lib/slice/slice_internal.h"
 #include "src/core/lib/slice/slice_string_helpers.h"
+#include "src/core/lib/support/manual_constructor.h"
 #include "src/core/lib/support/string.h"
 #include "src/core/lib/surface/call.h"
 #include "src/core/lib/surface/channel.h"
 #include "src/core/lib/surface/channel_init.h"
 #include "src/core/lib/transport/static_metadata.h"
 
-#define GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS 20
 #define GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS 1
 #define GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER 1.6
 #define GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS 120
@@ -131,12 +131,12 @@
 /* add lb_token of selected subchannel (address) to the call's initial
  * metadata */
 static grpc_error* initial_metadata_add_lb_token(
-    grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
+    grpc_metadata_batch* initial_metadata,
     grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
   GPR_ASSERT(lb_token_mdelem_storage != nullptr);
   GPR_ASSERT(!GRPC_MDISNULL(lb_token));
-  return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
-                                      lb_token_mdelem_storage, lb_token);
+  return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
+                                      lb_token);
 }
 
 static void destroy_client_stats(void* arg) {
@@ -186,20 +186,19 @@
 /* The \a on_complete closure passed as part of the pick requires keeping a
  * reference to its associated round robin instance. We wrap this closure in
  * order to unref the round robin instance upon its invocation */
-static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
-                               grpc_error* error) {
+static void wrapped_rr_closure(void* arg, grpc_error* error) {
   wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
 
   GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
-  GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
 
   if (wc_arg->rr_policy != nullptr) {
-    /* if *target is NULL, no pick has been made by the RR policy (eg, all
+    /* if *target is nullptr, no pick has been made by the RR policy (eg, all
      * addresses failed to connect). There won't be any user_data/token
      * available */
     if (*wc_arg->target != nullptr) {
       if (!GRPC_MDISNULL(wc_arg->lb_token)) {
-        initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
+        initial_metadata_add_lb_token(wc_arg->initial_metadata,
                                       wc_arg->lb_token_mdelem_storage,
                                       GRPC_MDELEM_REF(wc_arg->lb_token));
       } else {
@@ -221,7 +220,7 @@
       gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
               wc_arg->rr_policy);
     }
-    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
+    GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "wrapped_rr_closure");
   }
   GPR_ASSERT(wc_arg->free_when_done != nullptr);
   gpr_free(wc_arg->free_when_done);
@@ -241,8 +240,8 @@
   /* original pick()'s arguments */
   grpc_lb_policy_pick_args pick_args;
 
-  /* output argument where to store the pick()ed connected subchannel, or NULL
-   * upon error. */
+  /* output argument where to store the pick()ed connected subchannel, or
+   * nullptr upon error. */
   grpc_connected_subchannel** target;
 
   /* args for wrapped_on_complete */
@@ -275,18 +274,30 @@
 typedef struct pending_ping {
   struct pending_ping* next;
 
-  /* args for wrapped_notify */
-  wrapped_rr_closure_arg wrapped_notify_arg;
+  /* args for sending the ping */
+  wrapped_rr_closure_arg* on_initiate;
+  wrapped_rr_closure_arg* on_ack;
 } pending_ping;
 
-static void add_pending_ping(pending_ping** root, grpc_closure* notify) {
+static void add_pending_ping(pending_ping** root, grpc_closure* on_initiate,
+                             grpc_closure* on_ack) {
   pending_ping* pping = (pending_ping*)gpr_zalloc(sizeof(*pping));
-  pping->wrapped_notify_arg.wrapped_closure = notify;
-  pping->wrapped_notify_arg.free_when_done = pping;
+  if (on_initiate != nullptr) {
+    pping->on_initiate =
+        (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_initiate));
+    pping->on_initiate->wrapped_closure = on_initiate;
+    pping->on_initiate->free_when_done = pping->on_initiate;
+    GRPC_CLOSURE_INIT(&pping->on_initiate->wrapper_closure, wrapped_rr_closure,
+                      &pping->on_initiate, grpc_schedule_on_exec_ctx);
+  }
+  if (on_ack != nullptr) {
+    pping->on_ack = (wrapped_rr_closure_arg*)gpr_zalloc(sizeof(*pping->on_ack));
+    pping->on_ack->wrapped_closure = on_ack;
+    pping->on_ack->free_when_done = pping->on_ack;
+    GRPC_CLOSURE_INIT(&pping->on_ack->wrapper_closure, wrapped_rr_closure,
+                      &pping->on_ack, grpc_schedule_on_exec_ctx);
+  }
   pping->next = *root;
-  GRPC_CLOSURE_INIT(&pping->wrapped_notify_arg.wrapper_closure,
-                    wrapped_rr_closure, &pping->wrapped_notify_arg,
-                    grpc_schedule_on_exec_ctx);
   *root = pping;
 }
 
@@ -328,8 +339,8 @@
   /** connectivity state of the LB channel */
   grpc_connectivity_state lb_channel_connectivity;
 
-  /** stores the deserialized response from the LB. May be NULL until one such
-   * response has arrived. */
+  /** stores the deserialized response from the LB. May be nullptr until one
+   * such response has arrived. */
   grpc_grpclb_serverlist* serverlist;
 
   /** Index into serverlist for next pick.
@@ -366,6 +377,9 @@
   /************************************************************/
   /*  client data associated with the LB server communication */
   /************************************************************/
+  /* Finished sending initial request. */
+  grpc_closure lb_on_sent_initial_request;
+
   /* Status from the LB server has been received. This signals the end of the LB
    * call. */
   grpc_closure lb_on_server_status_received;
@@ -397,7 +411,7 @@
   grpc_slice lb_call_status_details;
 
   /** LB call retry backoff state */
-  grpc_backoff lb_call_backoff_state;
+  grpc_core::ManualConstructor<grpc_core::BackOff> lb_call_backoff;
 
   /** LB call retry timer */
   grpc_timer lb_call_retry_timer;
@@ -405,6 +419,7 @@
   /** LB fallback timer */
   grpc_timer lb_fallback_timer;
 
+  bool initial_request_sent;
   bool seen_initial_response;
 
   /* Stats for client-side load reporting. Should be unreffed and
@@ -459,9 +474,9 @@
              ? nullptr
              : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
 }
-static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
+static void lb_token_destroy(void* token) {
   if (token != nullptr) {
-    GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
+    GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
   }
 }
 static int lb_token_cmp(void* token1, void* token2) {
@@ -497,7 +512,7 @@
 
 /* Returns addresses extracted from \a serverlist. */
 static grpc_lb_addresses* process_serverlist_locked(
-    grpc_exec_ctx* exec_ctx, const grpc_grpclb_serverlist* serverlist) {
+    const grpc_grpclb_serverlist* serverlist) {
   size_t num_valid = 0;
   /* first pass: count how many are valid in order to allocate the necessary
    * memory in a single block */
@@ -528,9 +543,9 @@
           strnlen(server->load_balance_token, lb_token_max_length);
       grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
           server->load_balance_token, lb_token_length);
-      user_data = (void*)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
-                                                 lb_token_mdstr)
-                      .payload;
+      user_data =
+          (void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr)
+              .payload;
     } else {
       char* uri = grpc_sockaddr_to_uri(&addr);
       gpr_log(GPR_INFO,
@@ -552,7 +567,7 @@
 
 /* Returns the backend addresses extracted from the given addresses */
 static grpc_lb_addresses* extract_backend_addresses_locked(
-    grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses) {
+    const grpc_lb_addresses* addresses) {
   /* first pass: count the number of backend addresses */
   size_t num_backends = 0;
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -577,8 +592,8 @@
 }
 
 static void update_lb_connectivity_status_locked(
-    grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
-    grpc_connectivity_state rr_state, grpc_error* rr_state_error) {
+    glb_lb_policy* glb_policy, grpc_connectivity_state rr_state,
+    grpc_error* rr_state_error) {
   const grpc_connectivity_state curr_glb_state =
       grpc_connectivity_state_check(&glb_policy->state_tracker);
 
@@ -630,20 +645,20 @@
         glb_policy, grpc_connectivity_state_name(rr_state),
         glb_policy->rr_policy);
   }
-  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
+  grpc_connectivity_state_set(&glb_policy->state_tracker, rr_state,
                               rr_state_error,
                               "update_lb_connectivity_status_locked");
 }
 
 /* Perform a pick over \a glb_policy->rr_policy. Given that a pick can return
  * immediately (ignoring its completion callback), we need to perform the
- * cleanups this callback would otherwise be resposible for.
+ * cleanups this callback would otherwise be responsible for.
  * If \a force_async is true, then we will manually schedule the
  * completion callback even if the pick is available immediately. */
 static bool pick_from_internal_rr_locked(
-    grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
-    const grpc_lb_policy_pick_args* pick_args, bool force_async,
-    grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
+    glb_lb_policy* glb_policy, const grpc_lb_policy_pick_args* pick_args,
+    bool force_async, grpc_connected_subchannel** target,
+    wrapped_rr_closure_arg* wc_arg) {
   // Check for drops if we are not using fallback backend addresses.
   if (glb_policy->serverlist != nullptr) {
     // Look at the index into the serverlist to see if we should drop this call.
@@ -658,7 +673,7 @@
         gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
                 wc_arg->rr_policy);
       }
-      GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+      GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
       // Update client load reporting stats to indicate the number of
       // dropped calls.  Note that we have to do this here instead of in
       // the client_load_reporting filter, because we do not create a
@@ -670,7 +685,7 @@
       grpc_grpclb_client_stats_unref(wc_arg->client_stats);
       if (force_async) {
         GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
-        GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+        GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
         gpr_free(wc_arg->free_when_done);
         return false;
       }
@@ -680,7 +695,7 @@
   }
   // Pick via the RR policy.
   const bool pick_done = grpc_lb_policy_pick_locked(
-      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
+      wc_arg->rr_policy, pick_args, target, wc_arg->context,
       (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
@@ -688,9 +703,9 @@
       gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
               wc_arg->rr_policy);
     }
-    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+    GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
     /* add the load reporting initial metadata */
-    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
+    initial_metadata_add_lb_token(pick_args->initial_metadata,
                                   pick_args->lb_token_mdelem_storage,
                                   GRPC_MDELEM_REF(wc_arg->lb_token));
     // Pass on client stats via context. Passes ownership of the reference.
@@ -699,7 +714,7 @@
     wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
     if (force_async) {
       GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
-      GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
       gpr_free(wc_arg->free_when_done);
       return false;
     }
@@ -712,12 +727,11 @@
   return pick_done;
 }
 
-static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
-                                                  glb_lb_policy* glb_policy) {
+static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
   grpc_lb_addresses* addresses;
   if (glb_policy->serverlist != nullptr) {
     GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
-    addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
+    addresses = process_serverlist_locked(glb_policy->serverlist);
   } else {
     // If rr_handover_locked() is invoked when we haven't received any
     // serverlist from the balancer, we use the fallback backends returned by
@@ -737,24 +751,21 @@
   args->args = grpc_channel_args_copy_and_add_and_remove(
       glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
       1);
-  grpc_lb_addresses_destroy(exec_ctx, addresses);
+  grpc_lb_addresses_destroy(addresses);
   return args;
 }
 
-static void lb_policy_args_destroy(grpc_exec_ctx* exec_ctx,
-                                   grpc_lb_policy_args* args) {
-  grpc_channel_args_destroy(exec_ctx, args->args);
+static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
+  grpc_channel_args_destroy(args->args);
   gpr_free(args);
 }
 
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
-                                               void* arg, grpc_error* error);
-static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error);
+static void create_rr_locked(glb_lb_policy* glb_policy,
                              grpc_lb_policy_args* args) {
   GPR_ASSERT(glb_policy->rr_policy == nullptr);
 
-  grpc_lb_policy* new_rr_policy =
-      grpc_lb_policy_create(exec_ctx, "round_robin", args);
+  grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
   if (new_rr_policy == nullptr) {
     gpr_log(GPR_ERROR,
             "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
@@ -766,19 +777,20 @@
             glb_policy->rr_policy);
     return;
   }
+  grpc_lb_policy_set_reresolve_closure_locked(
+      new_rr_policy, glb_policy->base.request_reresolution);
+  glb_policy->base.request_reresolution = nullptr;
   glb_policy->rr_policy = new_rr_policy;
   grpc_error* rr_state_error = nullptr;
   const grpc_connectivity_state rr_state =
-      grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
+      grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
                                                &rr_state_error);
   /* Connectivity state is a function of the RR policy updated/created */
-  update_lb_connectivity_status_locked(exec_ctx, glb_policy, rr_state,
-                                       rr_state_error);
+  update_lb_connectivity_status_locked(glb_policy, rr_state, rr_state_error);
   /* Add the gRPC LB's interested_parties pollset_set to that of the newly
    * created RR policy. This will make the RR policy progress upon activity on
    * gRPC LB, which in turn is tied to the application's call */
-  grpc_pollset_set_add_pollset_set(exec_ctx,
-                                   glb_policy->rr_policy->interested_parties,
+  grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
                                    glb_policy->base.interested_parties);
 
   /* Allocate the data for the tracking of the new RR policy's connectivity.
@@ -793,10 +805,10 @@
 
   /* Subscribe to changes to the connectivity of the new RR */
   GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
-  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
+  grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
                                                &rr_connectivity->state,
                                                &rr_connectivity->on_change);
-  grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
+  grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
 
   /* Update picks and pings in wait */
   pending_pick* pp;
@@ -811,7 +823,7 @@
               "[grpclb %p] Pending pick about to (async) PICK from RR %p",
               glb_policy, glb_policy->rr_policy);
     }
-    pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
+    pick_from_internal_rr_locked(glb_policy, &pp->pick_args,
                                  true /* force_async */, pp->target,
                                  &pp->wrapped_on_complete_arg);
   }
@@ -819,46 +831,53 @@
   pending_ping* pping;
   while ((pping = glb_policy->pending_pings)) {
     glb_policy->pending_pings = pping->next;
-    GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
-    pping->wrapped_notify_arg.rr_policy = glb_policy->rr_policy;
+    grpc_closure* on_initiate = nullptr;
+    grpc_closure* on_ack = nullptr;
+    if (pping->on_initiate != nullptr) {
+      GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
+      pping->on_initiate->rr_policy = glb_policy->rr_policy;
+      on_initiate = &pping->on_initiate->wrapper_closure;
+    }
+    if (pping->on_ack != nullptr) {
+      GRPC_LB_POLICY_REF(glb_policy->rr_policy, "rr_handover_pending_ping");
+      pping->on_ack->rr_policy = glb_policy->rr_policy;
+      on_ack = &pping->on_ack->wrapper_closure;
+    }
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
               glb_policy, glb_policy->rr_policy);
     }
-    grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
-                                   &pping->wrapped_notify_arg.wrapper_closure);
+    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
+    gpr_free(pping);
   }
 }
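
The loop above drains glb_policy->pending_pings into the freshly created RR policy, forwarding an optional on_initiate closure and an optional on_ack closure per ping instead of the single wrapped notify closure used before. A minimal standalone sketch of that shape, using hypothetical names (PendingPing, flush_pending_pings) rather than the real gRPC types:

#include <functional>
#include <utility>
#include <vector>

// Hypothetical stand-in for the grpclb pending-ping bookkeeping: each queued
// ping may carry an "initiated" callback, an "acked" callback, neither, or both.
struct PendingPing {
  std::function<void()> on_initiate;  // empty means the caller did not ask for it
  std::function<void()> on_ack;       // likewise
};

// Once a child (round_robin-like) policy becomes available, drain the queue and
// forward only the callbacks that were actually supplied, mirroring the nullptr
// checks in the loop above.
void flush_pending_pings(
    std::vector<PendingPing> pings,
    const std::function<void(std::function<void()>, std::function<void()>)>&
        ping_one) {
  for (PendingPing& p : pings) {
    ping_one(std::move(p.on_initiate), std::move(p.on_ack));
  }
}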
 
-/* glb_policy->rr_policy may be NULL (initial handover) */
-static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
-                               glb_lb_policy* glb_policy) {
+/* glb_policy->rr_policy may be nullptr (initial handover) */
+static void rr_handover_locked(glb_lb_policy* glb_policy) {
   if (glb_policy->shutting_down) return;
-  grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
+  grpc_lb_policy_args* args = lb_policy_args_create(glb_policy);
   GPR_ASSERT(args != nullptr);
   if (glb_policy->rr_policy != nullptr) {
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
               glb_policy->rr_policy);
     }
-    grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
+    grpc_lb_policy_update_locked(glb_policy->rr_policy, args);
   } else {
-    create_rr_locked(exec_ctx, glb_policy, args);
+    create_rr_locked(glb_policy, args);
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
               glb_policy->rr_policy);
     }
   }
-  lb_policy_args_destroy(exec_ctx, args);
+  lb_policy_args_destroy(args);
 }
 
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
-                                               void* arg, grpc_error* error) {
+static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
   rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
   glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
   if (glb_policy->shutting_down) {
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                              "glb_rr_connectivity_cb");
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
     gpr_free(rr_connectivity);
     return;
   }
@@ -866,25 +885,22 @@
     /* An RR policy that has transitioned into the SHUTDOWN connectivity state
      * should not be considered for picks or updates: the SHUTDOWN state is a
      * sink, policies can't transition back from it. */
-    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
-                         "rr_connectivity_shutdown");
+    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
     glb_policy->rr_policy = nullptr;
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                              "glb_rr_connectivity_cb");
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
     gpr_free(rr_connectivity);
     return;
   }
   /* rr state != SHUTDOWN && !glb_policy->shutting_down: business as usual */
-  update_lb_connectivity_status_locked(
-      exec_ctx, glb_policy, rr_connectivity->state, GRPC_ERROR_REF(error));
+  update_lb_connectivity_status_locked(glb_policy, rr_connectivity->state,
+                                       GRPC_ERROR_REF(error));
   /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
-  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
+  grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
                                                &rr_connectivity->state,
                                                &rr_connectivity->on_change);
 }
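
glb_rr_connectivity_changed_locked() treats SHUTDOWN as a terminal state (it drops the refs and frees the tracking data) and otherwise propagates the new state and re-arms the watch, reusing the same weak ref. A small illustrative sketch of that subscribe/handle/resubscribe loop, with a hypothetical ConnectivityWatcher type standing in for the gRPC machinery:

#include <functional>
#include <utility>

enum class Connectivity { IDLE, CONNECTING, READY, TRANSIENT_FAILURE, SHUTDOWN };

// Hypothetical watcher showing the resubscribe pattern: every notification
// either terminates the watch (SHUTDOWN is a sink state) or immediately
// re-registers for the next state change.
class ConnectivityWatcher {
 public:
  explicit ConnectivityWatcher(
      std::function<void(std::function<void(Connectivity)>)> notify_once)
      : notify_once_(std::move(notify_once)) {}

  void Start() { Arm(); }

 private:
  void Arm() {
    notify_once_([this](Connectivity state) { OnChange(state); });
  }

  void OnChange(Connectivity state) {
    if (state == Connectivity::SHUTDOWN) {
      return;  // terminal: release resources instead of resubscribing
    }
    // Propagate the observed state upward here, then keep watching.
    Arm();
  }

  std::function<void(std::function<void(Connectivity)>)> notify_once_;
};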
 
-static void destroy_balancer_name(grpc_exec_ctx* exec_ctx,
-                                  void* balancer_name) {
+static void destroy_balancer_name(void* balancer_name) {
   gpr_free(balancer_name);
 }
 
@@ -911,7 +927,7 @@
  *   above the grpclb policy.
  *   - \a args: other args inherited from the grpclb policy. */
 static grpc_channel_args* build_lb_channel_args(
-    grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses,
+    const grpc_lb_addresses* addresses,
     grpc_fake_resolver_response_generator* response_generator,
     const grpc_channel_args* args) {
   size_t num_grpclb_addrs = 0;
@@ -954,7 +970,7 @@
   gpr_free(targets_info_entries);
 
   grpc_channel_args* lb_channel_args =
-      grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
+      grpc_lb_policy_grpclb_build_lb_channel_args(targets_info,
                                                   response_generator, args);
 
   grpc_arg lb_channel_addresses_arg =
@@ -962,35 +978,36 @@
 
   grpc_channel_args* result = grpc_channel_args_copy_and_add(
       lb_channel_args, &lb_channel_addresses_arg, 1);
-  grpc_slice_hash_table_unref(exec_ctx, targets_info);
-  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
-  grpc_lb_addresses_destroy(exec_ctx, lb_addresses);
+  grpc_slice_hash_table_unref(targets_info);
+  grpc_channel_args_destroy(lb_channel_args);
+  grpc_lb_addresses_destroy(lb_addresses);
   return result;
 }
 
-static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void glb_destroy(grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   GPR_ASSERT(glb_policy->pending_picks == nullptr);
   GPR_ASSERT(glb_policy->pending_pings == nullptr);
   gpr_free((void*)glb_policy->server_name);
-  grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+  grpc_channel_args_destroy(glb_policy->args);
   if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
   }
-  grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
+  grpc_connectivity_state_destroy(&glb_policy->state_tracker);
   if (glb_policy->serverlist != nullptr) {
     grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
   }
   if (glb_policy->fallback_backend_addresses != nullptr) {
-    grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+    grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
   }
   grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
   grpc_subchannel_index_unref();
   gpr_free(glb_policy);
 }
 
-static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void glb_shutdown_locked(grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
+  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
   glb_policy->shutting_down = true;
 
   /* We need a copy of the lb_call pointer because we can't cancel the call
@@ -1007,11 +1024,11 @@
     /* lb_on_server_status_received will pick up the cancel and clean up */
   }
   if (glb_policy->retry_timer_active) {
-    grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
+    grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
     glb_policy->retry_timer_active = false;
   }
   if (glb_policy->fallback_timer_active) {
-    grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+    grpc_timer_cancel(&glb_policy->lb_fallback_timer);
     glb_policy->fallback_timer_active = false;
   }
 
@@ -1020,7 +1037,9 @@
   pending_ping* pping = glb_policy->pending_pings;
   glb_policy->pending_pings = nullptr;
   if (glb_policy->rr_policy != nullptr) {
-    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
+    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
+  } else {
+    grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
   }
   // We destroy the LB channel here because
   // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
@@ -1030,28 +1049,34 @@
     grpc_channel_destroy(glb_policy->lb_channel);
     glb_policy->lb_channel = nullptr;
   }
-  grpc_connectivity_state_set(
-      exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
-      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"), "glb_shutdown");
+  grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
+                              GRPC_ERROR_REF(error), "glb_shutdown");
 
   while (pp != nullptr) {
     pending_pick* next = pp->next;
     *pp->target = nullptr;
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+    GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
+                       GRPC_ERROR_REF(error));
     gpr_free(pp);
     pp = next;
   }
 
   while (pping != nullptr) {
     pending_ping* next = pping->next;
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
+    if (pping->on_initiate != nullptr) {
+      GRPC_CLOSURE_SCHED(&pping->on_initiate->wrapper_closure,
+                         GRPC_ERROR_REF(error));
+      gpr_free(pping->on_initiate);
+    }
+    if (pping->on_ack != nullptr) {
+      GRPC_CLOSURE_SCHED(&pping->on_ack->wrapper_closure,
+                         GRPC_ERROR_REF(error));
+      gpr_free(pping->on_ack);
+    }
     gpr_free(pping);
     pping = next;
   }
+  GRPC_ERROR_UNREF(error);
 }
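
glb_shutdown_locked() now creates the "Channel shutdown" error once, hands each pending pick/ping closure its own GRPC_ERROR_REF, and releases the creating reference with the final GRPC_ERROR_UNREF. A tiny ref-counting model of why that last unref is needed; RefCountedError is a hypothetical stand-in, not the grpc_error API:

#include <string>
#include <utility>

// Hypothetical ref-counted error object standing in for grpc_error*.
struct RefCountedError {
  explicit RefCountedError(std::string m) : msg(std::move(m)) {}
  std::string msg;
  int refs = 1;  // creating the error yields the creator's reference
};

RefCountedError* error_ref(RefCountedError* e) { ++e->refs; return e; }
void error_unref(RefCountedError* e) { if (--e->refs == 0) delete e; }

void shutdown_example(int pending_picks, int pending_pings) {
  RefCountedError* error = new RefCountedError("Channel shutdown");
  // Each scheduled closure conceptually owns (and later releases) one ref.
  for (int i = 0; i < pending_picks + pending_pings; ++i) {
    error_unref(error_ref(error));
  }
  error_unref(error);  // drop the creator's reference last; refcount reaches zero here
}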
 
 // Cancel a specific pending pick.
@@ -1063,8 +1088,8 @@
 //   pick also needs to be cancelled by the RR instance.
 // - Otherwise, without an RR instance, picks stay pending at this policy's
 //   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
-//   we invoke the completion closure and set *target to NULL right here.
-static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+//   we invoke the completion closure and set *target to nullptr right here.
+static void glb_cancel_pick_locked(grpc_lb_policy* pol,
                                    grpc_connected_subchannel** target,
                                    grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
@@ -1074,7 +1099,7 @@
     pending_pick* next = pp->next;
     if (pp->target == target) {
       *target = nullptr;
-      GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+      GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick Cancelled", &error, 1));
     } else {
@@ -1084,7 +1109,7 @@
     pp = next;
   }
   if (glb_policy->rr_policy != nullptr) {
-    grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
+    grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, target,
                                       GRPC_ERROR_REF(error));
   }
   GRPC_ERROR_UNREF(error);
@@ -1099,9 +1124,8 @@
//   pick also needs to be cancelled by the RR instance.
 // - Otherwise, without an RR instance, picks stay pending at this policy's
 //   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
-//   we invoke the completion closure and set *target to NULL right here.
-static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
-                                    grpc_lb_policy* pol,
+//   we invoke the completion closure and set *target to nullptr right here.
+static void glb_cancel_picks_locked(grpc_lb_policy* pol,
                                     uint32_t initial_metadata_flags_mask,
                                     uint32_t initial_metadata_flags_eq,
                                     grpc_error* error) {
@@ -1112,7 +1136,7 @@
     pending_pick* next = pp->next;
     if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+      GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick Cancelled", &error, 1));
     } else {
@@ -1123,52 +1147,49 @@
   }
   if (glb_policy->rr_policy != nullptr) {
     grpc_lb_policy_cancel_picks_locked(
-        exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
+        glb_policy->rr_policy, initial_metadata_flags_mask,
         initial_metadata_flags_eq, GRPC_ERROR_REF(error));
   }
   GRPC_ERROR_UNREF(error);
 }
 
-static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error);
-static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
-                                      glb_lb_policy* glb_policy);
-static void start_picking_locked(grpc_exec_ctx* exec_ctx,
-                                 glb_lb_policy* glb_policy) {
+static void lb_on_fallback_timer_locked(void* arg, grpc_error* error);
+static void query_for_backends_locked(glb_lb_policy* glb_policy);
+static void start_picking_locked(glb_lb_policy* glb_policy) {
   /* start a timer to fall back */
   if (glb_policy->lb_fallback_timeout_ms > 0 &&
       glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
     grpc_millis deadline =
-        grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
+        grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
     GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
     GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
                       glb_policy,
                       grpc_combiner_scheduler(glb_policy->base.combiner));
     glb_policy->fallback_timer_active = true;
-    grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
+    grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
                     &glb_policy->lb_on_fallback);
   }
 
   glb_policy->started_picking = true;
-  grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
-  query_for_backends_locked(exec_ctx, glb_policy);
+  glb_policy->lb_call_backoff->Reset();
+  query_for_backends_locked(glb_policy);
 }
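
start_picking_locked() arms the fallback timer only when a fallback timeout is configured, no serverlist has arrived yet, and the timer is not already pending; if the balancer stays silent past lb_fallback_timeout_ms, lb_on_fallback_timer_locked() switches to the resolver-provided backends. A small condensed model of that bookkeeping (FallbackState is a hypothetical name, not a gRPC type):

#include <cstdint>

// Hypothetical condensation of the fallback bookkeeping used above.
struct FallbackState {
  int64_t fallback_timeout_ms = 0;
  bool have_serverlist = false;
  bool fallback_timer_active = false;

  // Mirrors the guard in start_picking_locked(): arm the timer only if a
  // timeout is configured, nothing has been received, and it is not armed yet.
  bool ShouldArmFallbackTimer() const {
    return fallback_timeout_ms > 0 && !have_serverlist && !fallback_timer_active;
  }

  // Mirrors lb_on_fallback_timer_locked(): fall back to the resolver-provided
  // backends only if the balancer still has not produced a serverlist.
  bool ShouldFallBackNow() const { return !have_serverlist; }
};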
 
-static void glb_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void glb_exit_idle_locked(grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   if (!glb_policy->started_picking) {
-    start_picking_locked(exec_ctx, glb_policy);
+    start_picking_locked(glb_policy);
   }
 }
 
-static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+static int glb_pick_locked(grpc_lb_policy* pol,
                            const grpc_lb_policy_pick_args* pick_args,
                            grpc_connected_subchannel** target,
                            grpc_call_context_element* context, void** user_data,
                            grpc_closure* on_complete) {
   if (pick_args->lb_token_mdelem_storage == nullptr) {
     *target = nullptr;
-    GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
+    GRPC_CLOSURE_SCHED(on_complete,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                            "No mdelem storage for the LB token. Load reporting "
                            "won't work without it. Failing"));
@@ -1178,8 +1199,8 @@
   bool pick_done = false;
   if (glb_policy->rr_policy != nullptr) {
     const grpc_connectivity_state rr_connectivity_state =
-        grpc_lb_policy_check_connectivity_locked(
-            exec_ctx, glb_policy->rr_policy, nullptr);
+        grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
+                                                 nullptr);
     // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
     // callback registered to capture this event
     // (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
@@ -1216,9 +1237,8 @@
       wc_arg->initial_metadata = pick_args->initial_metadata;
       wc_arg->free_when_done = wc_arg;
       wc_arg->glb_policy = pol;
-      pick_done =
-          pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
-                                       false /* force_async */, target, wc_arg);
+      pick_done = pick_from_internal_rr_locked(
+          glb_policy, pick_args, false /* force_async */, target, wc_arg);
     }
   } else {  // glb_policy->rr_policy == NULL
     if (grpc_lb_glb_trace.enabled()) {
@@ -1229,7 +1249,7 @@
     add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                      on_complete);
     if (!glb_policy->started_picking) {
-      start_picking_locked(exec_ctx, glb_policy);
+      start_picking_locked(glb_policy);
     }
     pick_done = false;
   }
@@ -1237,37 +1257,34 @@
 }
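
glb_pick_locked() either delegates to the child RR policy (when one exists and has not drifted into SHUTDOWN) or parks the request in pending_picks and triggers start_picking_locked(). A compact standalone model of that decision, using hypothetical ChildPolicy/PickRequest/GlbLike names:

#include <deque>
#include <memory>

// Hypothetical model of the "delegate or queue" decision in glb_pick_locked().
struct PickRequest { /* target, metadata, completion closure, ... */ };

struct ChildPolicy {
  bool shut_down = false;
  // Returns true if the pick completed synchronously.
  bool Pick(const PickRequest&) { return !shut_down; }
};

struct GlbLike {
  std::unique_ptr<ChildPolicy> rr;
  std::deque<PickRequest> pending_picks;
  bool started_picking = false;

  void StartPicking() { started_picking = true; /* query balancer, arm fallback */ }

  bool PickLocked(const PickRequest& req) {
    if (rr != nullptr && !rr->shut_down) {
      return rr->Pick(req);        // delegate to the child policy
    }
    pending_picks.push_back(req);  // no usable child yet: park the pick
    if (!started_picking) StartPicking();
    return false;                  // completion will be signalled asynchronously
  }
};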
 
 static grpc_connectivity_state glb_check_connectivity_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
-    grpc_error** connectivity_error) {
+    grpc_lb_policy* pol, grpc_error** connectivity_error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   return grpc_connectivity_state_get(&glb_policy->state_tracker,
                                      connectivity_error);
 }
 
-static void glb_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
-                                grpc_closure* closure) {
+static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
+                                grpc_closure* on_ack) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   if (glb_policy->rr_policy) {
-    grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
+    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, on_initiate, on_ack);
   } else {
-    add_pending_ping(&glb_policy->pending_pings, closure);
+    add_pending_ping(&glb_policy->pending_pings, on_initiate, on_ack);
     if (!glb_policy->started_picking) {
-      start_picking_locked(exec_ctx, glb_policy);
+      start_picking_locked(glb_policy);
     }
   }
 }
 
-static void glb_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
-                                              grpc_lb_policy* pol,
+static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
                                               grpc_connectivity_state* current,
                                               grpc_closure* notify) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
-  grpc_connectivity_state_notify_on_state_change(
-      exec_ctx, &glb_policy->state_tracker, current, notify);
+  grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
+                                                 current, notify);
 }
 
-static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                          grpc_error* error) {
+static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   glb_policy->retry_timer_active = false;
   if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
@@ -1275,28 +1292,25 @@
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
     }
-    query_for_backends_locked(exec_ctx, glb_policy);
+    query_for_backends_locked(glb_policy);
   }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
+  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_retry_timer");
 }
 
-static void maybe_restart_lb_call(grpc_exec_ctx* exec_ctx,
-                                  glb_lb_policy* glb_policy) {
+static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
   if (glb_policy->started_picking && glb_policy->updating_lb_call) {
     if (glb_policy->retry_timer_active) {
-      grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
+      grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
     }
-    if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
+    if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
     glb_policy->updating_lb_call = false;
   } else if (!glb_policy->shutting_down) {
     /* if we aren't shutting down, restart the LB client call after some time */
-    grpc_millis next_try =
-        grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
-            .next_attempt_start_time;
+    grpc_millis next_try = glb_policy->lb_call_backoff->Step();
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
               glb_policy);
-      grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
+      grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
       if (timeout > 0) {
         gpr_log(GPR_DEBUG,
                 "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
@@ -1311,43 +1325,56 @@
                       lb_call_on_retry_timer_locked, glb_policy,
                       grpc_combiner_scheduler(glb_policy->base.combiner));
     glb_policy->retry_timer_active = true;
-    grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
+    grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
                     &glb_policy->lb_on_call_retry);
   }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                             "lb_on_server_status_received_locked");
 }
 
-static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error);
+static void send_client_load_report_locked(void* arg, grpc_error* error);
 
-static void schedule_next_client_load_report(grpc_exec_ctx* exec_ctx,
-                                             glb_lb_policy* glb_policy) {
+static void schedule_next_client_load_report(glb_lb_policy* glb_policy) {
   const grpc_millis next_client_load_report_time =
-      grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
+      grpc_core::ExecCtx::Get()->Now() +
+      glb_policy->client_stats_report_interval;
   GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                     send_client_load_report_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
-  grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
+  grpc_timer_init(&glb_policy->client_load_report_timer,
                   next_client_load_report_time,
                   &glb_policy->client_load_report_closure);
 }
 
-static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void client_load_report_done_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
   glb_policy->client_load_report_payload = nullptr;
   if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
     glb_policy->client_load_report_timer_pending = false;
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                              "client_load_report");
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
     if (glb_policy->lb_call == nullptr) {
-      maybe_restart_lb_call(exec_ctx, glb_policy);
+      maybe_restart_lb_call(glb_policy);
     }
     return;
   }
-  schedule_next_client_load_report(exec_ctx, glb_policy);
+  schedule_next_client_load_report(glb_policy);
+}
+
+static void do_send_client_load_report_locked(glb_lb_policy* glb_policy) {
+  grpc_op op;
+  memset(&op, 0, sizeof(op));
+  op.op = GRPC_OP_SEND_MESSAGE;
+  op.data.send_message.send_message = glb_policy->client_load_report_payload;
+  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
+                    client_load_report_done_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner));
+  grpc_call_error call_error = grpc_call_start_batch_and_execute(
+      glb_policy->lb_call, &op, 1, &glb_policy->client_load_report_closure);
+  if (call_error != GRPC_CALL_OK) {
+    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
+    GPR_ASSERT(GRPC_CALL_OK == call_error);
+  }
 }
 
 static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
@@ -1362,15 +1389,13 @@
          (drop_entries == nullptr || drop_entries->num_entries == 0);
 }
 
-static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void send_client_load_report_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
     glb_policy->client_load_report_timer_pending = false;
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                              "client_load_report");
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
     if (glb_policy->lb_call == nullptr) {
-      maybe_restart_lb_call(exec_ctx, glb_policy);
+      maybe_restart_lb_call(glb_policy);
     }
     return;
   }
@@ -1383,7 +1408,7 @@
   if (load_report_counters_are_zero(request)) {
     if (glb_policy->last_client_load_report_counters_were_zero) {
       grpc_grpclb_request_destroy(request);
-      schedule_next_client_load_report(exec_ctx, glb_policy);
+      schedule_next_client_load_report(glb_policy);
       return;
     }
     glb_policy->last_client_load_report_counters_were_zero = true;
@@ -1393,31 +1418,20 @@
   grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
   glb_policy->client_load_report_payload =
       grpc_raw_byte_buffer_create(&request_payload_slice, 1);
-  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
+  grpc_slice_unref_internal(request_payload_slice);
   grpc_grpclb_request_destroy(request);
-  // Send load report message.
-  grpc_op op;
-  memset(&op, 0, sizeof(op));
-  op.op = GRPC_OP_SEND_MESSAGE;
-  op.data.send_message.send_message = glb_policy->client_load_report_payload;
-  GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
-                    client_load_report_done_locked, glb_policy,
-                    grpc_combiner_scheduler(glb_policy->base.combiner));
-  grpc_call_error call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, &op, 1,
-      &glb_policy->client_load_report_closure);
-  if (call_error != GRPC_CALL_OK) {
-    gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
-    GPR_ASSERT(GRPC_CALL_OK == call_error);
+  // If we've already sent the initial request, then we can go ahead and send
+  // the load report. Otherwise, we need to wait until the initial request has
+  // been sent before sending it (see lb_on_sent_initial_request_locked() below).
+  if (glb_policy->initial_request_sent) {
+    do_send_client_load_report_locked(glb_policy);
   }
 }
 
-static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
-                                                void* arg, grpc_error* error);
-static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error);
-static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
-                                glb_lb_policy* glb_policy) {
+static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error);
+static void lb_on_server_status_received_locked(void* arg, grpc_error* error);
+static void lb_on_response_received_locked(void* arg, grpc_error* error);
+static void lb_call_init_locked(glb_lb_policy* glb_policy) {
   GPR_ASSERT(glb_policy->server_name != nullptr);
   GPR_ASSERT(glb_policy->server_name[0] != '\0');
   GPR_ASSERT(glb_policy->lb_call == nullptr);
@@ -1430,13 +1444,13 @@
   grpc_millis deadline =
       glb_policy->lb_call_timeout_ms == 0
           ? GRPC_MILLIS_INF_FUTURE
-          : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
+          : grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
   glb_policy->lb_call = grpc_channel_create_pollset_set_call(
-      exec_ctx, glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
+      glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
       glb_policy->base.interested_parties,
       GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
       &host, deadline, nullptr);
-  grpc_slice_unref_internal(exec_ctx, host);
+  grpc_slice_unref_internal(host);
 
   if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
@@ -1451,9 +1465,12 @@
   grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
   glb_policy->lb_request_payload =
       grpc_raw_byte_buffer_create(&request_payload_slice, 1);
-  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
+  grpc_slice_unref_internal(request_payload_slice);
   grpc_grpclb_request_destroy(request);
 
+  GRPC_CLOSURE_INIT(&glb_policy->lb_on_sent_initial_request,
+                    lb_on_sent_initial_request_locked, glb_policy,
+                    grpc_combiner_scheduler(glb_policy->base.combiner));
   GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
                     lb_on_server_status_received_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
@@ -1461,19 +1478,21 @@
                     lb_on_response_received_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
 
-  grpc_backoff_init(&glb_policy->lb_call_backoff_state,
-                    GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
-                    GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER,
-                    GRPC_GRPCLB_RECONNECT_JITTER,
-                    GRPC_GRPCLB_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
-                    GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+  grpc_core::BackOff::Options backoff_options;
+  backoff_options
+      .set_initial_backoff(GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
+      .set_multiplier(GRPC_GRPCLB_RECONNECT_BACKOFF_MULTIPLIER)
+      .set_jitter(GRPC_GRPCLB_RECONNECT_JITTER)
+      .set_max_backoff(GRPC_GRPCLB_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
 
+  glb_policy->lb_call_backoff.Init(backoff_options);
+
+  glb_policy->initial_request_sent = false;
   glb_policy->seen_initial_response = false;
   glb_policy->last_client_load_report_counters_were_zero = false;
 }
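
The hand-rolled grpc_backoff state is replaced here by a grpc_core::BackOff configured with an initial backoff, a multiplier, jitter, and a max backoff; Reset() is called after a healthy response and Step() when the LB call is lost. The sketch below illustrates what those four options typically mean; it is not the grpc_core::BackOff implementation, and its example values are illustrative only.

#include <algorithm>
#include <cstdlib>

// Illustrative exponential backoff with jitter. Reset() corresponds to a healthy
// LB response, Step() to losing the LB call. (The BackOff used above returns an
// absolute next-attempt time; this sketch returns a relative delay for brevity.)
class SketchBackOff {
 public:
  SketchBackOff(double initial_ms, double multiplier, double jitter, double max_ms)
      : initial_(initial_ms), multiplier_(multiplier), jitter_(jitter),
        max_(max_ms), current_(initial_ms) {}

  void Reset() { current_ = initial_; }

  double Step() {
    // Randomize the current delay by +/- jitter, then grow it for next time,
    // capped at the configured maximum.
    double jittered =
        current_ * (1.0 + jitter_ * (2.0 * std::rand() / RAND_MAX - 1.0));
    current_ = std::min(current_ * multiplier_, max_);
    return jittered;
  }

 private:
  double initial_, multiplier_, jitter_, max_, current_;
};

// Example (values illustrative only): SketchBackOff backoff(1000, 1.6, 0.2, 120000);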
 
-static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
-                                   glb_lb_policy* glb_policy) {
+static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
   GPR_ASSERT(glb_policy->lb_call != nullptr);
   grpc_call_unref(glb_policy->lb_call);
   glb_policy->lb_call = nullptr;
@@ -1482,22 +1501,21 @@
   grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
 
   grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
-  grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
+  grpc_slice_unref_internal(glb_policy->lb_call_status_details);
 
   if (glb_policy->client_load_report_timer_pending) {
-    grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
+    grpc_timer_cancel(&glb_policy->client_load_report_timer);
   }
 }
 
 /*
  * Auxiliary functions and LB client callbacks.
  */
-static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
-                                      glb_lb_policy* glb_policy) {
+static void query_for_backends_locked(glb_lb_policy* glb_policy) {
   GPR_ASSERT(glb_policy->lb_channel != nullptr);
   if (glb_policy->shutting_down) return;
 
-  lb_call_init_locked(exec_ctx, glb_policy);
+  lb_call_init_locked(glb_policy);
 
   if (grpc_lb_glb_trace.enabled()) {
     gpr_log(GPR_INFO,
@@ -1528,8 +1546,13 @@
   op->flags = 0;
   op->reserved = nullptr;
   op++;
+  /* take a weak ref (won't prevent calling of \a glb_shutdown if the strong ref
+   * count goes to zero) to be unref'd in lb_on_sent_initial_request_locked() */
+  GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
+                          "lb_on_sent_initial_request_locked");
   call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), nullptr);
+      glb_policy->lb_call, ops, (size_t)(op - ops),
+      &glb_policy->lb_on_sent_initial_request);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 
   op = ops;
@@ -1547,7 +1570,7 @@
   GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
                           "lb_on_server_status_received_locked");
   call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+      glb_policy->lb_call, ops, (size_t)(op - ops),
       &glb_policy->lb_on_server_status_received);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 
@@ -1561,19 +1584,30 @@
    * lb_on_response_received_locked */
   GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
   call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+      glb_policy->lb_call, ops, (size_t)(op - ops),
       &glb_policy->lb_on_response_received);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }
 
-static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void lb_on_sent_initial_request_locked(void* arg, grpc_error* error) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
+  glb_policy->initial_request_sent = true;
+  // If we attempted to send a client load report before the initial request was
+  // sent, send the load report now.
+  if (glb_policy->client_load_report_payload != nullptr) {
+    do_send_client_load_report_locked(glb_policy);
+  }
+  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
+                            "lb_on_sent_initial_request_locked");
+}
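
The client load report path is now split: send_client_load_report_locked() builds the payload but only transmits it via do_send_client_load_report_locked() once the initial request has gone out, and lb_on_sent_initial_request_locked() flushes any payload that was produced too early. A minimal model of that handshake, with a hypothetical LoadReportGate type:

#include <functional>
#include <string>
#include <utility>

// Hypothetical model of the initial_request_sent / client_load_report_payload
// coordination: a report produced too early is stored and sent once the initial
// request completes.
class LoadReportGate {
 public:
  explicit LoadReportGate(std::function<void(const std::string&)> send)
      : send_(std::move(send)) {}

  // Called from the load-report timer path.
  void ReportReady(std::string payload) {
    pending_ = std::move(payload);
    if (initial_request_sent_) Flush();
  }

  // Called once the initial LB request has been sent on the wire.
  void OnInitialRequestSent() {
    initial_request_sent_ = true;
    if (!pending_.empty()) Flush();
  }

 private:
  void Flush() {
    send_(pending_);
    pending_.clear();
  }

  std::function<void(const std::string&)> send_;
  std::string pending_;
  bool initial_request_sent_ = false;
};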
+
+static void lb_on_response_received_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   grpc_op ops[2];
   memset(ops, 0, sizeof(ops));
   grpc_op* op = ops;
   if (glb_policy->lb_response_payload != nullptr) {
-    grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
+    glb_policy->lb_call_backoff->Reset();
     /* Received data from the LB server. Look inside
      * glb_policy->lb_response_payload, for a serverlist. */
     grpc_byte_buffer_reader bbr;
@@ -1601,7 +1635,7 @@
          * send_client_load_report_locked() */
         glb_policy->client_load_report_timer_pending = true;
         GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
-        schedule_next_client_load_report(exec_ctx, glb_policy);
+        schedule_next_client_load_report(glb_policy);
       } else if (grpc_lb_glb_trace.enabled()) {
         gpr_log(GPR_INFO,
                 "[grpclb %p] Received initial LB response message; client load "
@@ -1646,11 +1680,10 @@
               grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
             } else {
               /* or dispose of the fallback */
-              grpc_lb_addresses_destroy(exec_ctx,
-                                        glb_policy->fallback_backend_addresses);
+              grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
               glb_policy->fallback_backend_addresses = nullptr;
               if (glb_policy->fallback_timer_active) {
-                grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+                grpc_timer_cancel(&glb_policy->lb_fallback_timer);
                 glb_policy->fallback_timer_active = false;
               }
             }
@@ -1659,7 +1692,7 @@
              * update or in glb_destroy() */
             glb_policy->serverlist = serverlist;
             glb_policy->serverlist_index = 0;
-            rr_handover_locked(exec_ctx, glb_policy);
+            rr_handover_locked(glb_policy);
           }
         } else {
           if (grpc_lb_glb_trace.enabled()) {
@@ -1669,14 +1702,14 @@
           }
           grpc_grpclb_destroy_serverlist(serverlist);
         }
-      } else { /* serverlist == NULL */
+      } else { /* serverlist == nullptr */
         gpr_log(GPR_ERROR,
                 "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
                 glb_policy,
                 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
       }
     }
-    grpc_slice_unref_internal(exec_ctx, response_slice);
+    grpc_slice_unref_internal(response_slice);
     if (!glb_policy->shutting_down) {
       /* keep listening for serverlist updates */
       op->op = GRPC_OP_RECV_MESSAGE;
@@ -1687,23 +1720,22 @@
       /* reuse the "lb_on_response_received_locked" weak ref taken in
        * query_for_backends_locked() */
       const grpc_call_error call_error = grpc_call_start_batch_and_execute(
-          exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+          glb_policy->lb_call, ops, (size_t)(op - ops),
           &glb_policy->lb_on_response_received); /* loop */
       GPR_ASSERT(GRPC_CALL_OK == call_error);
     } else {
-      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+      GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                                 "lb_on_response_received_locked_shutdown");
     }
   } else { /* empty payload: call cancelled. */
            /* dispose of the "lb_on_response_received_locked" weak ref taken in
             * query_for_backends_locked() and reused in every reception loop */
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                               "lb_on_response_received_locked_empty_payload");
   }
 }
 
-static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   glb_policy->fallback_timer_active = false;
   /* If we receive a serverlist after the timer fires but before this callback
@@ -1716,15 +1748,13 @@
                 glb_policy);
       }
       GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
-      rr_handover_locked(exec_ctx, glb_policy);
+      rr_handover_locked(glb_policy);
     }
   }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                            "grpclb_fallback_timer");
+  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_fallback_timer");
 }
 
-static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
-                                                void* arg, grpc_error* error) {
+static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   GPR_ASSERT(glb_policy->lb_call != nullptr);
   if (grpc_lb_glb_trace.enabled()) {
@@ -1738,29 +1768,28 @@
     gpr_free(status_details);
   }
   /* We need to perform cleanups no matter what. */
-  lb_call_destroy_locked(exec_ctx, glb_policy);
+  lb_call_destroy_locked(glb_policy);
   // If the load report timer is still pending, we wait for it to be
   // called before restarting the call.  Otherwise, we restart the call
   // here.
   if (!glb_policy->client_load_report_timer_pending) {
-    maybe_restart_lb_call(exec_ctx, glb_policy);
+    maybe_restart_lb_call(glb_policy);
   }
 }
 
-static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
-                                   glb_lb_policy* glb_policy,
+static void fallback_update_locked(glb_lb_policy* glb_policy,
                                    const grpc_lb_addresses* addresses) {
   GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
-  grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+  grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
   glb_policy->fallback_backend_addresses =
-      extract_backend_addresses_locked(exec_ctx, addresses);
-  if (glb_policy->started_picking && glb_policy->lb_fallback_timeout_ms > 0 &&
-      !glb_policy->fallback_timer_active) {
-    rr_handover_locked(exec_ctx, glb_policy);
+      extract_backend_addresses_locked(addresses);
+  if (glb_policy->lb_fallback_timeout_ms > 0 &&
+      glb_policy->rr_policy != nullptr) {
+    rr_handover_locked(glb_policy);
   }
 }
 
-static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+static void glb_update_locked(grpc_lb_policy* policy,
                               const grpc_lb_policy_args* args) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
   const grpc_arg* arg =
@@ -1770,7 +1799,7 @@
       // If we don't have a current channel to the LB, go into TRANSIENT
       // FAILURE.
       grpc_connectivity_state_set(
-          exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+          &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
           "glb_update_missing");
     } else {
@@ -1787,16 +1816,16 @@
   // If a non-empty serverlist hasn't been received from the balancer,
   // propagate the update to fallback_backend_addresses.
   if (glb_policy->serverlist == nullptr) {
-    fallback_update_locked(exec_ctx, glb_policy, addresses);
+    fallback_update_locked(glb_policy, addresses);
   }
   GPR_ASSERT(glb_policy->lb_channel != nullptr);
   // Propagate updates to the LB channel (pick_first) through the fake
   // resolver.
   grpc_channel_args* lb_channel_args = build_lb_channel_args(
-      exec_ctx, addresses, glb_policy->response_generator, args->args);
+      addresses, glb_policy->response_generator, args->args);
   grpc_fake_resolver_response_generator_set_response(
-      exec_ctx, glb_policy->response_generator, lb_channel_args);
-  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+      glb_policy->response_generator, lb_channel_args);
+  grpc_channel_args_destroy(lb_channel_args);
   // Start watching the LB channel connectivity for connection, if not
   // already doing so.
   if (!glb_policy->watching_lb_channel) {
@@ -1808,7 +1837,7 @@
     glb_policy->watching_lb_channel = true;
     GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
     grpc_client_channel_watch_connectivity_state(
-        exec_ctx, client_channel_elem,
+        client_channel_elem,
         grpc_polling_entity_create_from_pollset_set(
             glb_policy->base.interested_parties),
         &glb_policy->lb_channel_connectivity,
@@ -1819,8 +1848,7 @@
 // Invoked as part of the update process. It continues watching the LB channel
 // until it shuts down or becomes READY. It's invoked even if the LB channel
 // stayed READY throughout the update (for example if the update is identical).
-static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
-                                                      void* arg,
+static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
                                                       grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   if (glb_policy->shutting_down) goto done;
@@ -1836,7 +1864,7 @@
               grpc_channel_get_channel_stack(glb_policy->lb_channel));
       GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
       grpc_client_channel_watch_connectivity_state(
-          exec_ctx, client_channel_elem,
+          client_channel_elem,
           grpc_polling_entity_create_from_pollset_set(
               glb_policy->base.interested_parties),
           &glb_policy->lb_channel_connectivity,
@@ -1853,23 +1881,36 @@
         grpc_call_cancel(glb_policy->lb_call, nullptr);
         // lb_on_server_status_received() will pick up the cancel and reinit
         // lb_call.
-      } else if (glb_policy->started_picking && !glb_policy->shutting_down) {
+      } else if (glb_policy->started_picking) {
         if (glb_policy->retry_timer_active) {
-          grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
+          grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
           glb_policy->retry_timer_active = false;
         }
-        start_picking_locked(exec_ctx, glb_policy);
+        start_picking_locked(glb_policy);
       }
     /* fallthrough */
     case GRPC_CHANNEL_SHUTDOWN:
     done:
       glb_policy->watching_lb_channel = false;
-      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+      GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                                 "watch_lb_channel_connectivity_cb_shutdown");
       break;
   }
 }
 
+static void glb_set_reresolve_closure_locked(
+    grpc_lb_policy* policy, grpc_closure* request_reresolution) {
+  glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
+  GPR_ASSERT(!glb_policy->shutting_down);
+  GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
+  if (glb_policy->rr_policy != nullptr) {
+    grpc_lb_policy_set_reresolve_closure_locked(glb_policy->rr_policy,
+                                                request_reresolution);
+  } else {
+    glb_policy->base.request_reresolution = request_reresolution;
+  }
+}
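
glb_set_reresolve_closure_locked() forwards the re-resolution closure to the child RR policy when one exists and otherwise stores it on the grpclb policy; create_rr_locked() above later hands the stored closure down and clears the local copy. A small standalone sketch of that one-time ownership handoff, with hypothetical ParentPolicy/ChildPolicy names:

#include <functional>
#include <memory>
#include <utility>

using Closure = std::function<void()>;

// Hypothetical child policy that can hold a re-resolution request callback.
struct ChildPolicy {
  Closure request_reresolution;
  void SetReresolveClosure(Closure c) { request_reresolution = std::move(c); }
};

// Hypothetical grpclb-like parent: keep the closure locally only while there is
// no child to delegate to, and transfer it exactly once when the child appears.
struct ParentPolicy {
  std::unique_ptr<ChildPolicy> child;
  Closure request_reresolution;

  void SetReresolveClosure(Closure c) {
    if (child != nullptr) {
      child->SetReresolveClosure(std::move(c));  // delegate immediately
    } else {
      request_reresolution = std::move(c);       // park until a child exists
    }
  }

  void CreateChild() {
    child = std::make_unique<ChildPolicy>();
    // Mirror create_rr_locked(): hand the parked closure down and clear it.
    child->SetReresolveClosure(std::move(request_reresolution));
    request_reresolution = nullptr;
  }
};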
+
 /* Code wiring the policy with the rest of the core */
 static const grpc_lb_policy_vtable glb_lb_policy_vtable = {
     glb_destroy,
@@ -1881,10 +1922,10 @@
     glb_exit_idle_locked,
     glb_check_connectivity_locked,
     glb_notify_on_state_change_locked,
-    glb_update_locked};
+    glb_update_locked,
+    glb_set_reresolve_closure_locked};
 
-static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
-                                  grpc_lb_policy_factory* factory,
+static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
                                   grpc_lb_policy_args* args) {
   /* Count the number of gRPC-LB addresses. There must be at least one. */
   const grpc_arg* arg =
@@ -1905,7 +1946,7 @@
   arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
   GPR_ASSERT(arg != nullptr);
   GPR_ASSERT(arg->type == GRPC_ARG_STRING);
-  grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+  grpc_uri* uri = grpc_uri_parse(arg->value.string, true);
   GPR_ASSERT(uri->path[0] != '\0');
   glb_policy->server_name =
       gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
@@ -1938,26 +1979,26 @@
   /* Extract the backend addresses (may be empty) from the resolver for
    * fallback. */
   glb_policy->fallback_backend_addresses =
-      extract_backend_addresses_locked(exec_ctx, addresses);
+      extract_backend_addresses_locked(addresses);
 
   /* Create a client channel over them to communicate with a LB service */
   glb_policy->response_generator =
       grpc_fake_resolver_response_generator_create();
   grpc_channel_args* lb_channel_args = build_lb_channel_args(
-      exec_ctx, addresses, glb_policy->response_generator, args->args);
+      addresses, glb_policy->response_generator, args->args);
   char* uri_str;
   gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
   glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
-      exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
+      uri_str, args->client_channel_factory, lb_channel_args);
 
   /* Propagate initial resolution */
   grpc_fake_resolver_response_generator_set_response(
-      exec_ctx, glb_policy->response_generator, lb_channel_args);
-  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+      glb_policy->response_generator, lb_channel_args);
+  grpc_channel_args_destroy(lb_channel_args);
   gpr_free(uri_str);
   if (glb_policy->lb_channel == nullptr) {
     gpr_free((void*)glb_policy->server_name);
-    grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+    grpc_channel_args_destroy(glb_policy->args);
     gpr_free(glb_policy);
     return nullptr;
   }
@@ -1988,7 +2029,7 @@
 
 // Only add client_load_reporting filter if the grpclb LB policy is used.
 static bool maybe_add_client_load_reporting_filter(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* arg) {
   const grpc_channel_args* args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   const grpc_arg* channel_arg =
@@ -2001,7 +2042,7 @@
   return true;
 }
 
-extern "C" void grpc_lb_policy_grpclb_init() {
+void grpc_lb_policy_grpclb_init() {
   grpc_register_lb_policy(grpc_glb_lb_factory_create());
   grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                    GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
@@ -2009,4 +2050,4 @@
                                    (void*)&grpc_client_load_reporting_filter);
 }
 
-extern "C" void grpc_lb_policy_grpclb_shutdown() {}
+void grpc_lb_policy_grpclb_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
index b6135a4..0a2edb0 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.h
@@ -21,17 +21,9 @@
 
 #include "src/core/ext/filters/client_channel/lb_policy_factory.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Returns a load balancing factory for the glb policy, which connects to a
  * load balancing server to decide which successfully connected subchannel to
  * pick next. */
 grpc_lb_policy_factory* grpc_glb_lb_factory_create();
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_H */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
index aacaec1..a8ecea4 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.cc
@@ -26,17 +26,17 @@
 #include "src/core/lib/support/string.h"
 
 grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
-    grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
+    const char* lb_service_target_addresses,
     grpc_client_channel_factory* client_channel_factory,
     grpc_channel_args* args) {
   grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
-      exec_ctx, client_channel_factory, lb_service_target_addresses,
+      client_channel_factory, lb_service_target_addresses,
       GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, args);
   return lb_channel;
 }
 
 grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
-    grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
+    grpc_slice_hash_table* targets_info,
     grpc_fake_resolver_response_generator* response_generator,
     const grpc_channel_args* args) {
   const grpc_arg to_add[] = {
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
index 39cbf53..56104b2 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel.h
@@ -23,10 +23,6 @@
 #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
 #include "src/core/lib/slice/slice_hash_table.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Create the channel used for communicating with an LB service.
  * Note that an LB *service* may be composed of several LB *servers*.
  *
@@ -35,18 +31,14 @@
  * \a client_channel_factory will be used for the creation of the LB channel,
  * alongside the channel args passed in \a args. */
 grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
-    grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
+    const char* lb_service_target_addresses,
     grpc_client_channel_factory* client_channel_factory,
     grpc_channel_args* args);
 
 grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
-    grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
+    grpc_slice_hash_table* targets_info,
     grpc_fake_resolver_response_generator* response_generator,
     const grpc_channel_args* args);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CHANNEL_H \
         */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
index 8eaa90e..76bcddf 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc
@@ -29,7 +29,7 @@
 #include "src/core/lib/support/string.h"
 
 grpc_channel* grpc_lb_policy_grpclb_create_lb_channel(
-    grpc_exec_ctx* exec_ctx, const char* lb_service_target_addresses,
+    const char* lb_service_target_addresses,
     grpc_client_channel_factory* client_channel_factory,
     grpc_channel_args* args) {
   grpc_channel_args* new_args = args;
@@ -50,19 +50,19 @@
     new_args = grpc_channel_args_copy_and_add_and_remove(
         args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), args_to_add,
         GPR_ARRAY_SIZE(args_to_add));
-    grpc_channel_credentials_unref(exec_ctx, creds_sans_call_creds);
+    grpc_channel_credentials_unref(creds_sans_call_creds);
   }
   grpc_channel* lb_channel = grpc_client_channel_factory_create_channel(
-      exec_ctx, client_channel_factory, lb_service_target_addresses,
+      client_channel_factory, lb_service_target_addresses,
       GRPC_CLIENT_CHANNEL_TYPE_LOAD_BALANCING, new_args);
   if (channel_credentials != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, new_args);
+    grpc_channel_args_destroy(new_args);
   }
   return lb_channel;
 }
 
 grpc_channel_args* grpc_lb_policy_grpclb_build_lb_channel_args(
-    grpc_exec_ctx* exec_ctx, grpc_slice_hash_table* targets_info,
+    grpc_slice_hash_table* targets_info,
     grpc_fake_resolver_response_generator* response_generator,
     const grpc_channel_args* args) {
   const grpc_arg to_add[] = {
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
index ce88cf9..d4b9d06 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.h
@@ -23,10 +23,6 @@
 
 #include <grpc/impl/codegen/grpc_types.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_grpclb_client_stats grpc_grpclb_client_stats;
 
 typedef struct {
@@ -65,9 +61,5 @@
 void grpc_grpclb_dropped_call_counts_destroy(
     grpc_grpclb_dropped_call_counts* drop_entries);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_GRPCLB_CLIENT_STATS_H \
         */
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
index 2c8d7f4..fc781da 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc
@@ -215,9 +215,6 @@
       return nullptr;
     }
   }
-  if (res.server_list.has_expiration_interval) {
-    sl->expiration_interval = res.server_list.expiration_interval;
-  }
   return sl;
 }
 
@@ -237,8 +234,6 @@
   grpc_grpclb_serverlist* copy =
       (grpc_grpclb_serverlist*)gpr_zalloc(sizeof(grpc_grpclb_serverlist));
   copy->num_servers = sl->num_servers;
-  memcpy(&copy->expiration_interval, &sl->expiration_interval,
-         sizeof(grpc_grpclb_duration));
   copy->servers = (grpc_grpclb_server**)gpr_malloc(sizeof(grpc_grpclb_server*) *
                                                    sl->num_servers);
   for (size_t i = 0; i < sl->num_servers; i++) {
@@ -257,10 +252,6 @@
   if (lhs->num_servers != rhs->num_servers) {
     return false;
   }
-  if (grpc_grpclb_duration_compare(&lhs->expiration_interval,
-                                   &rhs->expiration_interval) != 0) {
-    return false;
-  }
   for (size_t i = 0; i < lhs->num_servers; i++) {
     if (!grpc_grpclb_server_equals(lhs->servers[i], rhs->servers[i])) {
       return false;
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
index 138012c..ccb0212 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h
@@ -25,10 +25,6 @@
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h"
 #include "src/core/ext/filters/client_channel/lb_policy_factory.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #define GRPC_GRPCLB_SERVICE_NAME_MAX_LENGTH 128
 
 typedef grpc_lb_v1_Server_ip_address_t grpc_grpclb_ip_address;
@@ -39,7 +35,6 @@
 typedef struct {
   grpc_grpclb_server** servers;
   size_t num_servers;
-  grpc_grpclb_duration expiration_interval;
 } grpc_grpclb_serverlist;
 
 /** Create a request for a gRPC LB service under \a lb_service_name */
@@ -87,9 +82,5 @@
 void grpc_grpclb_initial_response_destroy(
     grpc_grpclb_initial_response* response);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_GRPCLB_LOAD_BALANCER_API_H \
         */
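
To illustrate what the slimmed-down grpc_grpclb_serverlist now carries, here is a small hedged sketch that mirrors (but does not replace) the equality helper declared above; error handling is omitted.

// Sketch: after this change a serverlist is just servers + num_servers,
// so comparing two of them reduces to a count check plus pairwise equality.
static bool serverlists_match_sketch(const grpc_grpclb_serverlist* a,
                                     const grpc_grpclb_serverlist* b) {
  if (a->num_servers != b->num_servers) return false;
  for (size_t i = 0; i < a->num_servers; ++i) {
    if (!grpc_grpclb_server_equals(a->servers[i], b->servers[i])) return false;
  }
  return true;  // no expiration_interval left to compare
}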
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c b/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
index 6a5d54c..4e6c5cc 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
@@ -61,9 +61,8 @@
     PB_LAST_FIELD
 };
 
-const pb_field_t grpc_lb_v1_ServerList_fields[3] = {
+const pb_field_t grpc_lb_v1_ServerList_fields[2] = {
     PB_FIELD(  1, MESSAGE , REPEATED, CALLBACK, FIRST, grpc_lb_v1_ServerList, servers, servers, &grpc_lb_v1_Server_fields),
-    PB_FIELD(  3, MESSAGE , OPTIONAL, STATIC  , OTHER, grpc_lb_v1_ServerList, expiration_interval, servers, &grpc_lb_v1_Duration_fields),
     PB_LAST_FIELD
 };
 
@@ -85,7 +84,7 @@
  * numbers or field sizes that are larger than what can fit in 8 or 16 bit
  * field descriptors.
  */
-PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 65536 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 65536 && pb_membersize(grpc_lb_v1_ServerList, servers) < 65536 && pb_membersize(grpc_lb_v1_ServerList, expiration_interval) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server)
+PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 65536 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 65536 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 65536 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 65536 && pb_membersize(grpc_lb_v1_ServerList, servers) < 65536), YOU_MUST_DEFINE_PB_FIELD_32BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server)
 #endif
 
 #if !defined(PB_FIELD_16BIT) && !defined(PB_FIELD_32BIT)
@@ -96,7 +95,7 @@
  * numbers or field sizes that are larger than what can fit in the default
  * 8 bit descriptors.
  */
-PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 256 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 256 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 256 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 256 && pb_membersize(grpc_lb_v1_ServerList, servers) < 256 && pb_membersize(grpc_lb_v1_ServerList, expiration_interval) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server)
+PB_STATIC_ASSERT((pb_membersize(grpc_lb_v1_LoadBalanceRequest, initial_request) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceRequest, client_stats) < 256 && pb_membersize(grpc_lb_v1_ClientStats, timestamp) < 256 && pb_membersize(grpc_lb_v1_ClientStats, calls_finished_with_drop) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, initial_response) < 256 && pb_membersize(grpc_lb_v1_LoadBalanceResponse, server_list) < 256 && pb_membersize(grpc_lb_v1_InitialLoadBalanceResponse, client_stats_report_interval) < 256 && pb_membersize(grpc_lb_v1_ServerList, servers) < 256), YOU_MUST_DEFINE_PB_FIELD_16BIT_FOR_MESSAGES_grpc_lb_v1_Duration_grpc_lb_v1_Timestamp_grpc_lb_v1_LoadBalanceRequest_grpc_lb_v1_InitialLoadBalanceRequest_grpc_lb_v1_ClientStatsPerToken_grpc_lb_v1_ClientStats_grpc_lb_v1_LoadBalanceResponse_grpc_lb_v1_InitialLoadBalanceResponse_grpc_lb_v1_ServerList_grpc_lb_v1_Server)
 #endif
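
For context, the PB_STATIC_ASSERTs above only verify that the generated field descriptors still fit their width. If a message ever outgrows the default 8-bit descriptors, nanopb expects the build to opt into wider ones; a hedged sketch of how that is typically done (not something this change requires):

// e.g. pass a define when compiling the nanopb-generated sources:
//   -DPB_FIELD_16BIT      (or -DPB_FIELD_32BIT for very large messages)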
 
 
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
index 93333d1..066c076 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
@@ -14,6 +14,11 @@
 #endif
 
 /* Struct definitions */
+typedef struct _grpc_lb_v1_ServerList {
+    pb_callback_t servers;
+/* @@protoc_insertion_point(struct:grpc_lb_v1_ServerList) */
+} grpc_lb_v1_ServerList;
+
 typedef struct _grpc_lb_v1_ClientStatsPerToken {
     pb_callback_t load_balance_token;
     bool has_num_calls;
@@ -79,13 +84,6 @@
 /* @@protoc_insertion_point(struct:grpc_lb_v1_InitialLoadBalanceResponse) */
 } grpc_lb_v1_InitialLoadBalanceResponse;
 
-typedef struct _grpc_lb_v1_ServerList {
-    pb_callback_t servers;
-    bool has_expiration_interval;
-    grpc_lb_v1_Duration expiration_interval;
-/* @@protoc_insertion_point(struct:grpc_lb_v1_ServerList) */
-} grpc_lb_v1_ServerList;
-
 typedef struct _grpc_lb_v1_LoadBalanceRequest {
     bool has_initial_request;
     grpc_lb_v1_InitialLoadBalanceRequest initial_request;
@@ -113,7 +111,7 @@
 #define grpc_lb_v1_ClientStats_init_default      {false, grpc_lb_v1_Timestamp_init_default, false, 0, false, 0, false, 0, false, 0, {{NULL}, NULL}}
 #define grpc_lb_v1_LoadBalanceResponse_init_default {false, grpc_lb_v1_InitialLoadBalanceResponse_init_default, false, grpc_lb_v1_ServerList_init_default}
 #define grpc_lb_v1_InitialLoadBalanceResponse_init_default {false, "", false, grpc_lb_v1_Duration_init_default}
-#define grpc_lb_v1_ServerList_init_default       {{{NULL}, NULL}, false, grpc_lb_v1_Duration_init_default}
+#define grpc_lb_v1_ServerList_init_default       {{{NULL}, NULL}}
 #define grpc_lb_v1_Server_init_default           {false, {0, {0}}, false, 0, false, "", false, 0}
 #define grpc_lb_v1_Duration_init_zero            {false, 0, false, 0}
 #define grpc_lb_v1_Timestamp_init_zero           {false, 0, false, 0}
@@ -123,10 +121,11 @@
 #define grpc_lb_v1_ClientStats_init_zero         {false, grpc_lb_v1_Timestamp_init_zero, false, 0, false, 0, false, 0, false, 0, {{NULL}, NULL}}
 #define grpc_lb_v1_LoadBalanceResponse_init_zero {false, grpc_lb_v1_InitialLoadBalanceResponse_init_zero, false, grpc_lb_v1_ServerList_init_zero}
 #define grpc_lb_v1_InitialLoadBalanceResponse_init_zero {false, "", false, grpc_lb_v1_Duration_init_zero}
-#define grpc_lb_v1_ServerList_init_zero          {{{NULL}, NULL}, false, grpc_lb_v1_Duration_init_zero}
+#define grpc_lb_v1_ServerList_init_zero          {{{NULL}, NULL}}
 #define grpc_lb_v1_Server_init_zero              {false, {0, {0}}, false, 0, false, "", false, 0}
 
 /* Field tags (for use in manual encoding/decoding) */
+#define grpc_lb_v1_ServerList_servers_tag        1
 #define grpc_lb_v1_ClientStatsPerToken_load_balance_token_tag 1
 #define grpc_lb_v1_ClientStatsPerToken_num_calls_tag 2
 #define grpc_lb_v1_Duration_seconds_tag          1
@@ -146,8 +145,6 @@
 #define grpc_lb_v1_ClientStats_calls_finished_with_drop_tag 8
 #define grpc_lb_v1_InitialLoadBalanceResponse_load_balancer_delegate_tag 1
 #define grpc_lb_v1_InitialLoadBalanceResponse_client_stats_report_interval_tag 2
-#define grpc_lb_v1_ServerList_servers_tag        1
-#define grpc_lb_v1_ServerList_expiration_interval_tag 3
 #define grpc_lb_v1_LoadBalanceRequest_initial_request_tag 1
 #define grpc_lb_v1_LoadBalanceRequest_client_stats_tag 2
 #define grpc_lb_v1_LoadBalanceResponse_initial_response_tag 1
@@ -162,7 +159,7 @@
 extern const pb_field_t grpc_lb_v1_ClientStats_fields[7];
 extern const pb_field_t grpc_lb_v1_LoadBalanceResponse_fields[3];
 extern const pb_field_t grpc_lb_v1_InitialLoadBalanceResponse_fields[3];
-extern const pb_field_t grpc_lb_v1_ServerList_fields[3];
+extern const pb_field_t grpc_lb_v1_ServerList_fields[2];
 extern const pb_field_t grpc_lb_v1_Server_fields[5];
 
 /* Maximum encoded size of messages (where known) */
diff --git a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
index b15ca82..0861261 100644
--- a/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc
@@ -57,12 +57,12 @@
   grpc_connectivity_state_tracker state_tracker;
 } pick_first_lb_policy;
 
-static void pf_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void pf_destroy(grpc_lb_policy* pol) {
   pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
   GPR_ASSERT(p->subchannel_list == nullptr);
   GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
   GPR_ASSERT(p->pending_picks == nullptr);
-  grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+  grpc_connectivity_state_destroy(&p->state_tracker);
   gpr_free(p);
   grpc_subchannel_index_unref();
   if (grpc_lb_pick_first_trace.enabled()) {
@@ -70,8 +70,9 @@
   }
 }
 
-static void shutdown_locked(grpc_exec_ctx* exec_ctx, pick_first_lb_policy* p,
-                            grpc_error* error) {
+static void pf_shutdown_locked(grpc_lb_policy* pol) {
+  pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
+  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
   if (grpc_lb_pick_first_trace.enabled()) {
     gpr_log(GPR_DEBUG, "Pick First %p Shutting down", p);
   }
@@ -80,31 +81,27 @@
   while ((pp = p->pending_picks) != nullptr) {
     p->pending_picks = pp->next;
     *pp->target = nullptr;
-    GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
     gpr_free(pp);
   }
-  grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                              GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
-                              "shutdown");
+  grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
+                              GRPC_ERROR_REF(error), "shutdown");
   if (p->subchannel_list != nullptr) {
-    grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+    grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
                                                "pf_shutdown");
     p->subchannel_list = nullptr;
   }
   if (p->latest_pending_subchannel_list != nullptr) {
     grpc_lb_subchannel_list_shutdown_and_unref(
-        exec_ctx, p->latest_pending_subchannel_list, "pf_shutdown");
+        p->latest_pending_subchannel_list, "pf_shutdown");
     p->latest_pending_subchannel_list = nullptr;
   }
+  grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_pick_first_trace,
+                               GRPC_ERROR_CANCELLED);
   GRPC_ERROR_UNREF(error);
 }
 
-static void pf_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
-  shutdown_locked(exec_ctx, (pick_first_lb_policy*)pol,
-                  GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown"));
-}
-
-static void pf_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+static void pf_cancel_pick_locked(grpc_lb_policy* pol,
                                   grpc_connected_subchannel** target,
                                   grpc_error* error) {
   pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
@@ -114,7 +111,7 @@
     pending_pick* next = pp->next;
     if (pp->target == target) {
       *target = nullptr;
-      GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
+      GRPC_CLOSURE_SCHED(pp->on_complete,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick Cancelled", &error, 1));
       gpr_free(pp);
@@ -127,7 +124,7 @@
   GRPC_ERROR_UNREF(error);
 }
 
-static void pf_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+static void pf_cancel_picks_locked(grpc_lb_policy* pol,
                                    uint32_t initial_metadata_flags_mask,
                                    uint32_t initial_metadata_flags_eq,
                                    grpc_error* error) {
@@ -138,7 +135,7 @@
     pending_pick* next = pp->next;
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
+      GRPC_CLOSURE_SCHED(pp->on_complete,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick Cancelled", &error, 1));
       gpr_free(pp);
@@ -151,27 +148,31 @@
   GRPC_ERROR_UNREF(error);
 }
 
-static void start_picking_locked(grpc_exec_ctx* exec_ctx,
-                                 pick_first_lb_policy* p) {
+static void start_picking_locked(pick_first_lb_policy* p) {
   p->started_picking = true;
   if (p->subchannel_list != nullptr &&
       p->subchannel_list->num_subchannels > 0) {
     p->subchannel_list->checking_subchannel = 0;
-    grpc_lb_subchannel_list_ref_for_connectivity_watch(
-        p->subchannel_list, "connectivity_watch+start_picking");
-    grpc_lb_subchannel_data_start_connectivity_watch(
-        exec_ctx, &p->subchannel_list->subchannels[0]);
+    for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
+      if (p->subchannel_list->subchannels[i].subchannel != nullptr) {
+        grpc_lb_subchannel_list_ref_for_connectivity_watch(
+            p->subchannel_list, "connectivity_watch+start_picking");
+        grpc_lb_subchannel_data_start_connectivity_watch(
+            &p->subchannel_list->subchannels[i]);
+        break;
+      }
+    }
   }
 }
 
-static void pf_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void pf_exit_idle_locked(grpc_lb_policy* pol) {
   pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
   if (!p->started_picking) {
-    start_picking_locked(exec_ctx, p);
+    start_picking_locked(p);
   }
 }
 
-static int pf_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+static int pf_pick_locked(grpc_lb_policy* pol,
                           const grpc_lb_policy_pick_args* pick_args,
                           grpc_connected_subchannel** target,
                           grpc_call_context_element* context, void** user_data,
@@ -185,7 +186,7 @@
   }
   // No subchannel selected yet, so handle asynchronously.
   if (!p->started_picking) {
-    start_picking_locked(exec_ctx, p);
+    start_picking_locked(p);
   }
   pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
   pp->next = p->pending_picks;
@@ -196,48 +197,47 @@
   return 0;
 }
 
-static void destroy_unselected_subchannels_locked(grpc_exec_ctx* exec_ctx,
-                                                  pick_first_lb_policy* p) {
+static void destroy_unselected_subchannels_locked(pick_first_lb_policy* p) {
   for (size_t i = 0; i < p->subchannel_list->num_subchannels; ++i) {
     grpc_lb_subchannel_data* sd = &p->subchannel_list->subchannels[i];
     if (p->selected != sd) {
-      grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
+      grpc_lb_subchannel_data_unref_subchannel(sd,
                                                "selected_different_subchannel");
     }
   }
 }
 
 static grpc_connectivity_state pf_check_connectivity_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol, grpc_error** error) {
+    grpc_lb_policy* pol, grpc_error** error) {
   pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
   return grpc_connectivity_state_get(&p->state_tracker, error);
 }
 
-static void pf_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
-                                             grpc_lb_policy* pol,
+static void pf_notify_on_state_change_locked(grpc_lb_policy* pol,
                                              grpc_connectivity_state* current,
                                              grpc_closure* notify) {
   pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
-  grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
-                                                 current, notify);
+  grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
+                                                 notify);
 }
 
-static void pf_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
-                               grpc_closure* closure) {
+static void pf_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
+                               grpc_closure* on_ack) {
   pick_first_lb_policy* p = (pick_first_lb_policy*)pol;
   if (p->selected) {
-    grpc_connected_subchannel_ping(exec_ctx, p->selected->connected_subchannel,
-                                   closure);
+    grpc_connected_subchannel_ping(p->selected->connected_subchannel,
+                                   on_initiate, on_ack);
   } else {
-    GRPC_CLOSURE_SCHED(exec_ctx, closure,
+    GRPC_CLOSURE_SCHED(on_initiate,
+                       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
+    GRPC_CLOSURE_SCHED(on_ack,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Not connected"));
   }
 }
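
The ping path above now takes a pair of closures instead of one; the sketch below shows how a caller might drive that convention. The callback names are illustrative, and calling the file-local pf_ping_one_locked directly is only for demonstration.

// Sketch: on_initiate runs when the ping is written, on_ack when the peer
// acknowledges it; both are scheduled with an error if nothing is connected.
static void on_ping_initiated(void* arg, grpc_error* error) { /* no-op */ }
static void on_ping_acked(void* arg, grpc_error* error) { /* no-op */ }

static void ping_policy_sketch(grpc_lb_policy* policy) {
  grpc_closure initiate;
  grpc_closure ack;
  GRPC_CLOSURE_INIT(&initiate, on_ping_initiated, nullptr,
                    grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_INIT(&ack, on_ping_acked, nullptr, grpc_schedule_on_exec_ctx);
  pf_ping_one_locked(policy, &initiate, &ack);
}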
 
-static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error);
+static void pf_connectivity_changed_locked(void* arg, grpc_error* error);
 
-static void pf_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+static void pf_update_locked(grpc_lb_policy* policy,
                              const grpc_lb_policy_args* args) {
   pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
   const grpc_arg* arg =
@@ -246,7 +246,7 @@
     if (p->subchannel_list == nullptr) {
       // If we don't have a current subchannel list, go into TRANSIENT FAILURE.
       grpc_connectivity_state_set(
-          exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+          &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
           "pf_update_missing");
     } else {
@@ -265,17 +265,17 @@
             (void*)p, (unsigned long)addresses->num_addresses);
   }
   grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
-      exec_ctx, &p->base, &grpc_lb_pick_first_trace, addresses, args,
+      &p->base, &grpc_lb_pick_first_trace, addresses, args,
       pf_connectivity_changed_locked);
   if (subchannel_list->num_subchannels == 0) {
     // Empty update or no valid subchannels. Unsubscribe from all current
     // subchannels and put the channel in TRANSIENT_FAILURE.
     grpc_connectivity_state_set(
-        exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+        &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
         "pf_update_empty");
     if (p->subchannel_list != nullptr) {
-      grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+      grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
                                                  "sl_shutdown_empty_update");
     }
     p->subchannel_list = subchannel_list;  // Empty list.
@@ -286,7 +286,7 @@
     // We don't yet have a selected subchannel, so replace the current
     // subchannel list immediately.
     if (p->subchannel_list != nullptr) {
-      grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+      grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
                                                  "pf_update_before_selected");
     }
     p->subchannel_list = subchannel_list;
@@ -311,19 +311,19 @@
         p->selected = sd;
         if (p->subchannel_list != nullptr) {
           grpc_lb_subchannel_list_shutdown_and_unref(
-              exec_ctx, p->subchannel_list, "pf_update_includes_selected");
+              p->subchannel_list, "pf_update_includes_selected");
         }
         p->subchannel_list = subchannel_list;
-        destroy_unselected_subchannels_locked(exec_ctx, p);
+        destroy_unselected_subchannels_locked(p);
         grpc_lb_subchannel_list_ref_for_connectivity_watch(
             subchannel_list, "connectivity_watch+replace_selected");
-        grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+        grpc_lb_subchannel_data_start_connectivity_watch(sd);
         // If there was a previously pending update (which may or may
         // not have contained the currently selected subchannel), drop
         // it, so that it doesn't override what we've done here.
         if (p->latest_pending_subchannel_list != nullptr) {
           grpc_lb_subchannel_list_shutdown_and_unref(
-              exec_ctx, p->latest_pending_subchannel_list,
+              p->latest_pending_subchannel_list,
               "pf_update_includes_selected+outdated");
           p->latest_pending_subchannel_list = nullptr;
         }
@@ -343,8 +343,7 @@
                 (void*)subchannel_list);
       }
       grpc_lb_subchannel_list_shutdown_and_unref(
-          exec_ctx, p->latest_pending_subchannel_list,
-          "sl_outdated_dont_smash");
+          p->latest_pending_subchannel_list, "sl_outdated_dont_smash");
     }
     p->latest_pending_subchannel_list = subchannel_list;
   }
@@ -354,12 +353,11 @@
     grpc_lb_subchannel_list_ref_for_connectivity_watch(
         subchannel_list, "connectivity_watch+update");
     grpc_lb_subchannel_data_start_connectivity_watch(
-        exec_ctx, &subchannel_list->subchannels[0]);
+        &subchannel_list->subchannels[0]);
   }
 }
 
-static void pf_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void pf_connectivity_changed_locked(void* arg, grpc_error* error) {
   grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
   pick_first_lb_policy* p = (pick_first_lb_policy*)sd->subchannel_list->policy;
   if (grpc_lb_pick_first_trace.enabled()) {
@@ -377,18 +375,18 @@
   }
   // If the policy is shutting down, unref and return.
   if (p->shutdown) {
-    grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
-    grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_shutdown");
-    grpc_lb_subchannel_list_unref_for_connectivity_watch(
-        exec_ctx, sd->subchannel_list, "pf_shutdown");
+    grpc_lb_subchannel_data_stop_connectivity_watch(sd);
+    grpc_lb_subchannel_data_unref_subchannel(sd, "pf_shutdown");
+    grpc_lb_subchannel_list_unref_for_connectivity_watch(sd->subchannel_list,
+                                                         "pf_shutdown");
     return;
   }
   // If the subchannel list is shutting down, stop watching.
   if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
-    grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
-    grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "pf_sl_shutdown");
-    grpc_lb_subchannel_list_unref_for_connectivity_watch(
-        exec_ctx, sd->subchannel_list, "pf_sl_shutdown");
+    grpc_lb_subchannel_data_stop_connectivity_watch(sd);
+    grpc_lb_subchannel_data_unref_subchannel(sd, "pf_sl_shutdown");
+    grpc_lb_subchannel_list_unref_for_connectivity_watch(sd->subchannel_list,
+                                                         "pf_sl_shutdown");
     return;
   }
   // If we're still here, the notification must be for a subchannel in
@@ -404,29 +402,45 @@
     if (sd->curr_connectivity_state != GRPC_CHANNEL_READY &&
         p->latest_pending_subchannel_list != nullptr) {
       p->selected = nullptr;
+      grpc_lb_subchannel_data_stop_connectivity_watch(sd);
+      grpc_lb_subchannel_list_unref_for_connectivity_watch(
+          sd->subchannel_list, "selected_not_ready+switch_to_update");
       grpc_lb_subchannel_list_shutdown_and_unref(
-          exec_ctx, p->subchannel_list, "selected_not_ready+switch_to_update");
+          p->subchannel_list, "selected_not_ready+switch_to_update");
       p->subchannel_list = p->latest_pending_subchannel_list;
       p->latest_pending_subchannel_list = nullptr;
       grpc_connectivity_state_set(
-          exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+          &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
           GRPC_ERROR_REF(error), "selected_not_ready+switch_to_update");
     } else {
-      if (sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
-        /* if the selected channel goes bad, we're done */
-        sd->curr_connectivity_state = GRPC_CHANNEL_SHUTDOWN;
+      // TODO(juanlishen): We re-resolve when the selected subchannel goes into
+      // TRANSIENT_FAILURE because we used to shut down in this case before
+      // re-resolution was introduced. We still need to investigate whether we
+      // really want to take any action here instead of simply waiting for the
+      // selected subchannel to reconnect.
+      if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN ||
+          sd->curr_connectivity_state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
+        // If the selected channel goes bad, request a re-resolution.
+        grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE,
+                                    GRPC_ERROR_NONE,
+                                    "selected_changed+reresolve");
+        p->started_picking = false;
+        grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_pick_first_trace,
+                                     GRPC_ERROR_NONE);
+      } else {
+        grpc_connectivity_state_set(&p->state_tracker,
+                                    sd->curr_connectivity_state,
+                                    GRPC_ERROR_REF(error), "selected_changed");
       }
-      grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                                  sd->curr_connectivity_state,
-                                  GRPC_ERROR_REF(error), "selected_changed");
       if (sd->curr_connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
         // Renew notification.
-        grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+        grpc_lb_subchannel_data_start_connectivity_watch(sd);
       } else {
-        grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+        p->selected = nullptr;
+        grpc_lb_subchannel_data_stop_connectivity_watch(sd);
         grpc_lb_subchannel_list_unref_for_connectivity_watch(
-            exec_ctx, sd->subchannel_list, "pf_selected_shutdown");
-        shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
+            sd->subchannel_list, "pf_selected_shutdown");
+        grpc_lb_subchannel_data_unref_subchannel(sd, "pf_selected_shutdown");
       }
     }
     return;
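
To summarize the new failure handling above: instead of shutting the policy down, the selected-subchannel failure path now reports IDLE, rewinds started_picking, and asks for re-resolution. Below is a hedged sketch of just that path; the helper name is invented, and the real logic is inline above and in grpc_lb_policy_try_reresolve.

// Sketch: what pick_first does when its selected subchannel goes bad.
static void reresolve_on_failure_sketch(pick_first_lb_policy* p) {
  // Report IDLE so the owning channel knows nothing is usable right now.
  grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE,
                              GRPC_ERROR_NONE, "selected_changed+reresolve");
  // Let a later pick restart the connectivity watches from scratch.
  p->started_picking = false;
  // Ask for re-resolution via the request_reresolution closure installed
  // earlier (see pf_set_reresolve_closure_locked further down in this file).
  grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_pick_first_trace,
                               GRPC_ERROR_NONE);
}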
@@ -446,15 +460,14 @@
       // p->subchannel_list.
       if (sd->subchannel_list == p->latest_pending_subchannel_list) {
         GPR_ASSERT(p->subchannel_list != nullptr);
-        grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+        grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
                                                    "finish_update");
         p->subchannel_list = p->latest_pending_subchannel_list;
         p->latest_pending_subchannel_list = nullptr;
       }
       // Cases 1 and 2.
-      grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                                  GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
-                                  "connecting_ready");
+      grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_READY,
+                                  GRPC_ERROR_NONE, "connecting_ready");
       sd->connected_subchannel = GRPC_CONNECTED_SUBCHANNEL_REF(
           grpc_subchannel_get_connected_subchannel(sd->subchannel),
           "connected");
@@ -464,7 +477,7 @@
                 (void*)sd->subchannel);
       }
       // Drop all other subchannels, since we are now connected.
-      destroy_unselected_subchannels_locked(exec_ctx, p);
+      destroy_unselected_subchannels_locked(p);
       // Update any calls that were waiting for a pick.
       pending_pick* pp;
       while ((pp = p->pending_picks)) {
@@ -476,15 +489,15 @@
                   "Servicing pending pick with selected subchannel %p",
                   (void*)p->selected);
         }
-        GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+        GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
         gpr_free(pp);
       }
       // Renew notification.
-      grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+      grpc_lb_subchannel_data_start_connectivity_watch(sd);
       break;
     }
     case GRPC_CHANNEL_TRANSIENT_FAILURE: {
-      grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
+      grpc_lb_subchannel_data_stop_connectivity_watch(sd);
       do {
         sd->subchannel_list->checking_subchannel =
             (sd->subchannel_list->checking_subchannel + 1) %
@@ -497,29 +510,28 @@
       if (sd->subchannel_list->checking_subchannel == 0 &&
           sd->subchannel_list == p->subchannel_list) {
         grpc_connectivity_state_set(
-            exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+            &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
             GRPC_ERROR_REF(error), "connecting_transient_failure");
       }
       // Reuses the connectivity refs from the previous watch.
-      grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+      grpc_lb_subchannel_data_start_connectivity_watch(sd);
       break;
     }
     case GRPC_CHANNEL_CONNECTING:
     case GRPC_CHANNEL_IDLE: {
       // Only update connectivity state in case 1.
       if (sd->subchannel_list == p->subchannel_list) {
-        grpc_connectivity_state_set(
-            exec_ctx, &p->state_tracker, GRPC_CHANNEL_CONNECTING,
-            GRPC_ERROR_REF(error), "connecting_changed");
+        grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_CONNECTING,
+                                    GRPC_ERROR_REF(error),
+                                    "connecting_changed");
       }
       // Renew notification.
-      grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+      grpc_lb_subchannel_data_start_connectivity_watch(sd);
       break;
     }
     case GRPC_CHANNEL_SHUTDOWN: {
-      grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
-      grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
-                                               "pf_candidate_shutdown");
+      grpc_lb_subchannel_data_stop_connectivity_watch(sd);
+      grpc_lb_subchannel_data_unref_subchannel(sd, "pf_candidate_shutdown");
       // Advance to next subchannel and check its state.
       grpc_lb_subchannel_data* original_sd = sd;
       do {
@@ -531,24 +543,36 @@
       } while (sd->subchannel == nullptr && sd != original_sd);
       if (sd == original_sd) {
         grpc_lb_subchannel_list_unref_for_connectivity_watch(
-            exec_ctx, sd->subchannel_list, "pf_candidate_shutdown");
-        shutdown_locked(exec_ctx, p,
-                        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                            "Pick first exhausted channels", &error, 1));
-        break;
+            sd->subchannel_list, "pf_exhausted_subchannels");
+        if (sd->subchannel_list == p->subchannel_list) {
+          grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE,
+                                      GRPC_ERROR_NONE,
+                                      "exhausted_subchannels+reresolve");
+          p->started_picking = false;
+          grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_pick_first_trace,
+                                       GRPC_ERROR_NONE);
+        }
+      } else {
+        if (sd->subchannel_list == p->subchannel_list) {
+          grpc_connectivity_state_set(
+              &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+              GRPC_ERROR_REF(error), "subchannel_failed");
+        }
+        // Reuses the connectivity refs from the previous watch.
+        grpc_lb_subchannel_data_start_connectivity_watch(sd);
       }
-      if (sd->subchannel_list == p->subchannel_list) {
-        grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                                    GRPC_CHANNEL_TRANSIENT_FAILURE,
-                                    GRPC_ERROR_REF(error), "subchannel_failed");
-      }
-      // Reuses the connectivity refs from the previous watch.
-      grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
-      break;
     }
   }
 }
 
+static void pf_set_reresolve_closure_locked(
+    grpc_lb_policy* policy, grpc_closure* request_reresolution) {
+  pick_first_lb_policy* p = (pick_first_lb_policy*)policy;
+  GPR_ASSERT(!p->shutdown);
+  GPR_ASSERT(policy->request_reresolution == nullptr);
+  policy->request_reresolution = request_reresolution;
+}
+
 static const grpc_lb_policy_vtable pick_first_lb_policy_vtable = {
     pf_destroy,
     pf_shutdown_locked,
@@ -559,21 +583,21 @@
     pf_exit_idle_locked,
     pf_check_connectivity_locked,
     pf_notify_on_state_change_locked,
-    pf_update_locked};
+    pf_update_locked,
+    pf_set_reresolve_closure_locked};
 
 static void pick_first_factory_ref(grpc_lb_policy_factory* factory) {}
 
 static void pick_first_factory_unref(grpc_lb_policy_factory* factory) {}
 
-static grpc_lb_policy* create_pick_first(grpc_exec_ctx* exec_ctx,
-                                         grpc_lb_policy_factory* factory,
+static grpc_lb_policy* create_pick_first(grpc_lb_policy_factory* factory,
                                          grpc_lb_policy_args* args) {
   GPR_ASSERT(args->client_channel_factory != nullptr);
   pick_first_lb_policy* p = (pick_first_lb_policy*)gpr_zalloc(sizeof(*p));
   if (grpc_lb_pick_first_trace.enabled()) {
     gpr_log(GPR_DEBUG, "Pick First %p created.", (void*)p);
   }
-  pf_update_locked(exec_ctx, &p->base, args);
+  pf_update_locked(&p->base, args);
   grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable, args->combiner);
   grpc_subchannel_index_ref();
   return &p->base;
@@ -592,8 +616,8 @@
 
 /* Plugin registration */
 
-extern "C" void grpc_lb_policy_pick_first_init() {
+void grpc_lb_policy_pick_first_init() {
   grpc_register_lb_policy(pick_first_lb_factory_create());
 }
 
-extern "C" void grpc_lb_policy_pick_first_shutdown() {}
+void grpc_lb_policy_pick_first_shutdown() {}
diff --git a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
index 5e54d1f..b0c8401 100644
--- a/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc
@@ -20,9 +20,9 @@
  *
  * Before every pick, the \a get_next_ready_subchannel_index_locked function
  * returns the p->subchannel_list->subchannels index for the next subchannel,
- * respecting the relative
- * order of the addresses provided upon creation or updates. Note however that
- * updates will start picking from the beginning of the updated list. */
+ * respecting the relative order of the addresses provided upon creation or
+ * updates. Note however that updates will start picking from the beginning of
+ * the updated list. */
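
The comment above describes the rotation only informally; below is a hedged, self-contained sketch of the modular scan it implies. This is illustrative pseudologic, not the body of get_next_ready_subchannel_index_locked, and the last_ready_index parameter stands in for state the real policy keeps internally.

// Sketch: starting just past the last subchannel that was handed out, scan
// the list in order (wrapping around) and return the first READY entry;
// return num_subchannels if none is READY.
static size_t next_ready_index_sketch(grpc_lb_subchannel_list* list,
                                      size_t last_ready_index) {
  for (size_t i = 0; i < list->num_subchannels; ++i) {
    const size_t index = (last_ready_index + 1 + i) % list->num_subchannels;
    if (list->subchannels[index].curr_connectivity_state ==
        GRPC_CHANNEL_READY) {
      return index;
    }
  }
  return list->num_subchannels;  // nothing READY right now
}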
 
 #include <string.h>
 
@@ -154,7 +154,7 @@
   }
 }
 
-static void rr_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void rr_destroy(grpc_lb_policy* pol) {
   round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
   if (grpc_lb_round_robin_trace.enabled()) {
     gpr_log(GPR_DEBUG, "[RR %p] Destroying Round Robin policy at %p",
@@ -162,13 +162,14 @@
   }
   GPR_ASSERT(p->subchannel_list == nullptr);
   GPR_ASSERT(p->latest_pending_subchannel_list == nullptr);
-  grpc_connectivity_state_destroy(exec_ctx, &p->state_tracker);
+  grpc_connectivity_state_destroy(&p->state_tracker);
   grpc_subchannel_index_unref();
   gpr_free(p);
 }
 
-static void shutdown_locked(grpc_exec_ctx* exec_ctx, round_robin_lb_policy* p,
-                            grpc_error* error) {
+static void rr_shutdown_locked(grpc_lb_policy* pol) {
+  round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
+  grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
   if (grpc_lb_round_robin_trace.enabled()) {
     gpr_log(GPR_DEBUG, "[RR %p] Shutting down", p);
   }
@@ -177,33 +178,27 @@
   while ((pp = p->pending_picks) != nullptr) {
     p->pending_picks = pp->next;
     *pp->target = nullptr;
-    GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_REF(error));
     gpr_free(pp);
   }
-  grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                              GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
-                              "rr_shutdown");
+  grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_SHUTDOWN,
+                              GRPC_ERROR_REF(error), "rr_shutdown");
   if (p->subchannel_list != nullptr) {
-    grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+    grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
                                                "sl_shutdown_rr_shutdown");
     p->subchannel_list = nullptr;
   }
   if (p->latest_pending_subchannel_list != nullptr) {
     grpc_lb_subchannel_list_shutdown_and_unref(
-        exec_ctx, p->latest_pending_subchannel_list,
-        "sl_shutdown_pending_rr_shutdown");
+        p->latest_pending_subchannel_list, "sl_shutdown_pending_rr_shutdown");
     p->latest_pending_subchannel_list = nullptr;
   }
+  grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_round_robin_trace,
+                               GRPC_ERROR_CANCELLED);
   GRPC_ERROR_UNREF(error);
 }
 
-static void rr_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
-  round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
-  shutdown_locked(exec_ctx, p,
-                  GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Shutdown"));
-}
-
-static void rr_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+static void rr_cancel_pick_locked(grpc_lb_policy* pol,
                                   grpc_connected_subchannel** target,
                                   grpc_error* error) {
   round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
@@ -213,7 +208,7 @@
     pending_pick* next = pp->next;
     if (pp->target == target) {
       *target = nullptr;
-      GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
+      GRPC_CLOSURE_SCHED(pp->on_complete,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick cancelled", &error, 1));
       gpr_free(pp);
@@ -226,7 +221,7 @@
   GRPC_ERROR_UNREF(error);
 }
 
-static void rr_cancel_picks_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+static void rr_cancel_picks_locked(grpc_lb_policy* pol,
                                    uint32_t initial_metadata_flags_mask,
                                    uint32_t initial_metadata_flags_eq,
                                    grpc_error* error) {
@@ -238,7 +233,7 @@
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
       *pp->target = nullptr;
-      GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete,
+      GRPC_CLOSURE_SCHED(pp->on_complete,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick cancelled", &error, 1));
       gpr_free(pp);
@@ -251,25 +246,26 @@
   GRPC_ERROR_UNREF(error);
 }
 
-static void start_picking_locked(grpc_exec_ctx* exec_ctx,
-                                 round_robin_lb_policy* p) {
+static void start_picking_locked(round_robin_lb_policy* p) {
   p->started_picking = true;
   for (size_t i = 0; i < p->subchannel_list->num_subchannels; i++) {
-    grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
-                                                       "connectivity_watch");
-    grpc_lb_subchannel_data_start_connectivity_watch(
-        exec_ctx, &p->subchannel_list->subchannels[i]);
+    if (p->subchannel_list->subchannels[i].subchannel != nullptr) {
+      grpc_lb_subchannel_list_ref_for_connectivity_watch(p->subchannel_list,
+                                                         "connectivity_watch");
+      grpc_lb_subchannel_data_start_connectivity_watch(
+          &p->subchannel_list->subchannels[i]);
+    }
   }
 }
 
-static void rr_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void rr_exit_idle_locked(grpc_lb_policy* pol) {
   round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
   if (!p->started_picking) {
-    start_picking_locked(exec_ctx, p);
+    start_picking_locked(p);
   }
 }
 
-static int rr_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+static int rr_pick_locked(grpc_lb_policy* pol,
                           const grpc_lb_policy_pick_args* pick_args,
                           grpc_connected_subchannel** target,
                           grpc_call_context_element* context, void** user_data,
@@ -306,7 +302,7 @@
   }
   /* No pick currently available. Save it for later in the list of pending picks. */
   if (!p->started_picking) {
-    start_picking_locked(exec_ctx, p);
+    start_picking_locked(p);
   }
   pending_pick* pp = (pending_pick*)gpr_malloc(sizeof(*pp));
   pp->next = p->pending_picks;
@@ -346,75 +342,64 @@
 }
 
 /** Sets the policy's connectivity status based on that of the passed-in \a sd
- * (the grpc_lb_subchannel_data associted with the updated subchannel) and the
- * subchannel list \a sd belongs to (sd->subchannel_list). \a error will only be
- * used upon policy transition to TRANSIENT_FAILURE or SHUTDOWN. Returns the
- * connectivity status set. */
-static grpc_connectivity_state update_lb_connectivity_status_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, grpc_error* error) {
+ * (the grpc_lb_subchannel_data associated with the updated subchannel) and the
+ * subchannel list \a sd belongs to (sd->subchannel_list). \a error will be used
+ * only if the policy transitions to state TRANSIENT_FAILURE. */
+static void update_lb_connectivity_status_locked(grpc_lb_subchannel_data* sd,
+                                                 grpc_error* error) {
   /* In priority order. The first rule to match terminates the search (ie, if we
    * are on rule n, all previous rules were unfulfilled).
    *
    * 1) RULE: ANY subchannel is READY => policy is READY.
-   *    CHECK: At least one subchannel is ready iff p->ready_list is NOT empty.
+   *    CHECK: subchannel_list->num_ready > 0.
    *
    * 2) RULE: ANY subchannel is CONNECTING => policy is CONNECTING.
    *    CHECK: sd->curr_connectivity_state == CONNECTING.
    *
-   * 3) RULE: ALL subchannels are SHUTDOWN => policy is SHUTDOWN.
-   *    CHECK: p->subchannel_list->num_shutdown ==
-   *           p->subchannel_list->num_subchannels.
+   * 3) RULE: ALL subchannels are SHUTDOWN => policy is IDLE (and requests
+   *          re-resolution).
+   *    CHECK: subchannel_list->num_shutdown ==
+   *           subchannel_list->num_subchannels.
    *
-   * 4) RULE: ALL subchannels are TRANSIENT_FAILURE => policy is
-   *    TRANSIENT_FAILURE.
-   *    CHECK: p->num_transient_failures == p->subchannel_list->num_subchannels.
-   *
-   * 5) RULE: ALL subchannels are IDLE => policy is IDLE.
-   *    CHECK: p->num_idle == p->subchannel_list->num_subchannels.
+   * 4) RULE: ALL subchannels are SHUTDOWN or TRANSIENT_FAILURE => policy is
+   *          TRANSIENT_FAILURE.
+   *    CHECK: subchannel_list->num_shutdown +
+   *             subchannel_list->num_transient_failures ==
+   *           subchannel_list->num_subchannels.
    */
-  grpc_connectivity_state new_state = sd->curr_connectivity_state;
+  // TODO(juanlishen): For rule 4, we may want to re-resolve instead.
   grpc_lb_subchannel_list* subchannel_list = sd->subchannel_list;
   round_robin_lb_policy* p = (round_robin_lb_policy*)subchannel_list->policy;
-  if (subchannel_list->num_ready > 0) { /* 1) READY */
-    grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_READY,
+  GPR_ASSERT(sd->curr_connectivity_state != GRPC_CHANNEL_IDLE);
+  if (subchannel_list->num_ready > 0) {
+    /* 1) READY */
+    grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_READY,
                                 GRPC_ERROR_NONE, "rr_ready");
-    new_state = GRPC_CHANNEL_READY;
-  } else if (sd->curr_connectivity_state ==
-             GRPC_CHANNEL_CONNECTING) { /* 2) CONNECTING */
-    grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                                GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
-                                "rr_connecting");
-    new_state = GRPC_CHANNEL_CONNECTING;
-  } else if (p->subchannel_list->num_shutdown ==
-             p->subchannel_list->num_subchannels) { /* 3) SHUTDOWN */
-    grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
-                                GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
-                                "rr_shutdown");
-    p->shutdown = true;
-    new_state = GRPC_CHANNEL_SHUTDOWN;
-    if (grpc_lb_round_robin_trace.enabled()) {
-      gpr_log(GPR_INFO,
-              "[RR %p] Shutting down: all subchannels have gone into shutdown",
-              (void*)p);
-    }
-  } else if (subchannel_list->num_transient_failures ==
-             p->subchannel_list->num_subchannels) { /* 4) TRANSIENT_FAILURE */
-    grpc_connectivity_state_set(exec_ctx, &p->state_tracker,
+  } else if (sd->curr_connectivity_state == GRPC_CHANNEL_CONNECTING) {
+    /* 2) CONNECTING */
+    grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_CONNECTING,
+                                GRPC_ERROR_NONE, "rr_connecting");
+  } else if (subchannel_list->num_shutdown ==
+             subchannel_list->num_subchannels) {
+    /* 3) IDLE and re-resolve */
+    grpc_connectivity_state_set(&p->state_tracker, GRPC_CHANNEL_IDLE,
+                                GRPC_ERROR_NONE,
+                                "rr_exhausted_subchannels+reresolve");
+    p->started_picking = false;
+    grpc_lb_policy_try_reresolve(&p->base, &grpc_lb_round_robin_trace,
+                                 GRPC_ERROR_NONE);
+  } else if (subchannel_list->num_shutdown +
+                 subchannel_list->num_transient_failures ==
+             subchannel_list->num_subchannels) {
+    /* 4) TRANSIENT_FAILURE */
+    grpc_connectivity_state_set(&p->state_tracker,
                                 GRPC_CHANNEL_TRANSIENT_FAILURE,
                                 GRPC_ERROR_REF(error), "rr_transient_failure");
-    new_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
-  } else if (subchannel_list->num_idle ==
-             p->subchannel_list->num_subchannels) { /* 5) IDLE */
-    grpc_connectivity_state_set(exec_ctx, &p->state_tracker, GRPC_CHANNEL_IDLE,
-                                GRPC_ERROR_NONE, "rr_idle");
-    new_state = GRPC_CHANNEL_IDLE;
   }
   GRPC_ERROR_UNREF(error);
-  return new_state;
 }
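
For readability, the four rules spelled out in the comment above can be read as one chain of checks; here is a hedged sketch that mirrors (not replaces) update_lb_connectivity_status_locked. It only computes the aggregate state and leaves the re-resolution side effect of rule 3 as a comment.

// Sketch: fold per-subchannel counters into one policy-level state.
// Returns true and fills *state when one of the four rules applies;
// otherwise the policy state is left unchanged (a simplification).
static bool aggregate_state_sketch(const grpc_lb_subchannel_list* list,
                                   grpc_connectivity_state latest,
                                   grpc_connectivity_state* state) {
  if (list->num_ready > 0) {                                  /* rule 1 */
    *state = GRPC_CHANNEL_READY;
  } else if (latest == GRPC_CHANNEL_CONNECTING) {             /* rule 2 */
    *state = GRPC_CHANNEL_CONNECTING;
  } else if (list->num_shutdown == list->num_subchannels) {   /* rule 3 */
    *state = GRPC_CHANNEL_IDLE;  // and the policy requests re-resolution
  } else if (list->num_shutdown + list->num_transient_failures ==
             list->num_subchannels) {                         /* rule 4 */
    *state = GRPC_CHANNEL_TRANSIENT_FAILURE;
  } else {
    return false;
  }
  return true;
}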
 
-static void rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void rr_connectivity_changed_locked(void* arg, grpc_error* error) {
   grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
   round_robin_lb_policy* p =
       (round_robin_lb_policy*)sd->subchannel_list->policy;
@@ -432,18 +417,18 @@
   }
   // If the policy is shutting down, unref and return.
   if (p->shutdown) {
-    grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
-    grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_shutdown");
-    grpc_lb_subchannel_list_unref_for_connectivity_watch(
-        exec_ctx, sd->subchannel_list, "rr_shutdown");
+    grpc_lb_subchannel_data_stop_connectivity_watch(sd);
+    grpc_lb_subchannel_data_unref_subchannel(sd, "rr_shutdown");
+    grpc_lb_subchannel_list_unref_for_connectivity_watch(sd->subchannel_list,
+                                                         "rr_shutdown");
     return;
   }
   // If the subchannel list is shutting down, stop watching.
   if (sd->subchannel_list->shutting_down || error == GRPC_ERROR_CANCELLED) {
-    grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
-    grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, "rr_sl_shutdown");
-    grpc_lb_subchannel_list_unref_for_connectivity_watch(
-        exec_ctx, sd->subchannel_list, "rr_sl_shutdown");
+    grpc_lb_subchannel_data_stop_connectivity_watch(sd);
+    grpc_lb_subchannel_data_unref_subchannel(sd, "rr_sl_shutdown");
+    grpc_lb_subchannel_list_unref_for_connectivity_watch(sd->subchannel_list,
+                                                         "rr_sl_shutdown");
     return;
   }
   // If we're still here, the notification must be for a subchannel in
@@ -454,21 +439,15 @@
   // state (which was set by the connectivity state watcher) to
   // curr_connectivity_state, which is what we use inside of the combiner.
   sd->curr_connectivity_state = sd->pending_connectivity_state_unsafe;
-  // Update state counters and determine new overall state.
+  // Update state counters and the policy's connectivity state.
   update_state_counters_locked(sd);
-  const grpc_connectivity_state new_policy_connectivity_state =
-      update_lb_connectivity_status_locked(exec_ctx, sd, GRPC_ERROR_REF(error));
-  // If the sd's new state is SHUTDOWN, unref the subchannel, and if the new
-  // policy's state is SHUTDOWN, clean up.
+  update_lb_connectivity_status_locked(sd, GRPC_ERROR_REF(error));
+  // If the sd's new state is SHUTDOWN, unref the subchannel.
   if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
-    grpc_lb_subchannel_data_stop_connectivity_watch(exec_ctx, sd);
-    grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
-                                             "rr_connectivity_shutdown");
+    grpc_lb_subchannel_data_stop_connectivity_watch(sd);
+    grpc_lb_subchannel_data_unref_subchannel(sd, "rr_connectivity_shutdown");
     grpc_lb_subchannel_list_unref_for_connectivity_watch(
-        exec_ctx, sd->subchannel_list, "rr_connectivity_shutdown");
-    if (new_policy_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
-      shutdown_locked(exec_ctx, p, GRPC_ERROR_REF(error));
-    }
+        sd->subchannel_list, "rr_connectivity_shutdown");
   } else {  // sd not in SHUTDOWN
     if (sd->curr_connectivity_state == GRPC_CHANNEL_READY) {
       if (sd->connected_subchannel == nullptr) {
@@ -496,15 +475,15 @@
         }
         if (p->subchannel_list != nullptr) {
           // dispose of the current subchannel_list
-          grpc_lb_subchannel_list_shutdown_and_unref(
-              exec_ctx, p->subchannel_list, "sl_phase_out_shutdown");
+          grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
+                                                     "sl_phase_out_shutdown");
         }
         p->subchannel_list = p->latest_pending_subchannel_list;
         p->latest_pending_subchannel_list = nullptr;
       }
       /* at this point we know there's at least one suitable subchannel. Go
        * ahead and pick one and notify the pending suitors in
-       * p->pending_picks. This preemtively replicates rr_pick()'s actions. */
+       * p->pending_picks. This preemptively replicates rr_pick()'s actions. */
       const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
       GPR_ASSERT(next_ready_index < p->subchannel_list->num_subchannels);
       grpc_lb_subchannel_data* selected =
@@ -529,32 +508,31 @@
                   (void*)p, (void*)selected->subchannel,
                   (void*)p->subchannel_list, (unsigned long)next_ready_index);
         }
-        GRPC_CLOSURE_SCHED(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
+        GRPC_CLOSURE_SCHED(pp->on_complete, GRPC_ERROR_NONE);
         gpr_free(pp);
       }
     }
     // Renew notification.
-    grpc_lb_subchannel_data_start_connectivity_watch(exec_ctx, sd);
+    grpc_lb_subchannel_data_start_connectivity_watch(sd);
   }
 }
 
 static grpc_connectivity_state rr_check_connectivity_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol, grpc_error** error) {
+    grpc_lb_policy* pol, grpc_error** error) {
   round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
   return grpc_connectivity_state_get(&p->state_tracker, error);
 }
 
-static void rr_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
-                                             grpc_lb_policy* pol,
+static void rr_notify_on_state_change_locked(grpc_lb_policy* pol,
                                              grpc_connectivity_state* current,
                                              grpc_closure* notify) {
   round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
-  grpc_connectivity_state_notify_on_state_change(exec_ctx, &p->state_tracker,
-                                                 current, notify);
+  grpc_connectivity_state_notify_on_state_change(&p->state_tracker, current,
+                                                 notify);
 }
 
-static void rr_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
-                               grpc_closure* closure) {
+static void rr_ping_one_locked(grpc_lb_policy* pol, grpc_closure* on_initiate,
+                               grpc_closure* on_ack) {
   round_robin_lb_policy* p = (round_robin_lb_policy*)pol;
   const size_t next_ready_index = get_next_ready_subchannel_index_locked(p);
   if (next_ready_index < p->subchannel_list->num_subchannels) {
@@ -562,16 +540,17 @@
         &p->subchannel_list->subchannels[next_ready_index];
     grpc_connected_subchannel* target = GRPC_CONNECTED_SUBCHANNEL_REF(
         selected->connected_subchannel, "rr_ping");
-    grpc_connected_subchannel_ping(exec_ctx, target, closure);
-    GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_ping");
+    grpc_connected_subchannel_ping(target, on_initiate, on_ack);
+    GRPC_CONNECTED_SUBCHANNEL_UNREF(target, "rr_ping");
   } else {
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, closure,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Round Robin not connected"));
+    GRPC_CLOSURE_SCHED(on_initiate, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                        "Round Robin not connected"));
+    GRPC_CLOSURE_SCHED(on_ack, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                   "Round Robin not connected"));
   }
 }
 
-static void rr_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+static void rr_update_locked(grpc_lb_policy* policy,
                              const grpc_lb_policy_args* args) {
   round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
   const grpc_arg* arg =
@@ -582,7 +561,7 @@
     // Otherwise, keep using the current subchannel list (ignore this update).
     if (p->subchannel_list == nullptr) {
       grpc_connectivity_state_set(
-          exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+          &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
           "rr_update_missing");
     }
@@ -594,15 +573,15 @@
             addresses->num_addresses);
   }
   grpc_lb_subchannel_list* subchannel_list = grpc_lb_subchannel_list_create(
-      exec_ctx, &p->base, &grpc_lb_round_robin_trace, addresses, args,
+      &p->base, &grpc_lb_round_robin_trace, addresses, args,
       rr_connectivity_changed_locked);
   if (subchannel_list->num_subchannels == 0) {
     grpc_connectivity_state_set(
-        exec_ctx, &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+        &p->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Empty update"),
         "rr_update_empty");
     if (p->subchannel_list != nullptr) {
-      grpc_lb_subchannel_list_shutdown_and_unref(exec_ctx, p->subchannel_list,
+      grpc_lb_subchannel_list_shutdown_and_unref(p->subchannel_list,
                                                  "sl_shutdown_empty_update");
     }
     p->subchannel_list = subchannel_list;  // empty list
@@ -618,7 +597,7 @@
                 (void*)subchannel_list);
       }
       grpc_lb_subchannel_list_shutdown_and_unref(
-          exec_ctx, p->latest_pending_subchannel_list, "sl_outdated");
+          p->latest_pending_subchannel_list, "sl_outdated");
     }
     p->latest_pending_subchannel_list = subchannel_list;
     for (size_t i = 0; i < subchannel_list->num_subchannels; ++i) {
@@ -629,19 +608,27 @@
       grpc_lb_subchannel_list_ref_for_connectivity_watch(subchannel_list,
                                                          "connectivity_watch");
       grpc_lb_subchannel_data_start_connectivity_watch(
-          exec_ctx, &subchannel_list->subchannels[i]);
+          &subchannel_list->subchannels[i]);
     }
   } else {
     // The policy isn't picking yet. Save the update for later, disposing of
     // previous version if any.
     if (p->subchannel_list != nullptr) {
       grpc_lb_subchannel_list_shutdown_and_unref(
-          exec_ctx, p->subchannel_list, "rr_update_before_started_picking");
+          p->subchannel_list, "rr_update_before_started_picking");
     }
     p->subchannel_list = subchannel_list;
   }
 }
 
+static void rr_set_reresolve_closure_locked(
+    grpc_lb_policy* policy, grpc_closure* request_reresolution) {
+  round_robin_lb_policy* p = (round_robin_lb_policy*)policy;
+  GPR_ASSERT(!p->shutdown);
+  GPR_ASSERT(policy->request_reresolution == nullptr);
+  policy->request_reresolution = request_reresolution;
+}
+
 static const grpc_lb_policy_vtable round_robin_lb_policy_vtable = {
     rr_destroy,
     rr_shutdown_locked,
@@ -652,14 +639,14 @@
     rr_exit_idle_locked,
     rr_check_connectivity_locked,
     rr_notify_on_state_change_locked,
-    rr_update_locked};
+    rr_update_locked,
+    rr_set_reresolve_closure_locked};
 
 static void round_robin_factory_ref(grpc_lb_policy_factory* factory) {}
 
 static void round_robin_factory_unref(grpc_lb_policy_factory* factory) {}
 
-static grpc_lb_policy* round_robin_create(grpc_exec_ctx* exec_ctx,
-                                          grpc_lb_policy_factory* factory,
+static grpc_lb_policy* round_robin_create(grpc_lb_policy_factory* factory,
                                           grpc_lb_policy_args* args) {
   GPR_ASSERT(args->client_channel_factory != nullptr);
   round_robin_lb_policy* p = (round_robin_lb_policy*)gpr_zalloc(sizeof(*p));
@@ -667,7 +654,7 @@
   grpc_subchannel_index_ref();
   grpc_connectivity_state_init(&p->state_tracker, GRPC_CHANNEL_IDLE,
                                "round_robin");
-  rr_update_locked(exec_ctx, &p->base, args);
+  rr_update_locked(&p->base, args);
   if (grpc_lb_round_robin_trace.enabled()) {
     gpr_log(GPR_DEBUG, "[RR %p] Created with %lu subchannels", (void*)p,
             (unsigned long)p->subchannel_list->num_subchannels);
@@ -688,8 +675,8 @@
 
 /* Plugin registration */
 
-extern "C" void grpc_lb_policy_round_robin_init() {
+void grpc_lb_policy_round_robin_init() {
   grpc_register_lb_policy(round_robin_lb_factory_create());
 }
 
-extern "C" void grpc_lb_policy_round_robin_shutdown() {}
+void grpc_lb_policy_round_robin_shutdown() {}
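The round_robin changes above follow the tree-wide pattern in this merge: the explicit grpc_exec_ctx* parameter is dropped from every signature, and rr_ping_one_locked now takes separate on_initiate/on_ack closures instead of a single closure. A minimal caller sketch, assuming the usual post-refactor convention of constructing the execution context on the stack at an API boundary (entry_point is illustrative and not a function in this diff):

    static void entry_point() {
      // Assumption: standard post-refactor usage; a scoped ExecCtx lives on the
      // calling thread instead of being threaded through every signature.
      grpc_core::ExecCtx exec_ctx;
      // Callees read it back through the thread-local accessor, as the c-ares
      // resolver later in this diff does with grpc_core::ExecCtx::Get()->Now().
      grpc_millis now = grpc_core::ExecCtx::Get()->Now();
      (void)now;
      // LB policy methods keep their *_locked suffix and must still run under
      // the policy's combiner; only the exec_ctx argument is gone, e.g.
      // rr_ping_one_locked(pol, on_initiate, on_ack).
    }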
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
index b6fce4d..a3b4c8e 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc
@@ -28,8 +28,7 @@
 #include "src/core/lib/iomgr/sockaddr_utils.h"
 #include "src/core/lib/transport/connectivity_state.h"
 
-void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
-                                              grpc_lb_subchannel_data* sd,
+void grpc_lb_subchannel_data_unref_subchannel(grpc_lb_subchannel_data* sd,
                                               const char* reason) {
   if (sd->subchannel != nullptr) {
     if (sd->subchannel_list->tracer->enabled()) {
@@ -41,23 +40,22 @@
               (size_t)(sd - sd->subchannel_list->subchannels),
               sd->subchannel_list->num_subchannels, sd->subchannel);
     }
-    GRPC_SUBCHANNEL_UNREF(exec_ctx, sd->subchannel, reason);
+    GRPC_SUBCHANNEL_UNREF(sd->subchannel, reason);
     sd->subchannel = nullptr;
     if (sd->connected_subchannel != nullptr) {
-      GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, sd->connected_subchannel,
-                                      reason);
+      GRPC_CONNECTED_SUBCHANNEL_UNREF(sd->connected_subchannel, reason);
       sd->connected_subchannel = nullptr;
     }
     if (sd->user_data != nullptr) {
       GPR_ASSERT(sd->user_data_vtable != nullptr);
-      sd->user_data_vtable->destroy(exec_ctx, sd->user_data);
+      sd->user_data_vtable->destroy(sd->user_data);
       sd->user_data = nullptr;
     }
   }
 }
 
 void grpc_lb_subchannel_data_start_connectivity_watch(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
+    grpc_lb_subchannel_data* sd) {
   if (sd->subchannel_list->tracer->enabled()) {
     gpr_log(GPR_DEBUG,
             "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
@@ -69,13 +67,13 @@
   }
   sd->connectivity_notification_pending = true;
   grpc_subchannel_notify_on_state_change(
-      exec_ctx, sd->subchannel, sd->subchannel_list->policy->interested_parties,
+      sd->subchannel, sd->subchannel_list->policy->interested_parties,
       &sd->pending_connectivity_state_unsafe,
       &sd->connectivity_changed_closure);
 }
 
 void grpc_lb_subchannel_data_stop_connectivity_watch(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd) {
+    grpc_lb_subchannel_data* sd) {
   if (sd->subchannel_list->tracer->enabled()) {
     gpr_log(GPR_DEBUG,
             "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
@@ -90,7 +88,7 @@
 }
 
 grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_core::TraceFlag* tracer,
+    grpc_lb_policy* p, grpc_core::TraceFlag* tracer,
     const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
     grpc_iomgr_cb_func connectivity_changed_cb) {
   grpc_lb_subchannel_list* subchannel_list =
@@ -124,8 +122,8 @@
     gpr_free(addr_arg.value.string);
     sc_args.args = new_args;
     grpc_subchannel* subchannel = grpc_client_channel_factory_create_subchannel(
-        exec_ctx, args->client_channel_factory, &sc_args);
-    grpc_channel_args_destroy(exec_ctx, new_args);
+        args->client_channel_factory, &sc_args);
+    grpc_channel_args_destroy(new_args);
     if (subchannel == nullptr) {
       // Subchannel could not be created.
       if (tracer->enabled()) {
@@ -172,8 +170,7 @@
   return subchannel_list;
 }
 
-static void subchannel_list_destroy(grpc_exec_ctx* exec_ctx,
-                                    grpc_lb_subchannel_list* subchannel_list) {
+static void subchannel_list_destroy(grpc_lb_subchannel_list* subchannel_list) {
   if (subchannel_list->tracer->enabled()) {
     gpr_log(GPR_DEBUG, "[%s %p] Destroying subchannel_list %p",
             subchannel_list->tracer->name(), subchannel_list->policy,
@@ -181,8 +178,7 @@
   }
   for (size_t i = 0; i < subchannel_list->num_subchannels; i++) {
     grpc_lb_subchannel_data* sd = &subchannel_list->subchannels[i];
-    grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd,
-                                             "subchannel_list_destroy");
+    grpc_lb_subchannel_data_unref_subchannel(sd, "subchannel_list_destroy");
   }
   gpr_free(subchannel_list->subchannels);
   gpr_free(subchannel_list);
@@ -200,8 +196,7 @@
   }
 }
 
-void grpc_lb_subchannel_list_unref(grpc_exec_ctx* exec_ctx,
-                                   grpc_lb_subchannel_list* subchannel_list,
+void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
                                    const char* reason) {
   const bool done = gpr_unref(&subchannel_list->refcount);
   if (subchannel_list->tracer->enabled()) {
@@ -212,7 +207,7 @@
             reason);
   }
   if (done) {
-    subchannel_list_destroy(exec_ctx, subchannel_list);
+    subchannel_list_destroy(subchannel_list);
   }
 }
 
@@ -223,14 +218,13 @@
 }
 
 void grpc_lb_subchannel_list_unref_for_connectivity_watch(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
-    const char* reason) {
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, subchannel_list->policy, reason);
-  grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
+    grpc_lb_subchannel_list* subchannel_list, const char* reason) {
+  GRPC_LB_POLICY_WEAK_UNREF(subchannel_list->policy, reason);
+  grpc_lb_subchannel_list_unref(subchannel_list, reason);
 }
 
 static void subchannel_data_cancel_connectivity_watch(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd, const char* reason) {
+    grpc_lb_subchannel_data* sd, const char* reason) {
   if (sd->subchannel_list->tracer->enabled()) {
     gpr_log(GPR_DEBUG,
             "[%s %p] subchannel list %p index %" PRIuPTR " of %" PRIuPTR
@@ -240,14 +234,12 @@
             (size_t)(sd - sd->subchannel_list->subchannels),
             sd->subchannel_list->num_subchannels, sd->subchannel, reason);
   }
-  grpc_subchannel_notify_on_state_change(exec_ctx, sd->subchannel, nullptr,
-                                         nullptr,
+  grpc_subchannel_notify_on_state_change(sd->subchannel, nullptr, nullptr,
                                          &sd->connectivity_changed_closure);
 }
 
 void grpc_lb_subchannel_list_shutdown_and_unref(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
-    const char* reason) {
+    grpc_lb_subchannel_list* subchannel_list, const char* reason) {
   if (subchannel_list->tracer->enabled()) {
     gpr_log(GPR_DEBUG, "[%s %p] Shutting down subchannel_list %p (%s)",
             subchannel_list->tracer->name(), subchannel_list->policy,
@@ -261,10 +253,10 @@
     // the callback is responsible for unreffing the subchannel.
     // Otherwise, unref the subchannel directly.
     if (sd->connectivity_notification_pending) {
-      subchannel_data_cancel_connectivity_watch(exec_ctx, sd, reason);
+      subchannel_data_cancel_connectivity_watch(sd, reason);
     } else if (sd->subchannel != nullptr) {
-      grpc_lb_subchannel_data_unref_subchannel(exec_ctx, sd, reason);
+      grpc_lb_subchannel_data_unref_subchannel(sd, reason);
     }
   }
-  grpc_lb_subchannel_list_unref(exec_ctx, subchannel_list, reason);
+  grpc_lb_subchannel_list_unref(subchannel_list, reason);
 }
diff --git a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
index 6538bd0..0f8cea9 100644
--- a/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
+++ b/src/core/ext/filters/client_channel/lb_policy/subchannel_list.h
@@ -36,10 +36,6 @@
 // round_robin that could be refactored and moved here.  In a future PR,
 // need to clean this up.
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_lb_subchannel_list grpc_lb_subchannel_list;
 
 typedef struct {
@@ -69,8 +65,7 @@
 } grpc_lb_subchannel_data;
 
 /// Unrefs the subchannel contained in sd.
-void grpc_lb_subchannel_data_unref_subchannel(grpc_exec_ctx* exec_ctx,
-                                              grpc_lb_subchannel_data* sd,
+void grpc_lb_subchannel_data_unref_subchannel(grpc_lb_subchannel_data* sd,
                                               const char* reason);
 
 /// Starts watching the connectivity state of the subchannel.
@@ -78,11 +73,11 @@
 /// grpc_lb_subchannel_data_stop_connectivity_watch() or again call
 /// grpc_lb_subchannel_data_start_connectivity_watch().
 void grpc_lb_subchannel_data_start_connectivity_watch(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd);
+    grpc_lb_subchannel_data* sd);
 
 /// Stops watching the connectivity state of the subchannel.
 void grpc_lb_subchannel_data_stop_connectivity_watch(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_data* sd);
+    grpc_lb_subchannel_data* sd);
 
 struct grpc_lb_subchannel_list {
   /** backpointer to owning policy */
@@ -121,15 +116,14 @@
 };
 
 grpc_lb_subchannel_list* grpc_lb_subchannel_list_create(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* p, grpc_core::TraceFlag* tracer,
+    grpc_lb_policy* p, grpc_core::TraceFlag* tracer,
     const grpc_lb_addresses* addresses, const grpc_lb_policy_args* args,
     grpc_iomgr_cb_func connectivity_changed_cb);
 
 void grpc_lb_subchannel_list_ref(grpc_lb_subchannel_list* subchannel_list,
                                  const char* reason);
 
-void grpc_lb_subchannel_list_unref(grpc_exec_ctx* exec_ctx,
-                                   grpc_lb_subchannel_list* subchannel_list,
+void grpc_lb_subchannel_list_unref(grpc_lb_subchannel_list* subchannel_list,
                                    const char* reason);
 
 /// Takes and releases refs needed for a connectivity notification.
@@ -137,17 +131,11 @@
 void grpc_lb_subchannel_list_ref_for_connectivity_watch(
     grpc_lb_subchannel_list* subchannel_list, const char* reason);
 void grpc_lb_subchannel_list_unref_for_connectivity_watch(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
-    const char* reason);
+    grpc_lb_subchannel_list* subchannel_list, const char* reason);
 
 /// Mark subchannel_list as discarded. Unsubscribes all its subchannels. The
 /// connectivity state notification callback will ultimately unref it.
 void grpc_lb_subchannel_list_shutdown_and_unref(
-    grpc_exec_ctx* exec_ctx, grpc_lb_subchannel_list* subchannel_list,
-    const char* reason);
-
-#ifdef __cplusplus
-}
-#endif
+    grpc_lb_subchannel_list* subchannel_list, const char* reason);
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_SUBCHANNEL_LIST_H */
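The header above documents the watch contract that round_robin follows earlier in this diff: each connectivity notification must either stop the watch or re-arm it, and the watch holds a ref on the list (plus a weak ref on the policy) that is released when the watch ends. A condensed sketch of that pairing, using only the functions declared above (on_connectivity_changed is an illustrative name; the real callback also folds pending_connectivity_state_unsafe into curr_connectivity_state first):

    static void on_connectivity_changed(void* arg, grpc_error* error) {
      grpc_lb_subchannel_data* sd = (grpc_lb_subchannel_data*)arg;
      if (sd->curr_connectivity_state == GRPC_CHANNEL_SHUTDOWN) {
        // Done watching: stop, drop the subchannel, and release the refs taken
        // when the watch was started.
        grpc_lb_subchannel_data_stop_connectivity_watch(sd);
        grpc_lb_subchannel_data_unref_subchannel(sd, "connectivity_shutdown");
        grpc_lb_subchannel_list_unref_for_connectivity_watch(
            sd->subchannel_list, "connectivity_shutdown");
      } else {
        // Still interested: re-arm the notification, per the contract above.
        grpc_lb_subchannel_data_start_connectivity_watch(sd);
      }
    }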
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.cc b/src/core/ext/filters/client_channel/lb_policy_factory.cc
index d43f9fd..dbf69fd 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.cc
@@ -112,13 +112,11 @@
   return 0;
 }
 
-void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx,
-                               grpc_lb_addresses* addresses) {
+void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses) {
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
     gpr_free(addresses->addresses[i].balancer_name);
     if (addresses->addresses[i].user_data != nullptr) {
-      addresses->user_data_vtable->destroy(exec_ctx,
-                                           addresses->addresses[i].user_data);
+      addresses->user_data_vtable->destroy(addresses->addresses[i].user_data);
     }
   }
   gpr_free(addresses->addresses);
@@ -128,8 +126,8 @@
 static void* lb_addresses_copy(void* addresses) {
   return grpc_lb_addresses_copy((grpc_lb_addresses*)addresses);
 }
-static void lb_addresses_destroy(grpc_exec_ctx* exec_ctx, void* addresses) {
-  grpc_lb_addresses_destroy(exec_ctx, (grpc_lb_addresses*)addresses);
+static void lb_addresses_destroy(void* addresses) {
+  grpc_lb_addresses_destroy((grpc_lb_addresses*)addresses);
 }
 static int lb_addresses_cmp(void* addresses1, void* addresses2) {
   return grpc_lb_addresses_cmp((grpc_lb_addresses*)addresses1,
@@ -162,8 +160,7 @@
 }
 
 grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy_factory* factory,
-    grpc_lb_policy_args* args) {
+    grpc_lb_policy_factory* factory, grpc_lb_policy_args* args) {
   if (factory == nullptr) return nullptr;
-  return factory->vtable->create_lb_policy(exec_ctx, factory, args);
+  return factory->vtable->create_lb_policy(factory, args);
 }
diff --git a/src/core/ext/filters/client_channel/lb_policy_factory.h b/src/core/ext/filters/client_channel/lb_policy_factory.h
index 360a42b..9da231b 100644
--- a/src/core/ext/filters/client_channel/lb_policy_factory.h
+++ b/src/core/ext/filters/client_channel/lb_policy_factory.h
@@ -29,10 +29,6 @@
 // Channel arg key for grpc_lb_addresses.
 #define GRPC_ARG_LB_ADDRESSES "grpc.lb_addresses"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_lb_policy_factory grpc_lb_policy_factory;
 typedef struct grpc_lb_policy_factory_vtable grpc_lb_policy_factory_vtable;
 
@@ -54,7 +50,7 @@
 
 typedef struct grpc_lb_user_data_vtable {
   void* (*copy)(void*);
-  void (*destroy)(grpc_exec_ctx* exec_ctx, void*);
+  void (*destroy)(void*);
   int (*cmp)(void*, void*);
 } grpc_lb_user_data_vtable;
 
@@ -95,8 +91,7 @@
                           const grpc_lb_addresses* addresses2);
 
 /** Destroys \a addresses. */
-void grpc_lb_addresses_destroy(grpc_exec_ctx* exec_ctx,
-                               grpc_lb_addresses* addresses);
+void grpc_lb_addresses_destroy(grpc_lb_addresses* addresses);
 
 /** Returns a channel arg containing \a addresses. */
 grpc_arg grpc_lb_addresses_create_channel_arg(
@@ -118,8 +113,7 @@
   void (*unref)(grpc_lb_policy_factory* factory);
 
   /** Implementation of grpc_lb_policy_factory_create_lb_policy */
-  grpc_lb_policy* (*create_lb_policy)(grpc_exec_ctx* exec_ctx,
-                                      grpc_lb_policy_factory* factory,
+  grpc_lb_policy* (*create_lb_policy)(grpc_lb_policy_factory* factory,
                                       grpc_lb_policy_args* args);
 
   /** Name for the LB policy this factory implements */
@@ -131,11 +125,6 @@
 
 /** Create a lb_policy instance. */
 grpc_lb_policy* grpc_lb_policy_factory_create_lb_policy(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy_factory* factory,
-    grpc_lb_policy_args* args);
-
-#ifdef __cplusplus
-}
-#endif
+    grpc_lb_policy_factory* factory, grpc_lb_policy_args* args);
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_FACTORY_H */
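grpc_lb_user_data_vtable now destroys user data without an exec_ctx. A minimal sketch against the struct above (the noop_* names and the trivial ordering are illustrative; <cstdint> is assumed for uintptr_t):

    static void* noop_user_data_copy(void* user_data) { return user_data; }
    static void noop_user_data_destroy(void* user_data) { /* nothing owned */ }
    static int noop_user_data_cmp(void* a, void* b) {
      uintptr_t x = (uintptr_t)a;
      uintptr_t y = (uintptr_t)b;
      return x == y ? 0 : (x < y ? -1 : 1);  // any consistent ordering will do
    }
    static const grpc_lb_user_data_vtable noop_user_data_vtable = {
        noop_user_data_copy, noop_user_data_destroy, noop_user_data_cmp};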
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.cc b/src/core/ext/filters/client_channel/lb_policy_registry.cc
index 6e710e8..edd0330 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.cc
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.cc
@@ -61,10 +61,10 @@
   return nullptr;
 }
 
-grpc_lb_policy* grpc_lb_policy_create(grpc_exec_ctx* exec_ctx, const char* name,
+grpc_lb_policy* grpc_lb_policy_create(const char* name,
                                       grpc_lb_policy_args* args) {
   grpc_lb_policy_factory* factory = lookup_factory(name);
   grpc_lb_policy* lb_policy =
-      grpc_lb_policy_factory_create_lb_policy(exec_ctx, factory, args);
+      grpc_lb_policy_factory_create_lb_policy(factory, args);
   return lb_policy;
 }
diff --git a/src/core/ext/filters/client_channel/lb_policy_registry.h b/src/core/ext/filters/client_channel/lb_policy_registry.h
index 055f751..5aff793 100644
--- a/src/core/ext/filters/client_channel/lb_policy_registry.h
+++ b/src/core/ext/filters/client_channel/lb_policy_registry.h
@@ -22,10 +22,6 @@
 #include "src/core/ext/filters/client_channel/lb_policy_factory.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Initialize the registry and set \a default_factory as the factory to be
  * returned when no name is provided in a lookup */
 void grpc_lb_policy_registry_init(void);
@@ -38,11 +34,7 @@
  *
  * If \a name is NULL, the default factory from \a grpc_lb_policy_registry_init
  * will be returned. */
-grpc_lb_policy* grpc_lb_policy_create(grpc_exec_ctx* exec_ctx, const char* name,
+grpc_lb_policy* grpc_lb_policy_create(const char* name,
                                       grpc_lb_policy_args* args);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_LB_POLICY_REGISTRY_H */
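An illustrative use of the new exec_ctx-free creation path (create_policies_sketch is not part of this diff; args is assumed to have been populated by the client channel):

    static void create_policies_sketch(grpc_lb_policy_args* args) {
      // "round_robin" resolves to the factory registered by
      // grpc_lb_policy_round_robin_init() earlier in this diff.
      grpc_lb_policy* rr = grpc_lb_policy_create("round_robin", args);
      // Per the comment above, a null name falls back to the default factory
      // passed to grpc_lb_policy_registry_init().
      grpc_lb_policy* fallback = grpc_lb_policy_create(nullptr, args);
      (void)rr;
      (void)fallback;
    }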
diff --git a/src/core/ext/filters/client_channel/parse_address.h b/src/core/ext/filters/client_channel/parse_address.h
index b45859f..ca0a0d1 100644
--- a/src/core/ext/filters/client_channel/parse_address.h
+++ b/src/core/ext/filters/client_channel/parse_address.h
@@ -24,10 +24,6 @@
 #include "src/core/ext/filters/client_channel/uri_parser.h"
 #include "src/core/lib/iomgr/resolve_address.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Populate \a resolved_addr from \a uri, whose path is expected to contain a
  * unix socket path. Returns true upon success. */
 bool grpc_parse_unix(const grpc_uri* uri, grpc_resolved_address* resolved_addr);
@@ -49,8 +45,4 @@
 bool grpc_parse_ipv6_hostport(const char* hostport, grpc_resolved_address* addr,
                               bool log_errors);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PARSE_ADDRESS_H */
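A hypothetical caller of the helpers declared above; the literal address is illustrative, and the bracketed IPv6 host:port form is an assumption about what the helper accepts:

    static void parse_sketch() {
      grpc_resolved_address addr;
      if (grpc_parse_ipv6_hostport("[::1]:50051", &addr, /*log_errors=*/true)) {
        // addr now holds a sockaddr usable by the subchannel code.
      }
    }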
diff --git a/src/core/ext/filters/client_channel/proxy_mapper.cc b/src/core/ext/filters/client_channel/proxy_mapper.cc
index c6ea5fc..be85cfc 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper.cc
+++ b/src/core/ext/filters/client_channel/proxy_mapper.cc
@@ -23,24 +23,22 @@
   mapper->vtable = vtable;
 }
 
-bool grpc_proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
-                                grpc_proxy_mapper* mapper,
+bool grpc_proxy_mapper_map_name(grpc_proxy_mapper* mapper,
                                 const char* server_uri,
                                 const grpc_channel_args* args,
                                 char** name_to_resolve,
                                 grpc_channel_args** new_args) {
-  return mapper->vtable->map_name(exec_ctx, mapper, server_uri, args,
-                                  name_to_resolve, new_args);
+  return mapper->vtable->map_name(mapper, server_uri, args, name_to_resolve,
+                                  new_args);
 }
 
-bool grpc_proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,
-                                   grpc_proxy_mapper* mapper,
+bool grpc_proxy_mapper_map_address(grpc_proxy_mapper* mapper,
                                    const grpc_resolved_address* address,
                                    const grpc_channel_args* args,
                                    grpc_resolved_address** new_address,
                                    grpc_channel_args** new_args) {
-  return mapper->vtable->map_address(exec_ctx, mapper, address, args,
-                                     new_address, new_args);
+  return mapper->vtable->map_address(mapper, address, args, new_address,
+                                     new_args);
 }
 
 void grpc_proxy_mapper_destroy(grpc_proxy_mapper* mapper) {
diff --git a/src/core/ext/filters/client_channel/proxy_mapper.h b/src/core/ext/filters/client_channel/proxy_mapper.h
index bb8259f..ce3e65e 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper.h
+++ b/src/core/ext/filters/client_channel/proxy_mapper.h
@@ -25,10 +25,6 @@
 
 #include "src/core/lib/iomgr/resolve_address.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_proxy_mapper grpc_proxy_mapper;
 
 typedef struct {
@@ -36,14 +32,14 @@
   /// If no proxy is needed, returns false.
   /// Otherwise, sets \a name_to_resolve, optionally sets \a new_args,
   /// and returns true.
-  bool (*map_name)(grpc_exec_ctx* exec_ctx, grpc_proxy_mapper* mapper,
-                   const char* server_uri, const grpc_channel_args* args,
-                   char** name_to_resolve, grpc_channel_args** new_args);
+  bool (*map_name)(grpc_proxy_mapper* mapper, const char* server_uri,
+                   const grpc_channel_args* args, char** name_to_resolve,
+                   grpc_channel_args** new_args);
   /// Determines the proxy address to use to contact \a address.
   /// If no proxy is needed, returns false.
   /// Otherwise, sets \a new_address, optionally sets \a new_args, and
   /// returns true.
-  bool (*map_address)(grpc_exec_ctx* exec_ctx, grpc_proxy_mapper* mapper,
+  bool (*map_address)(grpc_proxy_mapper* mapper,
                       const grpc_resolved_address* address,
                       const grpc_channel_args* args,
                       grpc_resolved_address** new_address,
@@ -59,15 +55,13 @@
 void grpc_proxy_mapper_init(const grpc_proxy_mapper_vtable* vtable,
                             grpc_proxy_mapper* mapper);
 
-bool grpc_proxy_mapper_map_name(grpc_exec_ctx* exec_ctx,
-                                grpc_proxy_mapper* mapper,
+bool grpc_proxy_mapper_map_name(grpc_proxy_mapper* mapper,
                                 const char* server_uri,
                                 const grpc_channel_args* args,
                                 char** name_to_resolve,
                                 grpc_channel_args** new_args);
 
-bool grpc_proxy_mapper_map_address(grpc_exec_ctx* exec_ctx,
-                                   grpc_proxy_mapper* mapper,
+bool grpc_proxy_mapper_map_address(grpc_proxy_mapper* mapper,
                                    const grpc_resolved_address* address,
                                    const grpc_channel_args* args,
                                    grpc_resolved_address** new_address,
@@ -75,8 +69,4 @@
 
 void grpc_proxy_mapper_destroy(grpc_proxy_mapper* mapper);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_H */
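The vtable above now maps names and addresses without an exec_ctx; returning false means "no proxy needed". A pass-through sketch matching the declared signatures (the noop_* names are illustrative, and a complete vtable would also supply its remaining members, such as a destroy hook):

    static bool noop_map_name(grpc_proxy_mapper* mapper, const char* server_uri,
                              const grpc_channel_args* args,
                              char** name_to_resolve,
                              grpc_channel_args** new_args) {
      return false;  // never proxy by name
    }
    static bool noop_map_address(grpc_proxy_mapper* mapper,
                                 const grpc_resolved_address* address,
                                 const grpc_channel_args* args,
                                 grpc_resolved_address** new_address,
                                 grpc_channel_args** new_args) {
      return false;  // never proxy by address
    }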
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.cc b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
index 09967ee..51778a2 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.cc
@@ -46,14 +46,13 @@
   ++list->num_mappers;
 }
 
-static bool grpc_proxy_mapper_list_map_name(grpc_exec_ctx* exec_ctx,
-                                            grpc_proxy_mapper_list* list,
+static bool grpc_proxy_mapper_list_map_name(grpc_proxy_mapper_list* list,
                                             const char* server_uri,
                                             const grpc_channel_args* args,
                                             char** name_to_resolve,
                                             grpc_channel_args** new_args) {
   for (size_t i = 0; i < list->num_mappers; ++i) {
-    if (grpc_proxy_mapper_map_name(exec_ctx, list->list[i], server_uri, args,
+    if (grpc_proxy_mapper_map_name(list->list[i], server_uri, args,
                                    name_to_resolve, new_args)) {
       return true;
     }
@@ -62,12 +61,12 @@
 }
 
 static bool grpc_proxy_mapper_list_map_address(
-    grpc_exec_ctx* exec_ctx, grpc_proxy_mapper_list* list,
-    const grpc_resolved_address* address, const grpc_channel_args* args,
-    grpc_resolved_address** new_address, grpc_channel_args** new_args) {
+    grpc_proxy_mapper_list* list, const grpc_resolved_address* address,
+    const grpc_channel_args* args, grpc_resolved_address** new_address,
+    grpc_channel_args** new_args) {
   for (size_t i = 0; i < list->num_mappers; ++i) {
-    if (grpc_proxy_mapper_map_address(exec_ctx, list->list[i], address, args,
-                                      new_address, new_args)) {
+    if (grpc_proxy_mapper_map_address(list->list[i], address, args, new_address,
+                                      new_args)) {
       return true;
     }
   }
@@ -105,20 +104,17 @@
   grpc_proxy_mapper_list_register(&g_proxy_mapper_list, at_start, mapper);
 }
 
-bool grpc_proxy_mappers_map_name(grpc_exec_ctx* exec_ctx,
-                                 const char* server_uri,
+bool grpc_proxy_mappers_map_name(const char* server_uri,
                                  const grpc_channel_args* args,
                                  char** name_to_resolve,
                                  grpc_channel_args** new_args) {
-  return grpc_proxy_mapper_list_map_name(exec_ctx, &g_proxy_mapper_list,
-                                         server_uri, args, name_to_resolve,
-                                         new_args);
+  return grpc_proxy_mapper_list_map_name(&g_proxy_mapper_list, server_uri, args,
+                                         name_to_resolve, new_args);
 }
-bool grpc_proxy_mappers_map_address(grpc_exec_ctx* exec_ctx,
-                                    const grpc_resolved_address* address,
+bool grpc_proxy_mappers_map_address(const grpc_resolved_address* address,
                                     const grpc_channel_args* args,
                                     grpc_resolved_address** new_address,
                                     grpc_channel_args** new_args) {
-  return grpc_proxy_mapper_list_map_address(
-      exec_ctx, &g_proxy_mapper_list, address, args, new_address, new_args);
+  return grpc_proxy_mapper_list_map_address(&g_proxy_mapper_list, address, args,
+                                            new_address, new_args);
 }
diff --git a/src/core/ext/filters/client_channel/proxy_mapper_registry.h b/src/core/ext/filters/client_channel/proxy_mapper_registry.h
index 39c607c..2ad6c04 100644
--- a/src/core/ext/filters/client_channel/proxy_mapper_registry.h
+++ b/src/core/ext/filters/client_channel/proxy_mapper_registry.h
@@ -21,10 +21,6 @@
 
 #include "src/core/ext/filters/client_channel/proxy_mapper.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void grpc_proxy_mapper_registry_init();
 void grpc_proxy_mapper_registry_shutdown();
 
@@ -33,20 +29,14 @@
 /// the list.  Otherwise, it will be added to the end.
 void grpc_proxy_mapper_register(bool at_start, grpc_proxy_mapper* mapper);
 
-bool grpc_proxy_mappers_map_name(grpc_exec_ctx* exec_ctx,
-                                 const char* server_uri,
+bool grpc_proxy_mappers_map_name(const char* server_uri,
                                  const grpc_channel_args* args,
                                  char** name_to_resolve,
                                  grpc_channel_args** new_args);
 
-bool grpc_proxy_mappers_map_address(grpc_exec_ctx* exec_ctx,
-                                    const grpc_resolved_address* address,
+bool grpc_proxy_mappers_map_address(const grpc_resolved_address* address,
                                     const grpc_channel_args* args,
                                     grpc_resolved_address** new_address,
                                     grpc_channel_args** new_args);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_PROXY_MAPPER_REGISTRY_H */
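Registration order matters for the lookups above: the registry walks its list in order and stops at the first mapper that returns true. An illustrative registration (my_mapper is assumed to have been set up with grpc_proxy_mapper_init):

    static grpc_proxy_mapper my_mapper;  // assumption: initialized elsewhere

    static void register_sketch() {
      // at_start=true places my_mapper ahead of previously registered mappers,
      // so grpc_proxy_mappers_map_name()/_map_address() consult it first.
      grpc_proxy_mapper_register(/*at_start=*/true, &my_mapper);
    }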
diff --git a/src/core/ext/filters/client_channel/resolver.cc b/src/core/ext/filters/client_channel/resolver.cc
index c16b151..ff54e71 100644
--- a/src/core/ext/filters/client_channel/resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver.cc
@@ -46,8 +46,8 @@
 }
 
 #ifndef NDEBUG
-void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
-                         const char* file, int line, const char* reason) {
+void grpc_resolver_unref(grpc_resolver* resolver, const char* file, int line,
+                         const char* reason) {
   if (grpc_trace_resolver_refcount.enabled()) {
     gpr_atm old_refs = gpr_atm_no_barrier_load(&resolver->refs.count);
     gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -55,27 +55,25 @@
             old_refs, old_refs - 1, reason);
   }
 #else
-void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver) {
+void grpc_resolver_unref(grpc_resolver* resolver) {
 #endif
   if (gpr_unref(&resolver->refs)) {
     grpc_combiner* combiner = resolver->combiner;
-    resolver->vtable->destroy(exec_ctx, resolver);
-    GRPC_COMBINER_UNREF(exec_ctx, combiner, "resolver");
+    resolver->vtable->destroy(resolver);
+    GRPC_COMBINER_UNREF(combiner, "resolver");
   }
 }
 
-void grpc_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
-                                   grpc_resolver* resolver) {
-  resolver->vtable->shutdown_locked(exec_ctx, resolver);
+void grpc_resolver_shutdown_locked(grpc_resolver* resolver) {
+  resolver->vtable->shutdown_locked(resolver);
 }
 
-void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
-                                            grpc_resolver* resolver) {
-  resolver->vtable->channel_saw_error_locked(exec_ctx, resolver);
+void grpc_resolver_channel_saw_error_locked(grpc_resolver* resolver) {
+  resolver->vtable->channel_saw_error_locked(resolver);
 }
 
-void grpc_resolver_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+void grpc_resolver_next_locked(grpc_resolver* resolver,
                                grpc_channel_args** result,
                                grpc_closure* on_complete) {
-  resolver->vtable->next_locked(exec_ctx, resolver, result, on_complete);
+  resolver->vtable->next_locked(resolver, result, on_complete);
 }
diff --git a/src/core/ext/filters/client_channel/resolver.h b/src/core/ext/filters/client_channel/resolver.h
index b5806ad..f6a4af0 100644
--- a/src/core/ext/filters/client_channel/resolver.h
+++ b/src/core/ext/filters/client_channel/resolver.h
@@ -22,10 +22,6 @@
 #include "src/core/ext/filters/client_channel/subchannel.h"
 #include "src/core/lib/iomgr/iomgr.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_resolver grpc_resolver;
 typedef struct grpc_resolver_vtable grpc_resolver_vtable;
 
@@ -39,43 +35,40 @@
 };
 
 struct grpc_resolver_vtable {
-  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver);
-  void (*shutdown_locked)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver);
-  void (*channel_saw_error_locked)(grpc_exec_ctx* exec_ctx,
-                                   grpc_resolver* resolver);
-  void (*next_locked)(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
-                      grpc_channel_args** result, grpc_closure* on_complete);
+  void (*destroy)(grpc_resolver* resolver);
+  void (*shutdown_locked)(grpc_resolver* resolver);
+  void (*channel_saw_error_locked)(grpc_resolver* resolver);
+  void (*next_locked)(grpc_resolver* resolver, grpc_channel_args** result,
+                      grpc_closure* on_complete);
 };
 
 #ifndef NDEBUG
 #define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_RESOLVER_UNREF(e, p, r) \
-  grpc_resolver_unref((e), (p), __FILE__, __LINE__, (r))
+#define GRPC_RESOLVER_UNREF(p, r) \
+  grpc_resolver_unref((p), __FILE__, __LINE__, (r))
 void grpc_resolver_ref(grpc_resolver* policy, const char* file, int line,
                        const char* reason);
-void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* policy,
-                         const char* file, int line, const char* reason);
+void grpc_resolver_unref(grpc_resolver* policy, const char* file, int line,
+                         const char* reason);
 #else
 #define GRPC_RESOLVER_REF(p, r) grpc_resolver_ref((p))
-#define GRPC_RESOLVER_UNREF(e, p, r) grpc_resolver_unref((e), (p))
+#define GRPC_RESOLVER_UNREF(p, r) grpc_resolver_unref((p))
 void grpc_resolver_ref(grpc_resolver* policy);
-void grpc_resolver_unref(grpc_exec_ctx* exec_ctx, grpc_resolver* policy);
+void grpc_resolver_unref(grpc_resolver* policy);
 #endif
 
 void grpc_resolver_init(grpc_resolver* resolver,
                         const grpc_resolver_vtable* vtable,
                         grpc_combiner* combiner);
 
-void grpc_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
-                                   grpc_resolver* resolver);
+void grpc_resolver_shutdown_locked(grpc_resolver* resolver);
 
 /** Notification that the channel has seen an error on some address.
     Can be used as a hint that re-resolution is desirable soon.
 
     Must be called from the combiner passed as a resolver_arg at construction
     time.*/
-void grpc_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
-                                            grpc_resolver* resolver);
+void grpc_resolver_channel_saw_error_locked(grpc_resolver* resolver);
 
 /** Get the next result from the resolver.  Expected to set \a *result with
     new channel args and then schedule \a on_complete for execution.
@@ -85,12 +78,8 @@
 
     Must be called from the combiner passed as a resolver_arg at construction
     time.*/
-void grpc_resolver_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+void grpc_resolver_next_locked(grpc_resolver* resolver,
                                grpc_channel_args** result,
                                grpc_closure* on_complete);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_H */
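A condensed caller sketch for the next/notify flow documented above (names are illustrative; the real callers are the client channel and the resolvers later in this diff). The request must run under the resolver's combiner, and the callback receives the result through the grpc_channel_args** handed in:

    static grpc_channel_args* resolver_result = nullptr;
    static grpc_closure on_resolver_result;

    static void on_resolver_result_cb(void* arg, grpc_error* error) {
      // Consume resolver_result here; re-arm with another
      // grpc_resolver_next_locked() call if further updates are wanted.
    }

    static void request_next_result_locked(grpc_resolver* resolver,
                                           grpc_combiner* combiner) {
      GRPC_CLOSURE_INIT(&on_resolver_result, on_resolver_result_cb, nullptr,
                        grpc_combiner_scheduler(combiner));
      grpc_resolver_next_locked(resolver, &resolver_result, &on_resolver_result);
    }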
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
index 07737b1..4659a5f 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc
@@ -40,10 +40,10 @@
 #include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/json/json.h"
 #include "src/core/lib/support/env.h"
+#include "src/core/lib/support/manual_constructor.h"
 #include "src/core/lib/support/string.h"
 #include "src/core/lib/transport/service_config.h"
 
-#define GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS 1
 #define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1
 #define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6
 #define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120
@@ -89,7 +89,7 @@
   bool have_retry_timer;
   grpc_timer retry_timer;
   /** retry backoff state */
-  grpc_backoff backoff_state;
+  grpc_core::ManualConstructor<grpc_core::BackOff> backoff;
 
   /** currently resolving addresses */
   grpc_lb_addresses* lb_addresses;
@@ -97,17 +97,14 @@
   char* service_config_json;
 } ares_dns_resolver;
 
-static void dns_ares_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
+static void dns_ares_destroy(grpc_resolver* r);
 
-static void dns_ares_start_resolving_locked(grpc_exec_ctx* exec_ctx,
-                                            ares_dns_resolver* r);
-static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
-                                              ares_dns_resolver* r);
+static void dns_ares_start_resolving_locked(ares_dns_resolver* r);
+static void dns_ares_maybe_finish_next_locked(ares_dns_resolver* r);
 
-static void dns_ares_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
-static void dns_ares_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
-                                              grpc_resolver* r);
-static void dns_ares_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
+static void dns_ares_shutdown_locked(grpc_resolver* r);
+static void dns_ares_channel_saw_error_locked(grpc_resolver* r);
+static void dns_ares_next_locked(grpc_resolver* r,
                                  grpc_channel_args** target_result,
                                  grpc_closure* on_complete);
 
@@ -115,43 +112,39 @@
     dns_ares_destroy, dns_ares_shutdown_locked,
     dns_ares_channel_saw_error_locked, dns_ares_next_locked};
 
-static void dns_ares_shutdown_locked(grpc_exec_ctx* exec_ctx,
-                                     grpc_resolver* resolver) {
+static void dns_ares_shutdown_locked(grpc_resolver* resolver) {
   ares_dns_resolver* r = (ares_dns_resolver*)resolver;
   if (r->have_retry_timer) {
-    grpc_timer_cancel(exec_ctx, &r->retry_timer);
+    grpc_timer_cancel(&r->retry_timer);
   }
   if (r->pending_request != nullptr) {
-    grpc_cancel_ares_request(exec_ctx, r->pending_request);
+    grpc_cancel_ares_request(r->pending_request);
   }
   if (r->next_completion != nullptr) {
     *r->target_result = nullptr;
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, r->next_completion,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
+    GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                               "Resolver Shutdown"));
     r->next_completion = nullptr;
   }
 }
 
-static void dns_ares_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
-                                              grpc_resolver* resolver) {
+static void dns_ares_channel_saw_error_locked(grpc_resolver* resolver) {
   ares_dns_resolver* r = (ares_dns_resolver*)resolver;
   if (!r->resolving) {
-    grpc_backoff_reset(&r->backoff_state);
-    dns_ares_start_resolving_locked(exec_ctx, r);
+    r->backoff->Reset();
+    dns_ares_start_resolving_locked(r);
   }
 }
 
-static void dns_ares_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void dns_ares_on_retry_timer_locked(void* arg, grpc_error* error) {
   ares_dns_resolver* r = (ares_dns_resolver*)arg;
   r->have_retry_timer = false;
   if (error == GRPC_ERROR_NONE) {
     if (!r->resolving) {
-      dns_ares_start_resolving_locked(exec_ctx, r);
+      dns_ares_start_resolving_locked(r);
     }
   }
-  GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
+  GRPC_RESOLVER_UNREF(&r->base, "retry-timer");
 }
 
 static bool value_in_json_array(grpc_json* array, const char* value) {
@@ -226,8 +219,7 @@
   return service_config;
 }
 
-static void dns_ares_on_resolved_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void dns_ares_on_resolved_locked(void* arg, grpc_error* error) {
   ares_dns_resolver* r = (ares_dns_resolver*)arg;
   grpc_channel_args* result = nullptr;
   GPR_ASSERT(r->resolving);
@@ -268,13 +260,12 @@
         num_args_to_add);
     if (service_config != nullptr) grpc_service_config_destroy(service_config);
     gpr_free(service_config_string);
-    grpc_lb_addresses_destroy(exec_ctx, r->lb_addresses);
+    grpc_lb_addresses_destroy(r->lb_addresses);
   } else {
     const char* msg = grpc_error_string(error);
     gpr_log(GPR_DEBUG, "dns resolution failed: %s", msg);
-    grpc_millis next_try =
-        grpc_backoff_step(exec_ctx, &r->backoff_state).next_attempt_start_time;
-    grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
+    grpc_millis next_try = r->backoff->Step();
+    grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
     gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
             grpc_error_string(error));
     GPR_ASSERT(!r->have_retry_timer);
@@ -285,20 +276,19 @@
     } else {
       gpr_log(GPR_DEBUG, "retrying immediately");
     }
-    grpc_timer_init(exec_ctx, &r->retry_timer, next_try,
+    grpc_timer_init(&r->retry_timer, next_try,
                     &r->dns_ares_on_retry_timer_locked);
   }
   if (r->resolved_result != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, r->resolved_result);
+    grpc_channel_args_destroy(r->resolved_result);
   }
   r->resolved_result = result;
   r->resolved_version++;
-  dns_ares_maybe_finish_next_locked(exec_ctx, r);
-  GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
+  dns_ares_maybe_finish_next_locked(r);
+  GRPC_RESOLVER_UNREF(&r->base, "dns-resolving");
 }
 
-static void dns_ares_next_locked(grpc_exec_ctx* exec_ctx,
-                                 grpc_resolver* resolver,
+static void dns_ares_next_locked(grpc_resolver* resolver,
                                  grpc_channel_args** target_result,
                                  grpc_closure* on_complete) {
   gpr_log(GPR_DEBUG, "dns_ares_next is called.");
@@ -307,57 +297,54 @@
   r->next_completion = on_complete;
   r->target_result = target_result;
   if (r->resolved_version == 0 && !r->resolving) {
-    grpc_backoff_reset(&r->backoff_state);
-    dns_ares_start_resolving_locked(exec_ctx, r);
+    r->backoff->Reset();
+    dns_ares_start_resolving_locked(r);
   } else {
-    dns_ares_maybe_finish_next_locked(exec_ctx, r);
+    dns_ares_maybe_finish_next_locked(r);
   }
 }
 
-static void dns_ares_start_resolving_locked(grpc_exec_ctx* exec_ctx,
-                                            ares_dns_resolver* r) {
+static void dns_ares_start_resolving_locked(ares_dns_resolver* r) {
   GRPC_RESOLVER_REF(&r->base, "dns-resolving");
   GPR_ASSERT(!r->resolving);
   r->resolving = true;
   r->lb_addresses = nullptr;
   r->service_config_json = nullptr;
   r->pending_request = grpc_dns_lookup_ares(
-      exec_ctx, r->dns_server, r->name_to_resolve, r->default_port,
-      r->interested_parties, &r->dns_ares_on_resolved_locked, &r->lb_addresses,
+      r->dns_server, r->name_to_resolve, r->default_port, r->interested_parties,
+      &r->dns_ares_on_resolved_locked, &r->lb_addresses,
       true /* check_grpclb */,
       r->request_service_config ? &r->service_config_json : nullptr);
 }
 
-static void dns_ares_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
-                                              ares_dns_resolver* r) {
+static void dns_ares_maybe_finish_next_locked(ares_dns_resolver* r) {
   if (r->next_completion != nullptr &&
       r->resolved_version != r->published_version) {
     *r->target_result = r->resolved_result == nullptr
                             ? nullptr
                             : grpc_channel_args_copy(r->resolved_result);
     gpr_log(GPR_DEBUG, "dns_ares_maybe_finish_next_locked");
-    GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = nullptr;
     r->published_version = r->resolved_version;
   }
 }
 
-static void dns_ares_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
+static void dns_ares_destroy(grpc_resolver* gr) {
   gpr_log(GPR_DEBUG, "dns_ares_destroy");
   ares_dns_resolver* r = (ares_dns_resolver*)gr;
   if (r->resolved_result != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, r->resolved_result);
+    grpc_channel_args_destroy(r->resolved_result);
   }
-  grpc_pollset_set_destroy(exec_ctx, r->interested_parties);
+  grpc_pollset_set_destroy(r->interested_parties);
   gpr_free(r->dns_server);
   gpr_free(r->name_to_resolve);
   gpr_free(r->default_port);
-  grpc_channel_args_destroy(exec_ctx, r->channel_args);
+  grpc_channel_args_destroy(r->channel_args);
   gpr_free(r);
 }
 
-static grpc_resolver* dns_ares_create(grpc_exec_ctx* exec_ctx,
-                                      grpc_resolver_args* args,
+static grpc_resolver* dns_ares_create(grpc_resolver_args* args,
                                       const char* default_port) {
   /* Get name from args. */
   const char* path = args->uri->path;
@@ -378,14 +365,15 @@
       arg, (grpc_integer_options){false, false, true});
   r->interested_parties = grpc_pollset_set_create();
   if (args->pollset_set != nullptr) {
-    grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
-                                     args->pollset_set);
+    grpc_pollset_set_add_pollset_set(r->interested_parties, args->pollset_set);
   }
-  grpc_backoff_init(
-      &r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
-      GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, GRPC_DNS_RECONNECT_JITTER,
-      GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
-      GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+  grpc_core::BackOff::Options backoff_options;
+  backoff_options
+      .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
+      .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
+      .set_jitter(GRPC_DNS_RECONNECT_JITTER)
+      .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+  r->backoff.Init(grpc_core::BackOff(backoff_options));
   GRPC_CLOSURE_INIT(&r->dns_ares_on_retry_timer_locked,
                     dns_ares_on_retry_timer_locked, r,
                     grpc_combiner_scheduler(r->base.combiner));
@@ -404,9 +392,8 @@
 static void dns_ares_factory_unref(grpc_resolver_factory* factory) {}
 
 static grpc_resolver* dns_factory_create_resolver(
-    grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
-    grpc_resolver_args* args) {
-  return dns_ares_create(exec_ctx, args, "https");
+    grpc_resolver_factory* factory, grpc_resolver_args* args) {
+  return dns_ares_create(args, "https");
 }
 
 static char* dns_ares_factory_get_default_host_name(
@@ -425,7 +412,7 @@
   return &dns_resolver_factory;
 }
 
-extern "C" void grpc_resolver_dns_ares_init(void) {
+void grpc_resolver_dns_ares_init(void) {
   char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
   /* TODO(zyc): Turn on c-ares based resolver by default after the address
      sorter and the CNAME support are added. */
@@ -441,7 +428,7 @@
   gpr_free(resolver);
 }
 
-extern "C" void grpc_resolver_dns_ares_shutdown(void) {
+void grpc_resolver_dns_ares_shutdown(void) {
   char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
   if (resolver != nullptr && gpr_stricmp(resolver, "ares") == 0) {
     grpc_ares_cleanup();
@@ -451,8 +438,8 @@
 
 #else /* GRPC_ARES == 1 && !defined(GRPC_UV) */
 
-extern "C" void grpc_resolver_dns_ares_init(void) {}
+void grpc_resolver_dns_ares_init(void) {}
 
-extern "C" void grpc_resolver_dns_ares_shutdown(void) {}
+void grpc_resolver_dns_ares_shutdown(void) {}
 
 #endif /* GRPC_ARES == 1 && !defined(GRPC_UV) */
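The c-ares resolver above replaces grpc_backoff with the grpc_core::BackOff class held in a ManualConstructor. A standalone sketch of the same pattern, showing how Step()/Reset() drive the retry timer (the constants mirror the macros used above except the jitter value, which is illustrative):

    static void backoff_sketch() {
      grpc_core::ExecCtx exec_ctx;  // Step() and Now() need an active ExecCtx
      grpc_core::BackOff::Options options;
      options.set_initial_backoff(1000)      // ms, as above
          .set_multiplier(1.6)
          .set_jitter(0.2)                   // illustrative value
          .set_max_backoff(120 * 1000);      // ms
      grpc_core::BackOff backoff(options);
      // After a failed attempt: compute the next absolute deadline and how far
      // away it is, then arm a timer for it, as dns_ares_on_resolved_locked does.
      grpc_millis next_try = backoff.Step();
      grpc_millis delay = next_try - grpc_core::ExecCtx::Get()->Now();
      (void)delay;
      // After a successful attempt, or when a fresh request cycle begins:
      backoff.Reset();
    }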
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
index 0062aa5..ba7dad6 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver.h
@@ -23,17 +23,12 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/iomgr/pollset_set.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_ares_ev_driver grpc_ares_ev_driver;
 
 /* Start \a ev_driver. It will keep working until all IO on its ares_channel is
    done, or grpc_ares_ev_driver_destroy() is called. It may notify the callbacks
    bound to its ares_channel when necessary. */
-void grpc_ares_ev_driver_start(grpc_exec_ctx* exec_ctx,
-                               grpc_ares_ev_driver* ev_driver);
+void grpc_ares_ev_driver_start(grpc_ares_ev_driver* ev_driver);
 
 /* Returns the ares_channel owned by \a ev_driver. To bind a c-ares query to
    \a ev_driver, use the ares_channel owned by \a ev_driver as the arg of the
@@ -51,12 +46,7 @@
 void grpc_ares_ev_driver_destroy(grpc_ares_ev_driver* ev_driver);
 
 /* Shutdown all the grpc_fds used by \a ev_driver */
-void grpc_ares_ev_driver_shutdown(grpc_exec_ctx* exec_ctx,
-                                  grpc_ares_ev_driver* ev_driver);
-
-#ifdef __cplusplus
-}
-#endif
+void grpc_ares_ev_driver_shutdown(grpc_ares_ev_driver* ev_driver);
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_EV_DRIVER_H \
         */
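A lifecycle sketch for the driver API above; creation and channel access are declared in portions of this header the hunks elide, so only the visible calls appear, and `driver` is assumed to have been created successfully:

    static void ev_driver_lifecycle_sketch(grpc_ares_ev_driver* driver) {
      // Begin driving c-ares I/O; per the comment above, the driver keeps
      // working until all I/O on its ares_channel is done or it is destroyed.
      grpc_ares_ev_driver_start(driver);
      // Teardown path: shut down every grpc_fd the driver owns, then drop the
      // owner's reference.
      grpc_ares_ev_driver_shutdown(driver);
      grpc_ares_ev_driver_destroy(driver);
    }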
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
index 4cb068a..40e2645 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc
@@ -77,8 +77,7 @@
   bool shutting_down;
 };
 
-static void grpc_ares_notify_on_event_locked(grpc_exec_ctx* exec_ctx,
-                                             grpc_ares_ev_driver* ev_driver);
+static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver);
 
 static grpc_ares_ev_driver* grpc_ares_ev_driver_ref(
     grpc_ares_ev_driver* ev_driver) {
@@ -98,7 +97,7 @@
   }
 }
 
-static void fd_node_destroy(grpc_exec_ctx* exec_ctx, fd_node* fdn) {
+static void fd_node_destroy(fd_node* fdn) {
   gpr_log(GPR_DEBUG, "delete fd: %d", grpc_fd_wrapped_fd(fdn->fd));
   GPR_ASSERT(!fdn->readable_registered);
   GPR_ASSERT(!fdn->writable_registered);
@@ -106,21 +105,20 @@
   /* c-ares library has closed the fd inside grpc_fd. This fd may be picked up
      immediately by another thread, and should not be closed by the following
      grpc_fd_orphan. */
-  grpc_fd_orphan(exec_ctx, fdn->fd, nullptr, nullptr, true /* already_closed */,
+  grpc_fd_orphan(fdn->fd, nullptr, nullptr, true /* already_closed */,
                  "c-ares query finished");
   gpr_free(fdn);
 }
 
-static void fd_node_shutdown(grpc_exec_ctx* exec_ctx, fd_node* fdn) {
+static void fd_node_shutdown(fd_node* fdn) {
   gpr_mu_lock(&fdn->mu);
   fdn->shutting_down = true;
   if (!fdn->readable_registered && !fdn->writable_registered) {
     gpr_mu_unlock(&fdn->mu);
-    fd_node_destroy(exec_ctx, fdn);
+    fd_node_destroy(fdn);
   } else {
     grpc_fd_shutdown(
-        exec_ctx, fdn->fd,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown"));
+        fdn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("c-ares fd shutdown"));
     gpr_mu_unlock(&fdn->mu);
   }
 }
@@ -160,15 +158,13 @@
   grpc_ares_ev_driver_unref(ev_driver);
 }
 
-void grpc_ares_ev_driver_shutdown(grpc_exec_ctx* exec_ctx,
-                                  grpc_ares_ev_driver* ev_driver) {
+void grpc_ares_ev_driver_shutdown(grpc_ares_ev_driver* ev_driver) {
   gpr_mu_lock(&ev_driver->mu);
   ev_driver->shutting_down = true;
   fd_node* fn = ev_driver->fds;
   while (fn != nullptr) {
-    grpc_fd_shutdown(
-        exec_ctx, fn->fd,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("grpc_ares_ev_driver_shutdown"));
+    grpc_fd_shutdown(fn->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                 "grpc_ares_ev_driver_shutdown"));
     fn = fn->next;
   }
   gpr_mu_unlock(&ev_driver->mu);
@@ -199,8 +195,7 @@
   return ioctl(fd, FIONREAD, &bytes_available) == 0 && bytes_available > 0;
 }
 
-static void on_readable_cb(grpc_exec_ctx* exec_ctx, void* arg,
-                           grpc_error* error) {
+static void on_readable_cb(void* arg, grpc_error* error) {
   fd_node* fdn = (fd_node*)arg;
   grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
   gpr_mu_lock(&fdn->mu);
@@ -208,7 +203,7 @@
   fdn->readable_registered = false;
   if (fdn->shutting_down && !fdn->writable_registered) {
     gpr_mu_unlock(&fdn->mu);
-    fd_node_destroy(exec_ctx, fdn);
+    fd_node_destroy(fdn);
     grpc_ares_ev_driver_unref(ev_driver);
     return;
   }
@@ -229,13 +224,12 @@
     ares_cancel(ev_driver->channel);
   }
   gpr_mu_lock(&ev_driver->mu);
-  grpc_ares_notify_on_event_locked(exec_ctx, ev_driver);
+  grpc_ares_notify_on_event_locked(ev_driver);
   gpr_mu_unlock(&ev_driver->mu);
   grpc_ares_ev_driver_unref(ev_driver);
 }
 
-static void on_writable_cb(grpc_exec_ctx* exec_ctx, void* arg,
-                           grpc_error* error) {
+static void on_writable_cb(void* arg, grpc_error* error) {
   fd_node* fdn = (fd_node*)arg;
   grpc_ares_ev_driver* ev_driver = fdn->ev_driver;
   gpr_mu_lock(&fdn->mu);
@@ -243,7 +237,7 @@
   fdn->writable_registered = false;
   if (fdn->shutting_down && !fdn->readable_registered) {
     gpr_mu_unlock(&fdn->mu);
-    fd_node_destroy(exec_ctx, fdn);
+    fd_node_destroy(fdn);
     grpc_ares_ev_driver_unref(ev_driver);
     return;
   }
@@ -262,7 +256,7 @@
     ares_cancel(ev_driver->channel);
   }
   gpr_mu_lock(&ev_driver->mu);
-  grpc_ares_notify_on_event_locked(exec_ctx, ev_driver);
+  grpc_ares_notify_on_event_locked(ev_driver);
   gpr_mu_unlock(&ev_driver->mu);
   grpc_ares_ev_driver_unref(ev_driver);
 }
@@ -273,8 +267,7 @@
 
 // Get the file descriptors used by the ev_driver's ares channel, register
 // driver_closure with these file descriptors.
-static void grpc_ares_notify_on_event_locked(grpc_exec_ctx* exec_ctx,
-                                             grpc_ares_ev_driver* ev_driver) {
+static void grpc_ares_notify_on_event_locked(grpc_ares_ev_driver* ev_driver) {
   fd_node* new_list = nullptr;
   if (!ev_driver->shutting_down) {
     ares_socket_t socks[ARES_GETSOCK_MAXNUM];
@@ -300,7 +293,7 @@
                             grpc_schedule_on_exec_ctx);
           GRPC_CLOSURE_INIT(&fdn->write_closure, on_writable_cb, fdn,
                             grpc_schedule_on_exec_ctx);
-          grpc_pollset_set_add_fd(exec_ctx, ev_driver->pollset_set, fdn->fd);
+          grpc_pollset_set_add_fd(ev_driver->pollset_set, fdn->fd);
           gpr_free(fd_name);
         }
         fdn->next = new_list;
@@ -312,7 +305,7 @@
             !fdn->readable_registered) {
           grpc_ares_ev_driver_ref(ev_driver);
           gpr_log(GPR_DEBUG, "notify read on: %d", grpc_fd_wrapped_fd(fdn->fd));
-          grpc_fd_notify_on_read(exec_ctx, fdn->fd, &fdn->read_closure);
+          grpc_fd_notify_on_read(fdn->fd, &fdn->read_closure);
           fdn->readable_registered = true;
         }
         // Register write_closure if the socket is writable and write_closure
@@ -322,7 +315,7 @@
           gpr_log(GPR_DEBUG, "notify write on: %d",
                   grpc_fd_wrapped_fd(fdn->fd));
           grpc_ares_ev_driver_ref(ev_driver);
-          grpc_fd_notify_on_write(exec_ctx, fdn->fd, &fdn->write_closure);
+          grpc_fd_notify_on_write(fdn->fd, &fdn->write_closure);
           fdn->writable_registered = true;
         }
         gpr_mu_unlock(&fdn->mu);
@@ -335,7 +328,7 @@
   while (ev_driver->fds != nullptr) {
     fd_node* cur = ev_driver->fds;
     ev_driver->fds = ev_driver->fds->next;
-    fd_node_shutdown(exec_ctx, cur);
+    fd_node_shutdown(cur);
   }
   ev_driver->fds = new_list;
   // If the ev driver has no working fd, all the tasks are done.
@@ -345,12 +338,11 @@
   }
 }
 
-void grpc_ares_ev_driver_start(grpc_exec_ctx* exec_ctx,
-                               grpc_ares_ev_driver* ev_driver) {
+void grpc_ares_ev_driver_start(grpc_ares_ev_driver* ev_driver) {
   gpr_mu_lock(&ev_driver->mu);
   if (!ev_driver->working) {
     ev_driver->working = true;
-    grpc_ares_notify_on_event_locked(exec_ctx, ev_driver);
+    grpc_ares_notify_on_event_locked(ev_driver);
   }
   gpr_mu_unlock(&ev_driver->mu);
 }
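
The hunks above drop the explicitly threaded grpc_exec_ctx* parameter from the c-ares event-driver functions; callees now reach an ambient execution context instead of receiving it as an argument. A minimal standalone sketch of that idea, assuming a thread-local slot like the one grpc_core::ExecCtx uses elsewhere in this diff; MiniExecCtx and DoWork are illustrative names, not real gRPC APIs:

#include <cassert>
#include <cstdio>

// Standalone stand-in for the ambient-context pattern: a scoped object
// installs itself in a thread-local slot and callees fetch it via Get().
class MiniExecCtx {
 public:
  MiniExecCtx() : prev_(current_) { current_ = this; }  // install on this thread
  ~MiniExecCtx() { current_ = prev_; }                  // restore any outer context
  static MiniExecCtx* Get() { return current_; }
  void Note(const char* what) { std::printf("queued: %s\n", what); }

 private:
  MiniExecCtx* prev_;
  static thread_local MiniExecCtx* current_;
};

thread_local MiniExecCtx* MiniExecCtx::current_ = nullptr;

// No context parameter any more; the callee asserts one is in scope.
static void DoWork() {
  assert(MiniExecCtx::Get() != nullptr);
  MiniExecCtx::Get()->Note("notify_on_event");
}

int main() {
  MiniExecCtx exec_ctx;  // mirrors `grpc_core::ExecCtx exec_ctx;` at an entry point
  DoWork();
}
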
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
index 7846576..3a870b2 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc
@@ -96,24 +96,12 @@
   gpr_ref(&r->pending_queries);
 }
 
-static void grpc_ares_request_unref(grpc_exec_ctx* exec_ctx,
-                                    grpc_ares_request* r) {
+static void grpc_ares_request_unref(grpc_ares_request* r) {
   /* If there are no pending queries, invoke on_done callback and destroy the
      request */
   if (gpr_unref(&r->pending_queries)) {
     /* TODO(zyc): Sort results with RFC6724 before invoking on_done. */
-    if (exec_ctx == nullptr) {
-      /* A new exec_ctx is created here, as the c-ares interface does not
-         provide one in ares_host_callback. It's safe to schedule on_done with
-         the newly created exec_ctx, since the caller has been warned not to
-         acquire locks in on_done. ares_dns_resolver is using combiner to
-         protect resources needed by on_done. */
-      grpc_exec_ctx new_exec_ctx = GRPC_EXEC_CTX_INIT;
-      GRPC_CLOSURE_SCHED(&new_exec_ctx, r->on_done, r->error);
-      grpc_exec_ctx_finish(&new_exec_ctx);
-    } else {
-      GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, r->error);
-    }
+    GRPC_CLOSURE_SCHED(r->on_done, r->error);
     gpr_mu_destroy(&r->mu);
     grpc_ares_ev_driver_destroy(r->ev_driver);
     gpr_free(r);
@@ -133,9 +121,8 @@
   return hr;
 }
 
-static void destroy_hostbyname_request(grpc_exec_ctx* exec_ctx,
-                                       grpc_ares_hostbyname_request* hr) {
-  grpc_ares_request_unref(exec_ctx, hr->parent_request);
+static void destroy_hostbyname_request(grpc_ares_hostbyname_request* hr) {
+  grpc_ares_request_unref(hr->parent_request);
   gpr_free(hr->host);
   gpr_free(hr);
 }
@@ -220,13 +207,13 @@
     }
   }
   gpr_mu_unlock(&r->mu);
-  destroy_hostbyname_request(nullptr, hr);
+  destroy_hostbyname_request(hr);
 }
 
 static void on_srv_query_done_cb(void* arg, int status, int timeouts,
                                  unsigned char* abuf, int alen) {
   grpc_ares_request* r = (grpc_ares_request*)arg;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_log(GPR_DEBUG, "on_query_srv_done_cb");
   if (status == ARES_SUCCESS) {
     gpr_log(GPR_DEBUG, "on_query_srv_done_cb ARES_SUCCESS");
@@ -246,7 +233,7 @@
             r, srv_it->host, htons(srv_it->port), true /* is_balancer */);
         ares_gethostbyname(*channel, hr->host, AF_INET, on_hostbyname_done_cb,
                            hr);
-        grpc_ares_ev_driver_start(&exec_ctx, r->ev_driver);
+        grpc_ares_ev_driver_start(r->ev_driver);
       }
     }
     if (reply != nullptr) {
@@ -264,8 +251,7 @@
       r->error = grpc_error_add_child(error, r->error);
     }
   }
-  grpc_ares_request_unref(&exec_ctx, r);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_ares_request_unref(r);
 }
 
 static const char g_service_config_attribute_prefix[] = "grpc_config=";
@@ -323,14 +309,13 @@
   }
 done:
   gpr_mu_unlock(&r->mu);
-  grpc_ares_request_unref(nullptr, r);
+  grpc_ares_request_unref(r);
 }
 
 static grpc_ares_request* grpc_dns_lookup_ares_impl(
-    grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
-    const char* default_port, grpc_pollset_set* interested_parties,
-    grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
-    char** service_config_json) {
+    const char* dns_server, const char* name, const char* default_port,
+    grpc_pollset_set* interested_parties, grpc_closure* on_done,
+    grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json) {
   grpc_error* error = GRPC_ERROR_NONE;
   grpc_ares_hostbyname_request* hr = nullptr;
   grpc_ares_request* r = nullptr;
@@ -437,28 +422,28 @@
     gpr_free(config_name);
   }
   /* TODO(zyc): Handle CNAME records here. */
-  grpc_ares_ev_driver_start(exec_ctx, r->ev_driver);
-  grpc_ares_request_unref(exec_ctx, r);
+  grpc_ares_ev_driver_start(r->ev_driver);
+  grpc_ares_request_unref(r);
   gpr_free(host);
   gpr_free(port);
   return r;
 
 error_cleanup:
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(on_done, error);
   gpr_free(host);
   gpr_free(port);
   return nullptr;
 }
 
 grpc_ares_request* (*grpc_dns_lookup_ares)(
-    grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
-    const char* default_port, grpc_pollset_set* interested_parties,
-    grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+    const char* dns_server, const char* name, const char* default_port,
+    grpc_pollset_set* interested_parties, grpc_closure* on_done,
+    grpc_lb_addresses** addrs, bool check_grpclb,
     char** service_config_json) = grpc_dns_lookup_ares_impl;
 
-void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx, grpc_ares_request* r) {
+void grpc_cancel_ares_request(grpc_ares_request* r) {
   if (grpc_dns_lookup_ares == grpc_dns_lookup_ares_impl) {
-    grpc_ares_ev_driver_shutdown(exec_ctx, r->ev_driver);
+    grpc_ares_ev_driver_shutdown(r->ev_driver);
   }
 }
 
@@ -501,8 +486,7 @@
   grpc_closure on_dns_lookup_done;
 } grpc_resolve_address_ares_request;
 
-static void on_dns_lookup_done_cb(grpc_exec_ctx* exec_ctx, void* arg,
-                                  grpc_error* error) {
+static void on_dns_lookup_done_cb(void* arg, grpc_error* error) {
   grpc_resolve_address_ares_request* r =
       (grpc_resolve_address_ares_request*)arg;
   grpc_resolved_addresses** resolved_addresses = r->addrs_out;
@@ -520,14 +504,12 @@
              &r->lb_addrs->addresses[i].address, sizeof(grpc_resolved_address));
     }
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, r->on_resolve_address_done,
-                     GRPC_ERROR_REF(error));
-  grpc_lb_addresses_destroy(exec_ctx, r->lb_addrs);
+  GRPC_CLOSURE_SCHED(r->on_resolve_address_done, GRPC_ERROR_REF(error));
+  grpc_lb_addresses_destroy(r->lb_addrs);
   gpr_free(r);
 }
 
-static void grpc_resolve_address_ares_impl(grpc_exec_ctx* exec_ctx,
-                                           const char* name,
+static void grpc_resolve_address_ares_impl(const char* name,
                                            const char* default_port,
                                            grpc_pollset_set* interested_parties,
                                            grpc_closure* on_done,
@@ -539,14 +521,14 @@
   r->on_resolve_address_done = on_done;
   GRPC_CLOSURE_INIT(&r->on_dns_lookup_done, on_dns_lookup_done_cb, r,
                     grpc_schedule_on_exec_ctx);
-  grpc_dns_lookup_ares(exec_ctx, nullptr /* dns_server */, name, default_port,
+  grpc_dns_lookup_ares(nullptr /* dns_server */, name, default_port,
                        interested_parties, &r->on_dns_lookup_done, &r->lb_addrs,
                        false /* check_grpclb */,
                        nullptr /* service_config_json */);
 }
 
 void (*grpc_resolve_address_ares)(
-    grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+    const char* name, const char* default_port,
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl;
 
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
index 6882b7b..86d870e 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.h
@@ -25,10 +25,6 @@
 #include "src/core/lib/iomgr/polling_entity.h"
 #include "src/core/lib/iomgr/resolve_address.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_ares_request grpc_ares_request;
 
 /* Asynchronously resolve \a name. Use \a default_port if a port isn't
@@ -36,8 +32,7 @@
    must be called at least once before this function. \a on_done may be
    called directly in this function without being scheduled with \a exec_ctx,
    so it must not try to acquire locks that are being held by the caller. */
-extern void (*grpc_resolve_address_ares)(grpc_exec_ctx* exec_ctx,
-                                         const char* name,
+extern void (*grpc_resolve_address_ares)(const char* name,
                                          const char* default_port,
                                          grpc_pollset_set* interested_parties,
                                          grpc_closure* on_done,
@@ -51,14 +46,13 @@
   scheduled with \a exec_ctx, so it must not try to acquire locks that are
   being held by the caller. */
 extern grpc_ares_request* (*grpc_dns_lookup_ares)(
-    grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
-    const char* default_port, grpc_pollset_set* interested_parties,
-    grpc_closure* on_done, grpc_lb_addresses** addresses, bool check_grpclb,
+    const char* dns_server, const char* name, const char* default_port,
+    grpc_pollset_set* interested_parties, grpc_closure* on_done,
+    grpc_lb_addresses** addresses, bool check_grpclb,
     char** service_config_json);
 
 /* Cancel the pending grpc_ares_request \a request */
-void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx,
-                              grpc_ares_request* request);
+void grpc_cancel_ares_request(grpc_ares_request* request);
 
 /* Initialize gRPC ares wrapper. Must be called at least once before
    grpc_resolve_address_ares(). */
@@ -69,9 +63,5 @@
    it has been called the same number of times as grpc_ares_init(). */
 void grpc_ares_cleanup(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_DNS_C_ARES_GRPC_ARES_WRAPPER_H \
         */
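
grpc_ares_wrapper.cc above keeps the pending-queries refcount: each in-flight query holds a reference on the grpc_ares_request, and the last grpc_ares_request_unref schedules on_done and frees the request (the old nullptr-exec_ctx branch is gone because the callback now constructs its own grpc_core::ExecCtx). A standalone sketch of that last-unref-fires-callback pattern, with illustrative names (Request, RequestUnref) rather than the real gpr_ref/gpr_unref API:

#include <atomic>
#include <cstdio>
#include <functional>

struct Request {
  std::atomic<int> pending_queries{1};  // one reference held by the initiator
  std::function<void()> on_done;
};

static void RequestRef(Request* r) { r->pending_queries.fetch_add(1); }

static void RequestUnref(Request* r) {
  if (r->pending_queries.fetch_sub(1) == 1) {  // this was the last reference
    r->on_done();                              // completion fires exactly once
    delete r;
  }
}

int main() {
  Request* r = new Request;
  r->on_done = [] { std::printf("all queries finished\n"); };
  RequestRef(r);    // a lookup goes in flight
  RequestUnref(r);  // that lookup completes
  RequestUnref(r);  // initiator drops its reference; on_done fires here
}
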
diff --git a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
index a68a7c4..a184cf2 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc
@@ -26,34 +26,32 @@
 };
 
 static grpc_ares_request* grpc_dns_lookup_ares_impl(
-    grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
-    const char* default_port, grpc_pollset_set* interested_parties,
-    grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
-    char** service_config_json) {
+    const char* dns_server, const char* name, const char* default_port,
+    grpc_pollset_set* interested_parties, grpc_closure* on_done,
+    grpc_lb_addresses** addrs, bool check_grpclb, char** service_config_json) {
   return NULL;
 }
 
 grpc_ares_request* (*grpc_dns_lookup_ares)(
-    grpc_exec_ctx* exec_ctx, const char* dns_server, const char* name,
-    const char* default_port, grpc_pollset_set* interested_parties,
-    grpc_closure* on_done, grpc_lb_addresses** addrs, bool check_grpclb,
+    const char* dns_server, const char* name, const char* default_port,
+    grpc_pollset_set* interested_parties, grpc_closure* on_done,
+    grpc_lb_addresses** addrs, bool check_grpclb,
     char** service_config_json) = grpc_dns_lookup_ares_impl;
 
-void grpc_cancel_ares_request(grpc_exec_ctx* exec_ctx, grpc_ares_request* r) {}
+void grpc_cancel_ares_request(grpc_ares_request* r) {}
 
 grpc_error* grpc_ares_init(void) { return GRPC_ERROR_NONE; }
 
 void grpc_ares_cleanup(void) {}
 
-static void grpc_resolve_address_ares_impl(grpc_exec_ctx* exec_ctx,
-                                           const char* name,
+static void grpc_resolve_address_ares_impl(const char* name,
                                            const char* default_port,
                                            grpc_pollset_set* interested_parties,
                                            grpc_closure* on_done,
                                            grpc_resolved_addresses** addrs) {}
 
 void (*grpc_resolve_address_ares)(
-    grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+    const char* name, const char* default_port,
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_resolved_addresses** addrs) = grpc_resolve_address_ares_impl;
 
diff --git a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
index 589c748..1c2cfc0 100644
--- a/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc
@@ -33,9 +33,9 @@
 #include "src/core/lib/iomgr/resolve_address.h"
 #include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/support/env.h"
+#include "src/core/lib/support/manual_constructor.h"
 #include "src/core/lib/support/string.h"
 
-#define GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS 1
 #define GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS 1
 #define GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER 1.6
 #define GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS 120
@@ -70,55 +70,48 @@
   grpc_timer retry_timer;
   grpc_closure on_retry;
   /** retry backoff state */
-  grpc_backoff backoff_state;
+  grpc_core::ManualConstructor<grpc_core::BackOff> backoff;
 
   /** currently resolving addresses */
   grpc_resolved_addresses* addresses;
 } dns_resolver;
 
-static void dns_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
+static void dns_destroy(grpc_resolver* r);
 
-static void dns_start_resolving_locked(grpc_exec_ctx* exec_ctx,
-                                       dns_resolver* r);
-static void dns_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
-                                         dns_resolver* r);
+static void dns_start_resolving_locked(dns_resolver* r);
+static void dns_maybe_finish_next_locked(dns_resolver* r);
 
-static void dns_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
-static void dns_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
-                                         grpc_resolver* r);
-static void dns_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
-                            grpc_channel_args** target_result,
+static void dns_shutdown_locked(grpc_resolver* r);
+static void dns_channel_saw_error_locked(grpc_resolver* r);
+static void dns_next_locked(grpc_resolver* r, grpc_channel_args** target_result,
                             grpc_closure* on_complete);
 
 static const grpc_resolver_vtable dns_resolver_vtable = {
     dns_destroy, dns_shutdown_locked, dns_channel_saw_error_locked,
     dns_next_locked};
 
-static void dns_shutdown_locked(grpc_exec_ctx* exec_ctx,
-                                grpc_resolver* resolver) {
+static void dns_shutdown_locked(grpc_resolver* resolver) {
   dns_resolver* r = (dns_resolver*)resolver;
   if (r->have_retry_timer) {
-    grpc_timer_cancel(exec_ctx, &r->retry_timer);
+    grpc_timer_cancel(&r->retry_timer);
   }
   if (r->next_completion != nullptr) {
     *r->target_result = nullptr;
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, r->next_completion,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
+    GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                               "Resolver Shutdown"));
     r->next_completion = nullptr;
   }
 }
 
-static void dns_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
-                                         grpc_resolver* resolver) {
+static void dns_channel_saw_error_locked(grpc_resolver* resolver) {
   dns_resolver* r = (dns_resolver*)resolver;
   if (!r->resolving) {
-    grpc_backoff_reset(&r->backoff_state);
-    dns_start_resolving_locked(exec_ctx, r);
+    r->backoff->Reset();
+    dns_start_resolving_locked(r);
   }
 }
 
-static void dns_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* resolver,
+static void dns_next_locked(grpc_resolver* resolver,
                             grpc_channel_args** target_result,
                             grpc_closure* on_complete) {
   dns_resolver* r = (dns_resolver*)resolver;
@@ -126,29 +119,27 @@
   r->next_completion = on_complete;
   r->target_result = target_result;
   if (r->resolved_version == 0 && !r->resolving) {
-    grpc_backoff_reset(&r->backoff_state);
-    dns_start_resolving_locked(exec_ctx, r);
+    r->backoff->Reset();
+    dns_start_resolving_locked(r);
   } else {
-    dns_maybe_finish_next_locked(exec_ctx, r);
+    dns_maybe_finish_next_locked(r);
   }
 }
 
-static void dns_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                      grpc_error* error) {
+static void dns_on_retry_timer_locked(void* arg, grpc_error* error) {
   dns_resolver* r = (dns_resolver*)arg;
 
   r->have_retry_timer = false;
   if (error == GRPC_ERROR_NONE) {
     if (!r->resolving) {
-      dns_start_resolving_locked(exec_ctx, r);
+      dns_start_resolving_locked(r);
     }
   }
 
-  GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "retry-timer");
+  GRPC_RESOLVER_UNREF(&r->base, "retry-timer");
 }
 
-static void dns_on_resolved_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_error* error) {
+static void dns_on_resolved_locked(void* arg, grpc_error* error) {
   dns_resolver* r = (dns_resolver*)arg;
   grpc_channel_args* result = nullptr;
   GPR_ASSERT(r->resolving);
@@ -168,11 +159,10 @@
     grpc_arg new_arg = grpc_lb_addresses_create_channel_arg(addresses);
     result = grpc_channel_args_copy_and_add(r->channel_args, &new_arg, 1);
     grpc_resolved_addresses_destroy(r->addresses);
-    grpc_lb_addresses_destroy(exec_ctx, addresses);
+    grpc_lb_addresses_destroy(addresses);
   } else {
-    grpc_millis next_try =
-        grpc_backoff_step(exec_ctx, &r->backoff_state).next_attempt_start_time;
-    grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
+    grpc_millis next_try = r->backoff->Step();
+    grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
     gpr_log(GPR_INFO, "dns resolution failed (will retry): %s",
             grpc_error_string(error));
     GPR_ASSERT(!r->have_retry_timer);
@@ -185,59 +175,56 @@
     }
     GRPC_CLOSURE_INIT(&r->on_retry, dns_on_retry_timer_locked, r,
                       grpc_combiner_scheduler(r->base.combiner));
-    grpc_timer_init(exec_ctx, &r->retry_timer, next_try, &r->on_retry);
+    grpc_timer_init(&r->retry_timer, next_try, &r->on_retry);
   }
   if (r->resolved_result != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, r->resolved_result);
+    grpc_channel_args_destroy(r->resolved_result);
   }
   r->resolved_result = result;
   r->resolved_version++;
-  dns_maybe_finish_next_locked(exec_ctx, r);
+  dns_maybe_finish_next_locked(r);
   GRPC_ERROR_UNREF(error);
 
-  GRPC_RESOLVER_UNREF(exec_ctx, &r->base, "dns-resolving");
+  GRPC_RESOLVER_UNREF(&r->base, "dns-resolving");
 }
 
-static void dns_start_resolving_locked(grpc_exec_ctx* exec_ctx,
-                                       dns_resolver* r) {
+static void dns_start_resolving_locked(dns_resolver* r) {
   GRPC_RESOLVER_REF(&r->base, "dns-resolving");
   GPR_ASSERT(!r->resolving);
   r->resolving = true;
   r->addresses = nullptr;
   grpc_resolve_address(
-      exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
+      r->name_to_resolve, r->default_port, r->interested_parties,
       GRPC_CLOSURE_CREATE(dns_on_resolved_locked, r,
                           grpc_combiner_scheduler(r->base.combiner)),
       &r->addresses);
 }
 
-static void dns_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
-                                         dns_resolver* r) {
+static void dns_maybe_finish_next_locked(dns_resolver* r) {
   if (r->next_completion != nullptr &&
       r->resolved_version != r->published_version) {
     *r->target_result = r->resolved_result == nullptr
                             ? nullptr
                             : grpc_channel_args_copy(r->resolved_result);
-    GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = nullptr;
     r->published_version = r->resolved_version;
   }
 }
 
-static void dns_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
+static void dns_destroy(grpc_resolver* gr) {
   dns_resolver* r = (dns_resolver*)gr;
   if (r->resolved_result != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, r->resolved_result);
+    grpc_channel_args_destroy(r->resolved_result);
   }
-  grpc_pollset_set_destroy(exec_ctx, r->interested_parties);
+  grpc_pollset_set_destroy(r->interested_parties);
   gpr_free(r->name_to_resolve);
   gpr_free(r->default_port);
-  grpc_channel_args_destroy(exec_ctx, r->channel_args);
+  grpc_channel_args_destroy(r->channel_args);
   gpr_free(r);
 }
 
-static grpc_resolver* dns_create(grpc_exec_ctx* exec_ctx,
-                                 grpc_resolver_args* args,
+static grpc_resolver* dns_create(grpc_resolver_args* args,
                                  const char* default_port) {
   if (0 != strcmp(args->uri->authority, "")) {
     gpr_log(GPR_ERROR, "authority based dns uri's not supported");
@@ -254,14 +241,15 @@
   r->channel_args = grpc_channel_args_copy(args->args);
   r->interested_parties = grpc_pollset_set_create();
   if (args->pollset_set != nullptr) {
-    grpc_pollset_set_add_pollset_set(exec_ctx, r->interested_parties,
-                                     args->pollset_set);
+    grpc_pollset_set_add_pollset_set(r->interested_parties, args->pollset_set);
   }
-  grpc_backoff_init(
-      &r->backoff_state, GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * 1000,
-      GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER, GRPC_DNS_RECONNECT_JITTER,
-      GRPC_DNS_MIN_CONNECT_TIMEOUT_SECONDS * 1000,
-      GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+  grpc_core::BackOff::Options backoff_options;
+  backoff_options
+      .set_initial_backoff(GRPC_DNS_INITIAL_CONNECT_BACKOFF_SECONDS * 1000)
+      .set_multiplier(GRPC_DNS_RECONNECT_BACKOFF_MULTIPLIER)
+      .set_jitter(GRPC_DNS_RECONNECT_JITTER)
+      .set_max_backoff(GRPC_DNS_RECONNECT_MAX_BACKOFF_SECONDS * 1000);
+  r->backoff.Init(grpc_core::BackOff(backoff_options));
   return &r->base;
 }
 
@@ -274,9 +262,8 @@
 static void dns_factory_unref(grpc_resolver_factory* factory) {}
 
 static grpc_resolver* dns_factory_create_resolver(
-    grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
-    grpc_resolver_args* args) {
-  return dns_create(exec_ctx, args, "https");
+    grpc_resolver_factory* factory, grpc_resolver_args* args) {
+  return dns_create(args, "https");
 }
 
 static char* dns_factory_get_default_host_name(grpc_resolver_factory* factory,
@@ -295,7 +282,7 @@
   return &dns_resolver_factory;
 }
 
-extern "C" void grpc_resolver_dns_native_init(void) {
+void grpc_resolver_dns_native_init(void) {
   char* resolver = gpr_getenv("GRPC_DNS_RESOLVER");
   if (resolver != nullptr && gpr_stricmp(resolver, "native") == 0) {
     gpr_log(GPR_DEBUG, "Using native dns resolver");
@@ -313,4 +300,4 @@
   gpr_free(resolver);
 }
 
-extern "C" void grpc_resolver_dns_native_shutdown(void) {}
+void grpc_resolver_dns_native_shutdown(void) {}
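
dns_resolver.cc above replaces the grpc_backoff C struct with grpc_core::BackOff, configured through a chained Options builder and held in a grpc_core::ManualConstructor so construction can be deferred until dns_create runs on the gpr_zalloc'ed struct. A simplified, runnable stand-in for the builder-plus-Step() shape (not the real implementation: the real Step() returns an absolute next-attempt time, which the diff converts to a delay by subtracting ExecCtx::Get()->Now(), and jitter is omitted here):

#include <algorithm>
#include <cstdio>

class Backoff {
 public:
  class Options {
   public:
    Options& set_initial_backoff(int ms) { initial_ms = ms; return *this; }
    Options& set_multiplier(double m) { multiplier = m; return *this; }
    Options& set_jitter(double j) { jitter = j; return *this; }
    Options& set_max_backoff(int ms) { max_ms = ms; return *this; }
    int initial_ms = 1000;
    double multiplier = 1.6;
    double jitter = 0.2;
    int max_ms = 120000;
  };

  explicit Backoff(const Options& options)
      : options_(options), current_ms_(options.initial_ms) {}

  // Returns the delay before the next retry, growing geometrically up to the
  // configured maximum.
  int Step() {
    int result = current_ms_;
    current_ms_ = std::min(
        static_cast<int>(current_ms_ * options_.multiplier), options_.max_ms);
    return result;
  }

  void Reset() { current_ms_ = options_.initial_ms; }

 private:
  Options options_;
  int current_ms_;
};

int main() {
  Backoff::Options options;
  options.set_initial_backoff(1000)
      .set_multiplier(1.6)
      .set_jitter(0.2)
      .set_max_backoff(120000);
  Backoff backoff(options);
  for (int i = 0; i < 4; ++i) std::printf("retry #%d in %d ms\n", i, backoff.Step());
}
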
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
index 85d7090..fe3ad14 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc
@@ -67,57 +67,52 @@
   grpc_channel_args** target_result;
 } fake_resolver;
 
-static void fake_resolver_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
+static void fake_resolver_destroy(grpc_resolver* gr) {
   fake_resolver* r = (fake_resolver*)gr;
-  grpc_channel_args_destroy(exec_ctx, r->next_results);
-  grpc_channel_args_destroy(exec_ctx, r->results_upon_error);
-  grpc_channel_args_destroy(exec_ctx, r->channel_args);
+  grpc_channel_args_destroy(r->next_results);
+  grpc_channel_args_destroy(r->results_upon_error);
+  grpc_channel_args_destroy(r->channel_args);
   gpr_free(r);
 }
 
-static void fake_resolver_shutdown_locked(grpc_exec_ctx* exec_ctx,
-                                          grpc_resolver* resolver) {
+static void fake_resolver_shutdown_locked(grpc_resolver* resolver) {
   fake_resolver* r = (fake_resolver*)resolver;
   if (r->next_completion != nullptr) {
     *r->target_result = nullptr;
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, r->next_completion,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
+    GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                               "Resolver Shutdown"));
     r->next_completion = nullptr;
   }
 }
 
-static void fake_resolver_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
-                                                   fake_resolver* r) {
+static void fake_resolver_maybe_finish_next_locked(fake_resolver* r) {
   if (r->next_completion != nullptr && r->next_results != nullptr) {
     *r->target_result =
         grpc_channel_args_union(r->next_results, r->channel_args);
-    grpc_channel_args_destroy(exec_ctx, r->next_results);
+    grpc_channel_args_destroy(r->next_results);
     r->next_results = nullptr;
-    GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = nullptr;
   }
 }
 
-static void fake_resolver_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
-                                                   grpc_resolver* resolver) {
+static void fake_resolver_channel_saw_error_locked(grpc_resolver* resolver) {
   fake_resolver* r = (fake_resolver*)resolver;
   if (r->next_results == nullptr && r->results_upon_error != nullptr) {
     // Pretend we re-resolved.
     r->next_results = grpc_channel_args_copy(r->results_upon_error);
   }
-  fake_resolver_maybe_finish_next_locked(exec_ctx, r);
+  fake_resolver_maybe_finish_next_locked(r);
 }
 
-static void fake_resolver_next_locked(grpc_exec_ctx* exec_ctx,
-                                      grpc_resolver* resolver,
+static void fake_resolver_next_locked(grpc_resolver* resolver,
                                       grpc_channel_args** target_result,
                                       grpc_closure* on_complete) {
   fake_resolver* r = (fake_resolver*)resolver;
   GPR_ASSERT(!r->next_completion);
   r->next_completion = on_complete;
   r->target_result = target_result;
-  fake_resolver_maybe_finish_next_locked(exec_ctx, r);
+  fake_resolver_maybe_finish_next_locked(r);
 }
 
 static const grpc_resolver_vtable fake_resolver_vtable = {
@@ -157,33 +152,31 @@
   grpc_channel_args* next_response;
 } set_response_closure_arg;
 
-static void set_response_closure_fn(grpc_exec_ctx* exec_ctx, void* arg,
-                                    grpc_error* error) {
+static void set_response_closure_fn(void* arg, grpc_error* error) {
   set_response_closure_arg* closure_arg = (set_response_closure_arg*)arg;
   grpc_fake_resolver_response_generator* generator = closure_arg->generator;
   fake_resolver* r = generator->resolver;
   if (r->next_results != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, r->next_results);
+    grpc_channel_args_destroy(r->next_results);
   }
   r->next_results = closure_arg->next_response;
   if (r->results_upon_error != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, r->results_upon_error);
+    grpc_channel_args_destroy(r->results_upon_error);
   }
   r->results_upon_error = grpc_channel_args_copy(closure_arg->next_response);
   gpr_free(closure_arg);
-  fake_resolver_maybe_finish_next_locked(exec_ctx, r);
+  fake_resolver_maybe_finish_next_locked(r);
 }
 
 void grpc_fake_resolver_response_generator_set_response(
-    grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator,
+    grpc_fake_resolver_response_generator* generator,
     grpc_channel_args* next_response) {
   GPR_ASSERT(generator->resolver != nullptr);
   set_response_closure_arg* closure_arg =
       (set_response_closure_arg*)gpr_zalloc(sizeof(*closure_arg));
   closure_arg->generator = generator;
   closure_arg->next_response = grpc_channel_args_copy(next_response);
-  GRPC_CLOSURE_SCHED(exec_ctx,
-                     GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
+  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_INIT(&closure_arg->set_response_closure,
                                        set_response_closure_fn, closure_arg,
                                        grpc_combiner_scheduler(
                                            generator->resolver->base.combiner)),
@@ -195,7 +188,7 @@
       (grpc_fake_resolver_response_generator*)p);
 }
 
-static void response_generator_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+static void response_generator_arg_destroy(void* p) {
   grpc_fake_resolver_response_generator_unref(
       (grpc_fake_resolver_response_generator*)p);
 }
@@ -232,8 +225,7 @@
 
 static void fake_resolver_factory_unref(grpc_resolver_factory* factory) {}
 
-static grpc_resolver* fake_resolver_create(grpc_exec_ctx* exec_ctx,
-                                           grpc_resolver_factory* factory,
+static grpc_resolver* fake_resolver_create(grpc_resolver_factory* factory,
                                            grpc_resolver_args* args) {
   fake_resolver* r = (fake_resolver*)gpr_zalloc(sizeof(*r));
   r->channel_args = grpc_channel_args_copy(args->args);
@@ -258,8 +250,8 @@
 static grpc_resolver_factory fake_resolver_factory = {
     &fake_resolver_factory_vtable};
 
-extern "C" void grpc_resolver_fake_init(void) {
+void grpc_resolver_fake_init(void) {
   grpc_register_resolver_type(&fake_resolver_factory);
 }
 
-extern "C" void grpc_resolver_fake_shutdown(void) {}
+void grpc_resolver_fake_shutdown(void) {}
diff --git a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
index 3f341fa..a8977e5 100644
--- a/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
+++ b/src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h
@@ -21,10 +21,6 @@
 #include "src/core/ext/filters/client_channel/uri_parser.h"
 #include "src/core/lib/channel/channel_args.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #define GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR \
   "grpc.fake_resolver.response_generator"
 
@@ -43,7 +39,7 @@
 // Instruct the fake resolver associated with the \a response_generator instance
 // to trigger a new resolution for \a uri and \a args.
 void grpc_fake_resolver_response_generator_set_response(
-    grpc_exec_ctx* exec_ctx, grpc_fake_resolver_response_generator* generator,
+    grpc_fake_resolver_response_generator* generator,
     grpc_channel_args* next_response);
 
 // Return a \a grpc_arg for a \a grpc_fake_resolver_response_generator instance.
@@ -60,9 +56,5 @@
 void grpc_fake_resolver_response_generator_unref(
     grpc_fake_resolver_response_generator* generator);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FAKE_FAKE_RESOLVER_H \
         */
diff --git a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
index 1da8ab9..7d1e283 100644
--- a/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
+++ b/src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc
@@ -52,15 +52,13 @@
   grpc_channel_args** target_result;
 } sockaddr_resolver;
 
-static void sockaddr_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
+static void sockaddr_destroy(grpc_resolver* r);
 
-static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
-                                              sockaddr_resolver* r);
+static void sockaddr_maybe_finish_next_locked(sockaddr_resolver* r);
 
-static void sockaddr_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r);
-static void sockaddr_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
-                                              grpc_resolver* r);
-static void sockaddr_next_locked(grpc_exec_ctx* exec_ctx, grpc_resolver* r,
+static void sockaddr_shutdown_locked(grpc_resolver* r);
+static void sockaddr_channel_saw_error_locked(grpc_resolver* r);
+static void sockaddr_next_locked(grpc_resolver* r,
                                  grpc_channel_args** target_result,
                                  grpc_closure* on_complete);
 
@@ -68,52 +66,47 @@
     sockaddr_destroy, sockaddr_shutdown_locked,
     sockaddr_channel_saw_error_locked, sockaddr_next_locked};
 
-static void sockaddr_shutdown_locked(grpc_exec_ctx* exec_ctx,
-                                     grpc_resolver* resolver) {
+static void sockaddr_shutdown_locked(grpc_resolver* resolver) {
   sockaddr_resolver* r = (sockaddr_resolver*)resolver;
   if (r->next_completion != nullptr) {
     *r->target_result = nullptr;
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, r->next_completion,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Resolver Shutdown"));
+    GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                               "Resolver Shutdown"));
     r->next_completion = nullptr;
   }
 }
 
-static void sockaddr_channel_saw_error_locked(grpc_exec_ctx* exec_ctx,
-                                              grpc_resolver* resolver) {
+static void sockaddr_channel_saw_error_locked(grpc_resolver* resolver) {
   sockaddr_resolver* r = (sockaddr_resolver*)resolver;
   r->published = false;
-  sockaddr_maybe_finish_next_locked(exec_ctx, r);
+  sockaddr_maybe_finish_next_locked(r);
 }
 
-static void sockaddr_next_locked(grpc_exec_ctx* exec_ctx,
-                                 grpc_resolver* resolver,
+static void sockaddr_next_locked(grpc_resolver* resolver,
                                  grpc_channel_args** target_result,
                                  grpc_closure* on_complete) {
   sockaddr_resolver* r = (sockaddr_resolver*)resolver;
   GPR_ASSERT(!r->next_completion);
   r->next_completion = on_complete;
   r->target_result = target_result;
-  sockaddr_maybe_finish_next_locked(exec_ctx, r);
+  sockaddr_maybe_finish_next_locked(r);
 }
 
-static void sockaddr_maybe_finish_next_locked(grpc_exec_ctx* exec_ctx,
-                                              sockaddr_resolver* r) {
+static void sockaddr_maybe_finish_next_locked(sockaddr_resolver* r) {
   if (r->next_completion != nullptr && !r->published) {
     r->published = true;
     grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
     *r->target_result =
         grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
-    GRPC_CLOSURE_SCHED(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = nullptr;
   }
 }
 
-static void sockaddr_destroy(grpc_exec_ctx* exec_ctx, grpc_resolver* gr) {
+static void sockaddr_destroy(grpc_resolver* gr) {
   sockaddr_resolver* r = (sockaddr_resolver*)gr;
-  grpc_lb_addresses_destroy(exec_ctx, r->addresses);
-  grpc_channel_args_destroy(exec_ctx, r->channel_args);
+  grpc_lb_addresses_destroy(r->addresses);
+  grpc_channel_args_destroy(r->channel_args);
   gpr_free(r);
 }
 
@@ -142,8 +135,7 @@
 
 static void do_nothing(void* ignored) {}
 
-static grpc_resolver* sockaddr_create(grpc_exec_ctx* exec_ctx,
-                                      grpc_resolver_args* args,
+static grpc_resolver* sockaddr_create(grpc_resolver_args* args,
                                       bool parse(const grpc_uri* uri,
                                                  grpc_resolved_address* dst)) {
   if (0 != strcmp(args->uri->authority, "")) {
@@ -170,10 +162,10 @@
     gpr_free(part_str);
     if (errors_found) break;
   }
-  grpc_slice_buffer_destroy_internal(exec_ctx, &path_parts);
-  grpc_slice_unref_internal(exec_ctx, path_slice);
+  grpc_slice_buffer_destroy_internal(&path_parts);
+  grpc_slice_unref_internal(path_slice);
   if (errors_found) {
-    grpc_lb_addresses_destroy(exec_ctx, addresses);
+    grpc_lb_addresses_destroy(addresses);
     return nullptr;
   }
   /* Instantiate resolver. */
@@ -195,9 +187,8 @@
 
 #define DECL_FACTORY(name)                                                  \
   static grpc_resolver* name##_factory_create_resolver(                     \
-      grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,              \
-      grpc_resolver_args* args) {                                           \
-    return sockaddr_create(exec_ctx, args, grpc_parse_##name);              \
+      grpc_resolver_factory* factory, grpc_resolver_args* args) {           \
+    return sockaddr_create(args, grpc_parse_##name);                        \
   }                                                                         \
   static const grpc_resolver_factory_vtable name##_factory_vtable = {       \
       sockaddr_factory_ref, sockaddr_factory_unref,                         \
@@ -211,7 +202,7 @@
 DECL_FACTORY(ipv4);
 DECL_FACTORY(ipv6);
 
-extern "C" void grpc_resolver_sockaddr_init(void) {
+void grpc_resolver_sockaddr_init(void) {
   grpc_register_resolver_type(&ipv4_resolver_factory);
   grpc_register_resolver_type(&ipv6_resolver_factory);
 #ifdef GRPC_HAVE_UNIX_SOCKET
@@ -219,4 +210,4 @@
 #endif
 }
 
-extern "C" void grpc_resolver_sockaddr_shutdown(void) {}
+void grpc_resolver_sockaddr_shutdown(void) {}
diff --git a/src/core/ext/filters/client_channel/resolver_factory.cc b/src/core/ext/filters/client_channel/resolver_factory.cc
index 1a289d9..9b3ec2f 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.cc
+++ b/src/core/ext/filters/client_channel/resolver_factory.cc
@@ -28,10 +28,9 @@
 
 /** Create a resolver instance for a name */
 grpc_resolver* grpc_resolver_factory_create_resolver(
-    grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
-    grpc_resolver_args* args) {
+    grpc_resolver_factory* factory, grpc_resolver_args* args) {
   if (factory == nullptr) return nullptr;
-  return factory->vtable->create_resolver(exec_ctx, factory, args);
+  return factory->vtable->create_resolver(factory, args);
 }
 
 char* grpc_resolver_factory_get_default_authority(
diff --git a/src/core/ext/filters/client_channel/resolver_factory.h b/src/core/ext/filters/client_channel/resolver_factory.h
index 62555a4..170ecc0 100644
--- a/src/core/ext/filters/client_channel/resolver_factory.h
+++ b/src/core/ext/filters/client_channel/resolver_factory.h
@@ -24,10 +24,6 @@
 #include "src/core/ext/filters/client_channel/uri_parser.h"
 #include "src/core/lib/iomgr/pollset_set.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_resolver_factory grpc_resolver_factory;
 typedef struct grpc_resolver_factory_vtable grpc_resolver_factory_vtable;
 
@@ -47,8 +43,7 @@
   void (*unref)(grpc_resolver_factory* factory);
 
   /** Implementation of grpc_resolver_factory_create_resolver */
-  grpc_resolver* (*create_resolver)(grpc_exec_ctx* exec_ctx,
-                                    grpc_resolver_factory* factory,
+  grpc_resolver* (*create_resolver)(grpc_resolver_factory* factory,
                                     grpc_resolver_args* args);
 
   /** Implementation of grpc_resolver_factory_get_default_authority */
@@ -63,16 +58,11 @@
 
 /** Create a resolver instance for a name */
 grpc_resolver* grpc_resolver_factory_create_resolver(
-    grpc_exec_ctx* exec_ctx, grpc_resolver_factory* factory,
-    grpc_resolver_args* args);
+    grpc_resolver_factory* factory, grpc_resolver_args* args);
 
 /** Return a (freshly allocated with gpr_malloc) string representing
     the default authority to use for this scheme. */
 char* grpc_resolver_factory_get_default_authority(
     grpc_resolver_factory* factory, grpc_uri* uri);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_FACTORY_H */
diff --git a/src/core/ext/filters/client_channel/resolver_registry.cc b/src/core/ext/filters/client_channel/resolver_registry.cc
index 5da6114..3f8451d 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.cc
+++ b/src/core/ext/filters/client_channel/resolver_registry.cc
@@ -92,23 +92,22 @@
   return lookup_factory(uri->scheme);
 }
 
-static grpc_resolver_factory* resolve_factory(grpc_exec_ctx* exec_ctx,
-                                              const char* target,
+static grpc_resolver_factory* resolve_factory(const char* target,
                                               grpc_uri** uri,
                                               char** canonical_target) {
   grpc_resolver_factory* factory = nullptr;
 
   GPR_ASSERT(uri != nullptr);
-  *uri = grpc_uri_parse(exec_ctx, target, 1);
+  *uri = grpc_uri_parse(target, 1);
   factory = lookup_factory_by_uri(*uri);
   if (factory == nullptr) {
     grpc_uri_destroy(*uri);
     gpr_asprintf(canonical_target, "%s%s", g_default_resolver_prefix, target);
-    *uri = grpc_uri_parse(exec_ctx, *canonical_target, 1);
+    *uri = grpc_uri_parse(*canonical_target, 1);
     factory = lookup_factory_by_uri(*uri);
     if (factory == nullptr) {
-      grpc_uri_destroy(grpc_uri_parse(exec_ctx, target, 0));
-      grpc_uri_destroy(grpc_uri_parse(exec_ctx, *canonical_target, 0));
+      grpc_uri_destroy(grpc_uri_parse(target, 0));
+      grpc_uri_destroy(grpc_uri_parse(*canonical_target, 0));
       gpr_log(GPR_ERROR, "don't know how to resolve '%s' or '%s'", target,
               *canonical_target);
     }
@@ -116,14 +115,14 @@
   return factory;
 }
 
-grpc_resolver* grpc_resolver_create(grpc_exec_ctx* exec_ctx, const char* target,
+grpc_resolver* grpc_resolver_create(const char* target,
                                     const grpc_channel_args* args,
                                     grpc_pollset_set* pollset_set,
                                     grpc_combiner* combiner) {
   grpc_uri* uri = nullptr;
   char* canonical_target = nullptr;
   grpc_resolver_factory* factory =
-      resolve_factory(exec_ctx, target, &uri, &canonical_target);
+      resolve_factory(target, &uri, &canonical_target);
   grpc_resolver* resolver;
   grpc_resolver_args resolver_args;
   memset(&resolver_args, 0, sizeof(resolver_args));
@@ -131,29 +130,27 @@
   resolver_args.args = args;
   resolver_args.pollset_set = pollset_set;
   resolver_args.combiner = combiner;
-  resolver =
-      grpc_resolver_factory_create_resolver(exec_ctx, factory, &resolver_args);
+  resolver = grpc_resolver_factory_create_resolver(factory, &resolver_args);
   grpc_uri_destroy(uri);
   gpr_free(canonical_target);
   return resolver;
 }
 
-char* grpc_get_default_authority(grpc_exec_ctx* exec_ctx, const char* target) {
+char* grpc_get_default_authority(const char* target) {
   grpc_uri* uri = nullptr;
   char* canonical_target = nullptr;
   grpc_resolver_factory* factory =
-      resolve_factory(exec_ctx, target, &uri, &canonical_target);
+      resolve_factory(target, &uri, &canonical_target);
   char* authority = grpc_resolver_factory_get_default_authority(factory, uri);
   grpc_uri_destroy(uri);
   gpr_free(canonical_target);
   return authority;
 }
 
-char* grpc_resolver_factory_add_default_prefix_if_needed(
-    grpc_exec_ctx* exec_ctx, const char* target) {
+char* grpc_resolver_factory_add_default_prefix_if_needed(const char* target) {
   grpc_uri* uri = nullptr;
   char* canonical_target = nullptr;
-  resolve_factory(exec_ctx, target, &uri, &canonical_target);
+  resolve_factory(target, &uri, &canonical_target);
   grpc_uri_destroy(uri);
   return canonical_target == nullptr ? gpr_strdup(target) : canonical_target;
 }
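
resolver_registry.cc above keeps the two-step factory lookup: parse the target as a URI and, if no registered factory claims its scheme, prepend the default prefix and try again (this is also what grpc_resolver_factory_add_default_prefix_if_needed canonicalizes). A standalone sketch of that fallback, with hypothetical stand-ins (LookupFactory, kDefaultPrefix) for the real registry and URI parser:

#include <cstdio>
#include <string>

static const char* kDefaultPrefix = "dns:///";  // stand-in default scheme prefix

// Pretend registry lookup: recognizes a few schemes seen in this diff.
static bool LookupFactory(const std::string& target, std::string* scheme) {
  size_t colon = target.find(':');
  if (colon == std::string::npos) return false;  // no scheme at all
  *scheme = target.substr(0, colon);
  return *scheme == "dns" || *scheme == "ipv4" || *scheme == "ipv6";
}

// Mirrors resolve_factory(): try the target as-is, then with the prefix added.
static std::string ResolveCanonicalTarget(const std::string& target) {
  std::string scheme;
  if (LookupFactory(target, &scheme)) return target;
  std::string canonical = std::string(kDefaultPrefix) + target;
  if (LookupFactory(canonical, &scheme)) return canonical;
  std::fprintf(stderr, "don't know how to resolve '%s' or '%s'\n", target.c_str(),
               canonical.c_str());
  return "";
}

int main() {
  std::printf("%s\n", ResolveCanonicalTarget("localhost:50051").c_str());       // gets prefixed
  std::printf("%s\n", ResolveCanonicalTarget("ipv4:127.0.0.1:50051").c_str());  // used as-is
}
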
diff --git a/src/core/ext/filters/client_channel/resolver_registry.h b/src/core/ext/filters/client_channel/resolver_registry.h
index 01a2d0b..bbd30df 100644
--- a/src/core/ext/filters/client_channel/resolver_registry.h
+++ b/src/core/ext/filters/client_channel/resolver_registry.h
@@ -22,10 +22,6 @@
 #include "src/core/ext/filters/client_channel/resolver_factory.h"
 #include "src/core/lib/iomgr/pollset_set.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void grpc_resolver_registry_init();
 void grpc_resolver_registry_shutdown(void);
 
@@ -52,7 +48,7 @@
     (typically the set of arguments passed in from the client API).
     \a pollset_set is used to drive IO in the name resolution process, it
     should not be NULL. */
-grpc_resolver* grpc_resolver_create(grpc_exec_ctx* exec_ctx, const char* target,
+grpc_resolver* grpc_resolver_create(const char* target,
                                     const grpc_channel_args* args,
                                     grpc_pollset_set* pollset_set,
                                     grpc_combiner* combiner);
@@ -63,15 +59,10 @@
 
 /** Given a target, return a (freshly allocated with gpr_malloc) string
     representing the default authority to pass from a client. */
-char* grpc_get_default_authority(grpc_exec_ctx* exec_ctx, const char* target);
+char* grpc_get_default_authority(const char* target);
 
 /** Returns a newly allocated string containing \a target, adding the
     default prefix if needed. */
-char* grpc_resolver_factory_add_default_prefix_if_needed(
-    grpc_exec_ctx* exec_ctx, const char* target);
-
-#ifdef __cplusplus
-}
-#endif
+char* grpc_resolver_factory_add_default_prefix_if_needed(const char* target);
 
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RESOLVER_REGISTRY_H */
diff --git a/src/core/ext/filters/client_channel/retry_throttle.h b/src/core/ext/filters/client_channel/retry_throttle.h
index 399383d..bf99297 100644
--- a/src/core/ext/filters/client_channel/retry_throttle.h
+++ b/src/core/ext/filters/client_channel/retry_throttle.h
@@ -21,10 +21,6 @@
 
 #include <stdbool.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /// Tracks retry throttling data for an individual server name.
 typedef struct grpc_server_retry_throttle_data grpc_server_retry_throttle_data;
 
@@ -51,8 +47,4 @@
 grpc_server_retry_throttle_data* grpc_retry_throttle_map_get_data_for_server(
     const char* server_name, int max_milli_tokens, int milli_token_ratio);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_RETRY_THROTTLE_H */
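
A recurring change across the headers above (grpc_ares_wrapper.h, fake_resolver.h, resolver_factory.h, resolver_registry.h, retry_throttle.h) and the matching init/shutdown functions in the .cc files is the removal of the extern "C" wrappers. Every source in this part of the tree is compiled as C++ (all files shown here are .cc), so declarations and definitions already agree on C++ linkage and the guards add nothing. Schematically, with a hypothetical grpc_example_init:

// Old pattern, now removed from these headers:
//   #ifdef __cplusplus
//   extern "C" {
//   #endif
//   void grpc_example_init(void);
//   #ifdef __cplusplus
//   }
//   #endif

// New pattern: a plain declaration; the definition (formerly
// `extern "C" void grpc_example_init(void) {...}`) loses the wrapper too.
void grpc_example_init(void);
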
diff --git a/src/core/ext/filters/client_channel/subchannel.cc b/src/core/ext/filters/client_channel/subchannel.cc
index 58e294d..f07394d 100644
--- a/src/core/ext/filters/client_channel/subchannel.cc
+++ b/src/core/ext/filters/client_channel/subchannel.cc
@@ -20,7 +20,9 @@
 
 #include <inttypes.h>
 #include <limits.h>
-#include <string.h>
+
+#include <algorithm>
+#include <cstring>
 
 #include <grpc/support/alloc.h>
 #include <grpc/support/avl.h>
@@ -39,6 +41,7 @@
 #include "src/core/lib/iomgr/timer.h"
 #include "src/core/lib/profiling/timers.h"
 #include "src/core/lib/slice/slice_internal.h"
+#include "src/core/lib/support/manual_constructor.h"
 #include "src/core/lib/surface/channel.h"
 #include "src/core/lib/surface/channel_init.h"
 #include "src/core/lib/transport/connectivity_state.h"
@@ -48,7 +51,7 @@
 
 #define GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS 1
 #define GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER 1.6
-#define GRPC_SUBCHANNEL_RECONNECT_MIN_BACKOFF_SECONDS 20
+#define GRPC_SUBCHANNEL_RECONNECT_MIN_TIMEOUT_SECONDS 20
 #define GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS 120
 #define GRPC_SUBCHANNEL_RECONNECT_JITTER 0.2
 
@@ -118,8 +121,9 @@
   external_state_watcher root_external_state_watcher;
 
   /** backoff state */
-  grpc_backoff backoff_state;
-  grpc_backoff_result backoff_result;
+  grpc_core::ManualConstructor<grpc_core::BackOff> backoff;
+  grpc_millis next_attempt_deadline;
+  grpc_millis min_connect_timeout_ms;
 
   /** do we have an active alarm? */
   bool have_alarm;
@@ -139,8 +143,7 @@
 #define CALLSTACK_TO_SUBCHANNEL_CALL(callstack) \
   (((grpc_subchannel_call*)(callstack)) - 1)
 
-static void subchannel_connected(grpc_exec_ctx* exec_ctx, void* subchannel,
-                                 grpc_error* error);
+static void subchannel_connected(void* subchannel, grpc_error* error);
 
 #ifndef NDEBUG
 #define REF_REASON reason
@@ -157,10 +160,9 @@
  * connection implementation
  */
 
-static void connection_destroy(grpc_exec_ctx* exec_ctx, void* arg,
-                               grpc_error* error) {
+static void connection_destroy(void* arg, grpc_error* error) {
   grpc_connected_subchannel* c = (grpc_connected_subchannel*)arg;
-  grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c));
+  grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CONNECTION(c));
   gpr_free(c);
 }
 
@@ -170,26 +172,23 @@
   return c;
 }
 
-void grpc_connected_subchannel_unref(grpc_exec_ctx* exec_ctx,
-                                     grpc_connected_subchannel* c
-                                         GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CONNECTION(c),
-                           REF_REASON);
+void grpc_connected_subchannel_unref(
+    grpc_connected_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+  GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CONNECTION(c), REF_REASON);
 }
 
 /*
  * grpc_subchannel implementation
  */
 
-static void subchannel_destroy(grpc_exec_ctx* exec_ctx, void* arg,
-                               grpc_error* error) {
+static void subchannel_destroy(void* arg, grpc_error* error) {
   grpc_subchannel* c = (grpc_subchannel*)arg;
   gpr_free((void*)c->filters);
-  grpc_channel_args_destroy(exec_ctx, c->args);
-  grpc_connectivity_state_destroy(exec_ctx, &c->state_tracker);
-  grpc_connector_unref(exec_ctx, c->connector);
-  grpc_pollset_set_destroy(exec_ctx, c->pollset_set);
-  grpc_subchannel_key_destroy(exec_ctx, c->key);
+  grpc_channel_args_destroy(c->args);
+  grpc_connectivity_state_destroy(&c->state_tracker);
+  grpc_connector_unref(c->connector);
+  grpc_pollset_set_destroy(c->pollset_set);
+  grpc_subchannel_key_destroy(c->key);
   gpr_mu_destroy(&c->mu);
   gpr_free(c);
 }
@@ -241,59 +240,102 @@
   }
 }
 
-static void disconnect(grpc_exec_ctx* exec_ctx, grpc_subchannel* c) {
+static void disconnect(grpc_subchannel* c) {
   grpc_connected_subchannel* con;
-  grpc_subchannel_index_unregister(exec_ctx, c->key, c);
+  grpc_subchannel_index_unregister(c->key, c);
   gpr_mu_lock(&c->mu);
   GPR_ASSERT(!c->disconnected);
   c->disconnected = true;
-  grpc_connector_shutdown(
-      exec_ctx, c->connector,
-      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Subchannel disconnected"));
+  grpc_connector_shutdown(c->connector, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                            "Subchannel disconnected"));
   con = GET_CONNECTED_SUBCHANNEL(c, no_barrier);
   if (con != nullptr) {
-    GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, con, "connection");
+    GRPC_CONNECTED_SUBCHANNEL_UNREF(con, "connection");
     gpr_atm_no_barrier_store(&c->connected_subchannel, (gpr_atm)0xdeadbeef);
   }
   gpr_mu_unlock(&c->mu);
 }
 
-void grpc_subchannel_unref(grpc_exec_ctx* exec_ctx,
-                           grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+void grpc_subchannel_unref(grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
   gpr_atm old_refs;
   // add a weak ref and subtract a strong ref (atomically)
   old_refs = ref_mutate(c, (gpr_atm)1 - (gpr_atm)(1 << INTERNAL_REF_BITS),
                         1 REF_MUTATE_PURPOSE("STRONG_UNREF"));
   if ((old_refs & STRONG_REF_MASK) == (1 << INTERNAL_REF_BITS)) {
-    disconnect(exec_ctx, c);
+    disconnect(c);
   }
-  GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "strong-unref");
+  GRPC_SUBCHANNEL_WEAK_UNREF(c, "strong-unref");
 }
 
-void grpc_subchannel_weak_unref(grpc_exec_ctx* exec_ctx,
-                                grpc_subchannel* c
-                                    GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+void grpc_subchannel_weak_unref(
+    grpc_subchannel* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
   gpr_atm old_refs;
   old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
   if (old_refs == 1) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_CREATE(subchannel_destroy, c, grpc_schedule_on_exec_ctx),
         GRPC_ERROR_NONE);
   }
 }
 
-grpc_subchannel* grpc_subchannel_create(grpc_exec_ctx* exec_ctx,
-                                        grpc_connector* connector,
+static void parse_args_for_backoff_values(
+    const grpc_channel_args* args, grpc_core::BackOff::Options* backoff_options,
+    grpc_millis* min_connect_timeout_ms) {
+  grpc_millis initial_backoff_ms =
+      GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS * 1000;
+  *min_connect_timeout_ms =
+      GRPC_SUBCHANNEL_RECONNECT_MIN_TIMEOUT_SECONDS * 1000;
+  grpc_millis max_backoff_ms =
+      GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000;
+  bool fixed_reconnect_backoff = false;
+  if (args != nullptr) {
+    for (size_t i = 0; i < args->num_args; i++) {
+      if (0 == strcmp(args->args[i].key,
+                      "grpc.testing.fixed_reconnect_backoff_ms")) {
+        fixed_reconnect_backoff = true;
+        initial_backoff_ms = *min_connect_timeout_ms = max_backoff_ms =
+            grpc_channel_arg_get_integer(
+                &args->args[i],
+                {static_cast<int>(initial_backoff_ms), 100, INT_MAX});
+      } else if (0 ==
+                 strcmp(args->args[i].key, GRPC_ARG_MIN_RECONNECT_BACKOFF_MS)) {
+        fixed_reconnect_backoff = false;
+        *min_connect_timeout_ms = grpc_channel_arg_get_integer(
+            &args->args[i],
+            {static_cast<int>(*min_connect_timeout_ms), 100, INT_MAX});
+      } else if (0 ==
+                 strcmp(args->args[i].key, GRPC_ARG_MAX_RECONNECT_BACKOFF_MS)) {
+        fixed_reconnect_backoff = false;
+        max_backoff_ms = grpc_channel_arg_get_integer(
+            &args->args[i], {static_cast<int>(max_backoff_ms), 100, INT_MAX});
+      } else if (0 == strcmp(args->args[i].key,
+                             GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS)) {
+        fixed_reconnect_backoff = false;
+        initial_backoff_ms = grpc_channel_arg_get_integer(
+            &args->args[i],
+            {static_cast<int>(initial_backoff_ms), 100, INT_MAX});
+      }
+    }
+  }
+  backoff_options->set_initial_backoff(initial_backoff_ms)
+      .set_multiplier(fixed_reconnect_backoff
+                          ? 1.0
+                          : GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER)
+      .set_jitter(fixed_reconnect_backoff ? 0.0
+                                          : GRPC_SUBCHANNEL_RECONNECT_JITTER)
+      .set_max_backoff(max_backoff_ms);
+}
+
+grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
                                         const grpc_subchannel_args* args) {
   grpc_subchannel_key* key = grpc_subchannel_key_create(args);
-  grpc_subchannel* c = grpc_subchannel_index_find(exec_ctx, key);
+  grpc_subchannel* c = grpc_subchannel_index_find(key);
   if (c) {
-    grpc_subchannel_key_destroy(exec_ctx, key);
+    grpc_subchannel_key_destroy(key);
     return c;
   }
 
-  GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx);
+  GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED();
   c = (grpc_subchannel*)gpr_zalloc(sizeof(*c));
   c->key = key;
   gpr_atm_no_barrier_store(&c->ref_pair, 1 << INTERNAL_REF_BITS);
@@ -311,10 +353,10 @@
   c->pollset_set = grpc_pollset_set_create();
   grpc_resolved_address* addr =
       (grpc_resolved_address*)gpr_malloc(sizeof(*addr));
-  grpc_get_subchannel_address_arg(exec_ctx, args->args, addr);
+  grpc_get_subchannel_address_arg(args->args, addr);
   grpc_resolved_address* new_address = nullptr;
   grpc_channel_args* new_args = nullptr;
-  if (grpc_proxy_mappers_map_address(exec_ctx, addr, args->args, &new_address,
+  if (grpc_proxy_mappers_map_address(addr, args->args, &new_address,
                                      &new_args)) {
     GPR_ASSERT(new_address != nullptr);
     gpr_free(addr);
@@ -327,67 +369,32 @@
       new_args != nullptr ? new_args : args->args, keys_to_remove,
       GPR_ARRAY_SIZE(keys_to_remove), &new_arg, 1);
   gpr_free(new_arg.value.string);
-  if (new_args != nullptr) grpc_channel_args_destroy(exec_ctx, new_args);
+  if (new_args != nullptr) grpc_channel_args_destroy(new_args);
   c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
       &c->root_external_state_watcher;
   GRPC_CLOSURE_INIT(&c->connected, subchannel_connected, c,
                     grpc_schedule_on_exec_ctx);
   grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
                                "subchannel");
-  int initial_backoff_ms =
-      GRPC_SUBCHANNEL_INITIAL_CONNECT_BACKOFF_SECONDS * 1000;
-  int min_backoff_ms = GRPC_SUBCHANNEL_RECONNECT_MIN_BACKOFF_SECONDS * 1000;
-  int max_backoff_ms = GRPC_SUBCHANNEL_RECONNECT_MAX_BACKOFF_SECONDS * 1000;
-  bool fixed_reconnect_backoff = false;
-  if (c->args) {
-    for (size_t i = 0; i < c->args->num_args; i++) {
-      if (0 == strcmp(c->args->args[i].key,
-                      "grpc.testing.fixed_reconnect_backoff_ms")) {
-        fixed_reconnect_backoff = true;
-        initial_backoff_ms = min_backoff_ms = max_backoff_ms =
-            grpc_channel_arg_get_integer(&c->args->args[i],
-                                         {initial_backoff_ms, 100, INT_MAX});
-      } else if (0 == strcmp(c->args->args[i].key,
-                             GRPC_ARG_MIN_RECONNECT_BACKOFF_MS)) {
-        fixed_reconnect_backoff = false;
-        min_backoff_ms = grpc_channel_arg_get_integer(
-            &c->args->args[i], {min_backoff_ms, 100, INT_MAX});
-      } else if (0 == strcmp(c->args->args[i].key,
-                             GRPC_ARG_MAX_RECONNECT_BACKOFF_MS)) {
-        fixed_reconnect_backoff = false;
-        max_backoff_ms = grpc_channel_arg_get_integer(
-            &c->args->args[i], {max_backoff_ms, 100, INT_MAX});
-      } else if (0 == strcmp(c->args->args[i].key,
-                             GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS)) {
-        fixed_reconnect_backoff = false;
-        initial_backoff_ms = grpc_channel_arg_get_integer(
-            &c->args->args[i], {initial_backoff_ms, 100, INT_MAX});
-      }
-    }
-  }
-  grpc_backoff_init(
-      &c->backoff_state, initial_backoff_ms,
-      fixed_reconnect_backoff ? 1.0
-                              : GRPC_SUBCHANNEL_RECONNECT_BACKOFF_MULTIPLIER,
-      fixed_reconnect_backoff ? 0.0 : GRPC_SUBCHANNEL_RECONNECT_JITTER,
-      min_backoff_ms, max_backoff_ms);
+  grpc_core::BackOff::Options backoff_options;
+  parse_args_for_backoff_values(args->args, &backoff_options,
+                                &c->min_connect_timeout_ms);
+  c->backoff.Init(backoff_options);
   gpr_mu_init(&c->mu);
 
-  return grpc_subchannel_index_register(exec_ctx, key, c);
+  return grpc_subchannel_index_register(key, c);
 }
 
-static void continue_connect_locked(grpc_exec_ctx* exec_ctx,
-                                    grpc_subchannel* c) {
+static void continue_connect_locked(grpc_subchannel* c) {
   grpc_connect_in_args args;
-
   args.interested_parties = c->pollset_set;
-  args.deadline = c->backoff_result.current_deadline;
+  const grpc_millis min_deadline =
+      c->min_connect_timeout_ms + grpc_core::ExecCtx::Get()->Now();
+  args.deadline = std::max(c->next_attempt_deadline, min_deadline);
   args.channel_args = c->args;
-
-  grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
-                              GRPC_CHANNEL_CONNECTING, GRPC_ERROR_NONE,
-                              "state_change");
-  grpc_connector_connect(exec_ctx, c->connector, &args, &c->connecting_result,
+  grpc_connectivity_state_set(&c->state_tracker, GRPC_CHANNEL_CONNECTING,
+                              GRPC_ERROR_NONE, "state_change");
+  grpc_connector_connect(c->connector, &args, &c->connecting_result,
                          &c->connected);
 }
 
@@ -400,24 +407,23 @@
   return state;
 }
 
-static void on_external_state_watcher_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void on_external_state_watcher_done(void* arg, grpc_error* error) {
   external_state_watcher* w = (external_state_watcher*)arg;
   grpc_closure* follow_up = w->notify;
   if (w->pollset_set != nullptr) {
-    grpc_pollset_set_del_pollset_set(exec_ctx, w->subchannel->pollset_set,
+    grpc_pollset_set_del_pollset_set(w->subchannel->pollset_set,
                                      w->pollset_set);
   }
   gpr_mu_lock(&w->subchannel->mu);
   w->next->prev = w->prev;
   w->prev->next = w->next;
   gpr_mu_unlock(&w->subchannel->mu);
-  GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, w->subchannel, "external_state_watcher");
+  GRPC_SUBCHANNEL_WEAK_UNREF(w->subchannel, "external_state_watcher");
   gpr_free(w);
-  GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(follow_up, GRPC_ERROR_REF(error));
 }
 
-static void on_alarm(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_alarm(void* arg, grpc_error* error) {
   grpc_subchannel* c = (grpc_subchannel*)arg;
   gpr_mu_lock(&c->mu);
   c->have_alarm = false;
@@ -429,18 +435,17 @@
   }
   if (error == GRPC_ERROR_NONE) {
     gpr_log(GPR_INFO, "Failed to connect to channel, retrying");
-    c->backoff_result = grpc_backoff_step(exec_ctx, &c->backoff_state);
-    continue_connect_locked(exec_ctx, c);
+    c->next_attempt_deadline = c->backoff->Step();
+    continue_connect_locked(c);
     gpr_mu_unlock(&c->mu);
   } else {
     gpr_mu_unlock(&c->mu);
-    GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+    GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
   }
   GRPC_ERROR_UNREF(error);
 }
 
-static void maybe_start_connecting_locked(grpc_exec_ctx* exec_ctx,
-                                          grpc_subchannel* c) {
+static void maybe_start_connecting_locked(grpc_subchannel* c) {
   if (c->disconnected) {
     /* Don't try to connect if we're already disconnected */
     return;
@@ -466,28 +471,26 @@
 
   if (!c->backoff_begun) {
     c->backoff_begun = true;
-    c->backoff_result = grpc_backoff_begin(exec_ctx, &c->backoff_state);
-    continue_connect_locked(exec_ctx, c);
+    c->next_attempt_deadline = c->backoff->Begin();
+    continue_connect_locked(c);
   } else {
     GPR_ASSERT(!c->have_alarm);
     c->have_alarm = true;
     const grpc_millis time_til_next =
-        c->backoff_result.next_attempt_start_time - grpc_exec_ctx_now(exec_ctx);
+        c->next_attempt_deadline - grpc_core::ExecCtx::Get()->Now();
     if (time_til_next <= 0) {
       gpr_log(GPR_INFO, "Retry immediately");
     } else {
       gpr_log(GPR_INFO, "Retry in %" PRIdPTR " milliseconds", time_til_next);
     }
     GRPC_CLOSURE_INIT(&c->on_alarm, on_alarm, c, grpc_schedule_on_exec_ctx);
-    grpc_timer_init(exec_ctx, &c->alarm,
-                    c->backoff_result.next_attempt_start_time, &c->on_alarm);
+    grpc_timer_init(&c->alarm, c->next_attempt_deadline, &c->on_alarm);
   }
 }
 
 void grpc_subchannel_notify_on_state_change(
-    grpc_exec_ctx* exec_ctx, grpc_subchannel* c,
-    grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
-    grpc_closure* notify) {
+    grpc_subchannel* c, grpc_pollset_set* interested_parties,
+    grpc_connectivity_state* state, grpc_closure* notify) {
   external_state_watcher* w;
 
   if (state == nullptr) {
@@ -495,8 +498,8 @@
     for (w = c->root_external_state_watcher.next;
          w != &c->root_external_state_watcher; w = w->next) {
       if (w->notify == notify) {
-        grpc_connectivity_state_notify_on_state_change(
-            exec_ctx, &c->state_tracker, nullptr, &w->closure);
+        grpc_connectivity_state_notify_on_state_change(&c->state_tracker,
+                                                       nullptr, &w->closure);
       }
     }
     gpr_mu_unlock(&c->mu);
@@ -508,31 +511,28 @@
     GRPC_CLOSURE_INIT(&w->closure, on_external_state_watcher_done, w,
                       grpc_schedule_on_exec_ctx);
     if (interested_parties != nullptr) {
-      grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set,
-                                       interested_parties);
+      grpc_pollset_set_add_pollset_set(c->pollset_set, interested_parties);
     }
     GRPC_SUBCHANNEL_WEAK_REF(c, "external_state_watcher");
     gpr_mu_lock(&c->mu);
     w->next = &c->root_external_state_watcher;
     w->prev = w->next->prev;
     w->next->prev = w->prev->next = w;
-    grpc_connectivity_state_notify_on_state_change(exec_ctx, &c->state_tracker,
-                                                   state, &w->closure);
-    maybe_start_connecting_locked(exec_ctx, c);
+    grpc_connectivity_state_notify_on_state_change(&c->state_tracker, state,
+                                                   &w->closure);
+    maybe_start_connecting_locked(c);
     gpr_mu_unlock(&c->mu);
   }
 }
 
 void grpc_connected_subchannel_process_transport_op(
-    grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* con,
-    grpc_transport_op* op) {
+    grpc_connected_subchannel* con, grpc_transport_op* op) {
   grpc_channel_stack* channel_stack = CHANNEL_STACK_FROM_CONNECTION(con);
   grpc_channel_element* top_elem = grpc_channel_stack_element(channel_stack, 0);
-  top_elem->filter->start_transport_op(exec_ctx, top_elem, op);
+  top_elem->filter->start_transport_op(top_elem, op);
 }
 
-static void subchannel_on_child_state_changed(grpc_exec_ctx* exec_ctx, void* p,
-                                              grpc_error* error) {
+static void subchannel_on_child_state_changed(void* p, grpc_error* error) {
   state_watcher* sw = (state_watcher*)p;
   grpc_subchannel* c = sw->subchannel;
   gpr_mu* mu = &c->mu;
@@ -544,24 +544,22 @@
     /* any errors on a subchannel ==> we're done, create a new one */
     sw->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
   }
-  grpc_connectivity_state_set(exec_ctx, &c->state_tracker,
-                              sw->connectivity_state, GRPC_ERROR_REF(error),
-                              "reflect_child");
+  grpc_connectivity_state_set(&c->state_tracker, sw->connectivity_state,
+                              GRPC_ERROR_REF(error), "reflect_child");
   if (sw->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
     grpc_connected_subchannel_notify_on_state_change(
-        exec_ctx, GET_CONNECTED_SUBCHANNEL(c, no_barrier), nullptr,
+        GET_CONNECTED_SUBCHANNEL(c, no_barrier), nullptr,
         &sw->connectivity_state, &sw->closure);
     GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
     sw = nullptr;
   }
 
   gpr_mu_unlock(mu);
-  GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "state_watcher");
+  GRPC_SUBCHANNEL_WEAK_UNREF(c, "state_watcher");
   gpr_free(sw);
 }
 
-static void connected_subchannel_state_op(grpc_exec_ctx* exec_ctx,
-                                          grpc_connected_subchannel* con,
+static void connected_subchannel_state_op(grpc_connected_subchannel* con,
                                           grpc_pollset_set* interested_parties,
                                           grpc_connectivity_state* state,
                                           grpc_closure* closure) {
@@ -571,29 +569,27 @@
   op->on_connectivity_state_change = closure;
   op->bind_pollset_set = interested_parties;
   elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
-  elem->filter->start_transport_op(exec_ctx, elem, op);
+  elem->filter->start_transport_op(elem, op);
 }
 
 void grpc_connected_subchannel_notify_on_state_change(
-    grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* con,
-    grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
-    grpc_closure* closure) {
-  connected_subchannel_state_op(exec_ctx, con, interested_parties, state,
-                                closure);
+    grpc_connected_subchannel* con, grpc_pollset_set* interested_parties,
+    grpc_connectivity_state* state, grpc_closure* closure) {
+  connected_subchannel_state_op(con, interested_parties, state, closure);
 }
 
-void grpc_connected_subchannel_ping(grpc_exec_ctx* exec_ctx,
-                                    grpc_connected_subchannel* con,
-                                    grpc_closure* closure) {
+void grpc_connected_subchannel_ping(grpc_connected_subchannel* con,
+                                    grpc_closure* on_initiate,
+                                    grpc_closure* on_ack) {
   grpc_transport_op* op = grpc_make_transport_op(nullptr);
   grpc_channel_element* elem;
-  op->send_ping = closure;
+  op->send_ping.on_initiate = on_initiate;
+  op->send_ping.on_ack = on_ack;
   elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CONNECTION(con), 0);
-  elem->filter->start_transport_op(exec_ctx, elem, op);
+  elem->filter->start_transport_op(elem, op);
 }
 
-static bool publish_transport_locked(grpc_exec_ctx* exec_ctx,
-                                     grpc_subchannel* c) {
+static bool publish_transport_locked(grpc_subchannel* c) {
   grpc_connected_subchannel* con;
   grpc_channel_stack* stk;
   state_watcher* sw_subchannel;
@@ -601,19 +597,18 @@
   /* construct channel stack */
   grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
   grpc_channel_stack_builder_set_channel_arguments(
-      exec_ctx, builder, c->connecting_result.channel_args);
+      builder, c->connecting_result.channel_args);
   grpc_channel_stack_builder_set_transport(builder,
                                            c->connecting_result.transport);
 
-  if (!grpc_channel_init_create_stack(exec_ctx, builder,
-                                      GRPC_CLIENT_SUBCHANNEL)) {
-    grpc_channel_stack_builder_destroy(exec_ctx, builder);
+  if (!grpc_channel_init_create_stack(builder, GRPC_CLIENT_SUBCHANNEL)) {
+    grpc_channel_stack_builder_destroy(builder);
     return false;
   }
   grpc_error* error = grpc_channel_stack_builder_finish(
-      exec_ctx, builder, 0, 1, connection_destroy, nullptr, (void**)&con);
+      builder, 0, 1, connection_destroy, nullptr, (void**)&con);
   if (error != GRPC_ERROR_NONE) {
-    grpc_transport_destroy(exec_ctx, c->connecting_result.transport);
+    grpc_transport_destroy(c->connecting_result.transport);
     gpr_log(GPR_ERROR, "error initializing subchannel stack: %s",
             grpc_error_string(error));
     GRPC_ERROR_UNREF(error);
@@ -631,7 +626,7 @@
 
   if (c->disconnected) {
     gpr_free(sw_subchannel);
-    grpc_channel_stack_destroy(exec_ctx, stk);
+    grpc_channel_stack_destroy(stk);
     gpr_free(con);
     return false;
   }
@@ -647,19 +642,18 @@
   /* setup subchannel watching connected subchannel for changes; subchannel
      ref for connecting is donated to the state watcher */
   GRPC_SUBCHANNEL_WEAK_REF(c, "state_watcher");
-  GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+  GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
   grpc_connected_subchannel_notify_on_state_change(
-      exec_ctx, con, c->pollset_set, &sw_subchannel->connectivity_state,
+      con, c->pollset_set, &sw_subchannel->connectivity_state,
       &sw_subchannel->closure);
 
   /* signal completion */
-  grpc_connectivity_state_set(exec_ctx, &c->state_tracker, GRPC_CHANNEL_READY,
+  grpc_connectivity_state_set(&c->state_tracker, GRPC_CHANNEL_READY,
                               GRPC_ERROR_NONE, "connected");
   return true;
 }
 
-static void subchannel_connected(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
+static void subchannel_connected(void* arg, grpc_error* error) {
   grpc_subchannel* c = (grpc_subchannel*)arg;
   grpc_channel_args* delete_channel_args = c->connecting_result.channel_args;
 
@@ -667,13 +661,13 @@
   gpr_mu_lock(&c->mu);
   c->connecting = false;
   if (c->connecting_result.transport != nullptr &&
-      publish_transport_locked(exec_ctx, c)) {
+      publish_transport_locked(c)) {
     /* do nothing, transport was published */
   } else if (c->disconnected) {
-    GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+    GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
   } else {
     grpc_connectivity_state_set(
-        exec_ctx, &c->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+        &c->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
         grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                "Connect Failed", &error, 1),
                            GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
@@ -682,27 +676,26 @@
     const char* errmsg = grpc_error_string(error);
     gpr_log(GPR_INFO, "Connect failed: %s", errmsg);
 
-    maybe_start_connecting_locked(exec_ctx, c);
-    GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connecting");
+    maybe_start_connecting_locked(c);
+    GRPC_SUBCHANNEL_WEAK_UNREF(c, "connecting");
   }
   gpr_mu_unlock(&c->mu);
-  GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, c, "connected");
-  grpc_channel_args_destroy(exec_ctx, delete_channel_args);
+  GRPC_SUBCHANNEL_WEAK_UNREF(c, "connected");
+  grpc_channel_args_destroy(delete_channel_args);
 }
 
 /*
  * grpc_subchannel_call implementation
  */
 
-static void subchannel_call_destroy(grpc_exec_ctx* exec_ctx, void* call,
-                                    grpc_error* error) {
+static void subchannel_call_destroy(void* call, grpc_error* error) {
   grpc_subchannel_call* c = (grpc_subchannel_call*)call;
   GPR_ASSERT(c->schedule_closure_after_destroy != nullptr);
   GPR_TIMER_BEGIN("grpc_subchannel_call_unref.destroy", 0);
   grpc_connected_subchannel* connection = c->connection;
-  grpc_call_stack_destroy(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
+  grpc_call_stack_destroy(SUBCHANNEL_CALL_TO_CALL_STACK(c), nullptr,
                           c->schedule_closure_after_destroy);
-  GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, connection, "subchannel_call");
+  GRPC_CONNECTED_SUBCHANNEL_UNREF(connection, "subchannel_call");
   GPR_TIMER_END("grpc_subchannel_call_unref.destroy", 0);
 }
 
@@ -718,20 +711,18 @@
   GRPC_CALL_STACK_REF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
 }
 
-void grpc_subchannel_call_unref(grpc_exec_ctx* exec_ctx,
-                                grpc_subchannel_call* c
-                                    GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
-  GRPC_CALL_STACK_UNREF(exec_ctx, SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
+void grpc_subchannel_call_unref(
+    grpc_subchannel_call* c GRPC_SUBCHANNEL_REF_EXTRA_ARGS) {
+  GRPC_CALL_STACK_UNREF(SUBCHANNEL_CALL_TO_CALL_STACK(c), REF_REASON);
 }
 
-void grpc_subchannel_call_process_op(grpc_exec_ctx* exec_ctx,
-                                     grpc_subchannel_call* call,
+void grpc_subchannel_call_process_op(grpc_subchannel_call* call,
                                      grpc_transport_stream_op_batch* batch) {
   GPR_TIMER_BEGIN("grpc_subchannel_call_process_op", 0);
   grpc_call_stack* call_stack = SUBCHANNEL_CALL_TO_CALL_STACK(call);
   grpc_call_element* top_elem = grpc_call_stack_element(call_stack, 0);
   GRPC_CALL_LOG_OP(GPR_INFO, top_elem, batch);
-  top_elem->filter->start_transport_stream_op_batch(exec_ctx, top_elem, batch);
+  top_elem->filter->start_transport_stream_op_batch(top_elem, batch);
   GPR_TIMER_END("grpc_subchannel_call_process_op", 0);
 }
 
@@ -746,7 +737,7 @@
 }
 
 grpc_error* grpc_connected_subchannel_create_call(
-    grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* con,
+    grpc_connected_subchannel* con,
     const grpc_connected_subchannel_call_args* args,
     grpc_subchannel_call** call) {
   grpc_channel_stack* chanstk = CHANNEL_STACK_FROM_CONNECTION(con);
@@ -764,14 +755,14 @@
       args->arena,        /* arena */
       args->call_combiner /* call_combiner */
   };
-  grpc_error* error = grpc_call_stack_init(
-      exec_ctx, chanstk, 1, subchannel_call_destroy, *call, &call_args);
+  grpc_error* error = grpc_call_stack_init(chanstk, 1, subchannel_call_destroy,
+                                           *call, &call_args);
   if (error != GRPC_ERROR_NONE) {
     const char* error_string = grpc_error_string(error);
     gpr_log(GPR_ERROR, "error: %s", error_string);
     return error;
   }
-  grpc_call_stack_set_pollset_or_pollset_set(exec_ctx, callstk, args->pollent);
+  grpc_call_stack_set_pollset_or_pollset_set(callstk, args->pollent);
   return GRPC_ERROR_NONE;
 }
 
@@ -780,21 +771,20 @@
   return SUBCHANNEL_CALL_TO_CALL_STACK(subchannel_call);
 }
 
-static void grpc_uri_to_sockaddr(grpc_exec_ctx* exec_ctx, const char* uri_str,
+static void grpc_uri_to_sockaddr(const char* uri_str,
                                  grpc_resolved_address* addr) {
-  grpc_uri* uri = grpc_uri_parse(exec_ctx, uri_str, 0 /* suppress_errors */);
+  grpc_uri* uri = grpc_uri_parse(uri_str, 0 /* suppress_errors */);
   GPR_ASSERT(uri != nullptr);
   if (!grpc_parse_uri(uri, addr)) memset(addr, 0, sizeof(*addr));
   grpc_uri_destroy(uri);
 }
 
-void grpc_get_subchannel_address_arg(grpc_exec_ctx* exec_ctx,
-                                     const grpc_channel_args* args,
+void grpc_get_subchannel_address_arg(const grpc_channel_args* args,
                                      grpc_resolved_address* addr) {
   const char* addr_uri_str = grpc_get_subchannel_address_uri_arg(args);
   memset(addr, 0, sizeof(*addr));
   if (*addr_uri_str != '\0') {
-    grpc_uri_to_sockaddr(exec_ctx, addr_uri_str, addr);
+    grpc_uri_to_sockaddr(addr_uri_str, addr);
   }
 }
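
The subchannel.cc hunks above replace the old grpc_backoff_init(...) call with a grpc_core::BackOff configured through a builder-style Options object, and hoist the channel-arg parsing into parse_args_for_backoff_values(). A minimal standalone sketch of that builder-plus-exponential-backoff-with-jitter shape follows; every name in it is an illustrative stand-in, not gRPC's actual API.

// Standalone sketch: builder-style options feeding an exponential backoff
// with jitter. Every name here is illustrative; none of it is gRPC's API.
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <random>

class BackoffOptions {
 public:
  BackoffOptions& set_initial_backoff(int64_t ms) { initial_ms_ = ms; return *this; }
  BackoffOptions& set_multiplier(double m) { multiplier_ = m; return *this; }
  BackoffOptions& set_jitter(double j) { jitter_ = j; return *this; }
  BackoffOptions& set_max_backoff(int64_t ms) { max_ms_ = ms; return *this; }
  int64_t initial_ms() const { return initial_ms_; }
  double multiplier() const { return multiplier_; }
  double jitter() const { return jitter_; }
  int64_t max_ms() const { return max_ms_; }

 private:
  int64_t initial_ms_ = 1000;
  double multiplier_ = 1.6;
  double jitter_ = 0.2;
  int64_t max_ms_ = 120000;
};

class Backoff {
 public:
  explicit Backoff(const BackoffOptions& opts)
      : opts_(opts), current_ms_(opts.initial_ms()) {}

  // Returns the delay before the next attempt (ms) and grows the base delay.
  int64_t NextDelayMs() {
    std::uniform_real_distribution<double> jitter(1.0 - opts_.jitter(),
                                                  1.0 + opts_.jitter());
    int64_t delay = static_cast<int64_t>(current_ms_ * jitter(rng_));
    current_ms_ = std::min<int64_t>(
        static_cast<int64_t>(current_ms_ * opts_.multiplier()), opts_.max_ms());
    return delay;
  }

 private:
  BackoffOptions opts_;
  int64_t current_ms_;
  std::mt19937 rng_{std::random_device{}()};
};

int main() {
  Backoff backoff(BackoffOptions().set_initial_backoff(1000).set_jitter(0.2));
  for (int attempt = 0; attempt < 4; ++attempt)
    std::printf("attempt %d: wait %lld ms\n", attempt,
                static_cast<long long>(backoff.NextDelayMs()));
}

Setting the multiplier to 1.0 and the jitter to 0.0, as the fixed_reconnect_backoff path above does, degenerates this into a constant retry interval.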
 
diff --git a/src/core/ext/filters/client_channel/subchannel.h b/src/core/ext/filters/client_channel/subchannel.h
index 970f182..9d34fff 100644
--- a/src/core/ext/filters/client_channel/subchannel.h
+++ b/src/core/ext/filters/client_channel/subchannel.h
@@ -26,10 +26,6 @@
 #include "src/core/lib/transport/connectivity_state.h"
 #include "src/core/lib/transport/metadata.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // Channel arg containing a grpc_resolved_address to connect to.
 #define GRPC_ARG_SUBCHANNEL_ADDRESS "grpc.subchannel_address"
 
@@ -46,36 +42,34 @@
   grpc_subchannel_ref((p), __FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
   grpc_subchannel_ref_from_weak_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_UNREF(cl, p, r) \
-  grpc_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_UNREF(p, r) \
+  grpc_subchannel_unref((p), __FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_WEAK_REF(p, r) \
   grpc_subchannel_weak_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
-  grpc_subchannel_weak_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) \
+  grpc_subchannel_weak_unref((p), __FILE__, __LINE__, (r))
 #define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) \
   grpc_connected_subchannel_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
-  grpc_connected_subchannel_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) \
+  grpc_connected_subchannel_unref((p), __FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_CALL_REF(p, r) \
   grpc_subchannel_call_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
-  grpc_subchannel_call_unref((cl), (p), __FILE__, __LINE__, (r))
+#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) \
+  grpc_subchannel_call_unref((p), __FILE__, __LINE__, (r))
 #define GRPC_SUBCHANNEL_REF_EXTRA_ARGS \
   , const char *file, int line, const char *reason
 #else
 #define GRPC_SUBCHANNEL_REF(p, r) grpc_subchannel_ref((p))
 #define GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(p, r) \
   grpc_subchannel_ref_from_weak_ref((p))
-#define GRPC_SUBCHANNEL_UNREF(cl, p, r) grpc_subchannel_unref((cl), (p))
+#define GRPC_SUBCHANNEL_UNREF(p, r) grpc_subchannel_unref((p))
 #define GRPC_SUBCHANNEL_WEAK_REF(p, r) grpc_subchannel_weak_ref((p))
-#define GRPC_SUBCHANNEL_WEAK_UNREF(cl, p, r) \
-  grpc_subchannel_weak_unref((cl), (p))
+#define GRPC_SUBCHANNEL_WEAK_UNREF(p, r) grpc_subchannel_weak_unref((p))
 #define GRPC_CONNECTED_SUBCHANNEL_REF(p, r) grpc_connected_subchannel_ref((p))
-#define GRPC_CONNECTED_SUBCHANNEL_UNREF(cl, p, r) \
-  grpc_connected_subchannel_unref((cl), (p))
+#define GRPC_CONNECTED_SUBCHANNEL_UNREF(p, r) \
+  grpc_connected_subchannel_unref((p))
 #define GRPC_SUBCHANNEL_CALL_REF(p, r) grpc_subchannel_call_ref((p))
-#define GRPC_SUBCHANNEL_CALL_UNREF(cl, p, r) \
-  grpc_subchannel_call_unref((cl), (p))
+#define GRPC_SUBCHANNEL_CALL_UNREF(p, r) grpc_subchannel_call_unref((p))
 #define GRPC_SUBCHANNEL_REF_EXTRA_ARGS
 #endif
 
@@ -83,24 +77,20 @@
     grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
 grpc_subchannel* grpc_subchannel_ref_from_weak_ref(
     grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_unref(grpc_exec_ctx* exec_ctx,
-                           grpc_subchannel* channel
-                               GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_unref(
+    grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
 grpc_subchannel* grpc_subchannel_weak_ref(
     grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_weak_unref(grpc_exec_ctx* exec_ctx,
-                                grpc_subchannel* channel
-                                    GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_weak_unref(
+    grpc_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
 grpc_connected_subchannel* grpc_connected_subchannel_ref(
     grpc_connected_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_connected_subchannel_unref(grpc_exec_ctx* exec_ctx,
-                                     grpc_connected_subchannel* channel
-                                         GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_connected_subchannel_unref(
+    grpc_connected_subchannel* channel GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
 void grpc_subchannel_call_ref(
     grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
-void grpc_subchannel_call_unref(grpc_exec_ctx* exec_ctx,
-                                grpc_subchannel_call* call
-                                    GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
+void grpc_subchannel_call_unref(
+    grpc_subchannel_call* call GRPC_SUBCHANNEL_REF_EXTRA_ARGS);
 
 /** construct a subchannel call */
 typedef struct {
@@ -114,14 +104,13 @@
 } grpc_connected_subchannel_call_args;
 
 grpc_error* grpc_connected_subchannel_create_call(
-    grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* connected_subchannel,
+    grpc_connected_subchannel* connected_subchannel,
     const grpc_connected_subchannel_call_args* args,
     grpc_subchannel_call** subchannel_call);
 
 /** process a transport level op */
 void grpc_connected_subchannel_process_transport_op(
-    grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* subchannel,
-    grpc_transport_op* op);
+    grpc_connected_subchannel* subchannel, grpc_transport_op* op);
 
 /** poll the current connectivity state of a channel */
 grpc_connectivity_state grpc_subchannel_check_connectivity(
@@ -130,16 +119,14 @@
 /** Calls notify when the connectivity state of a channel becomes different
     from *state.  Updates *state with the new state of the channel. */
 void grpc_subchannel_notify_on_state_change(
-    grpc_exec_ctx* exec_ctx, grpc_subchannel* channel,
-    grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
-    grpc_closure* notify);
+    grpc_subchannel* channel, grpc_pollset_set* interested_parties,
+    grpc_connectivity_state* state, grpc_closure* notify);
 void grpc_connected_subchannel_notify_on_state_change(
-    grpc_exec_ctx* exec_ctx, grpc_connected_subchannel* channel,
-    grpc_pollset_set* interested_parties, grpc_connectivity_state* state,
-    grpc_closure* notify);
-void grpc_connected_subchannel_ping(grpc_exec_ctx* exec_ctx,
-                                    grpc_connected_subchannel* channel,
-                                    grpc_closure* notify);
+    grpc_connected_subchannel* channel, grpc_pollset_set* interested_parties,
+    grpc_connectivity_state* state, grpc_closure* notify);
+void grpc_connected_subchannel_ping(grpc_connected_subchannel* channel,
+                                    grpc_closure* on_initiate,
+                                    grpc_closure* on_ack);
 
 /** retrieve the grpc_connected_subchannel - or NULL if called before
     the subchannel becomes connected */
@@ -151,8 +138,7 @@
     const grpc_subchannel* subchannel);
 
 /** continue processing a transport op */
-void grpc_subchannel_call_process_op(grpc_exec_ctx* exec_ctx,
-                                     grpc_subchannel_call* subchannel_call,
+void grpc_subchannel_call_process_op(grpc_subchannel_call* subchannel_call,
                                      grpc_transport_stream_op_batch* op);
 
 /** Must be called once per call. Sets the 'then_schedule_closure' argument for
@@ -176,13 +162,11 @@
 };
 
 /** create a subchannel given a connector */
-grpc_subchannel* grpc_subchannel_create(grpc_exec_ctx* exec_ctx,
-                                        grpc_connector* connector,
+grpc_subchannel* grpc_subchannel_create(grpc_connector* connector,
                                         const grpc_subchannel_args* args);
 
 /// Sets \a addr from \a args.
-void grpc_get_subchannel_address_arg(grpc_exec_ctx* exec_ctx,
-                                     const grpc_channel_args* args,
+void grpc_get_subchannel_address_arg(const grpc_channel_args* args,
                                      grpc_resolved_address* addr);
 
 /// Returns the URI string for the address to connect to.
@@ -192,8 +176,4 @@
 /// Caller is responsible for freeing the string.
 grpc_arg grpc_create_subchannel_address_arg(const grpc_resolved_address* addr);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_H */
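
Throughout subchannel.cc and subchannel.h above (and the files that follow), the explicit grpc_exec_ctx* parameter disappears from every signature and macro; callers instead reach the execution context through grpc_core::ExecCtx::Get() (for example ExecCtx::Get()->Now() in continue_connect_locked). That suggests a context scoped once per thread rather than threaded through arguments. Below is a minimal standalone sketch of that pattern, under that assumption; the names are illustrative only, not gRPC's implementation.

// Standalone sketch of the "scoped, thread-local execution context" pattern:
// a context is established once at the library entry point, and any function
// on that thread can retrieve it via a static accessor instead of taking it
// as a parameter. Illustrative only; not gRPC's actual ExecCtx.
#include <cassert>
#include <chrono>
#include <cstdio>

class ExecCtx {
 public:
  ExecCtx() { assert(current_ == nullptr); current_ = this; }
  ~ExecCtx() { current_ = nullptr; }
  ExecCtx(const ExecCtx&) = delete;
  ExecCtx& operator=(const ExecCtx&) = delete;

  // Cached "now" so repeated timestamp reads within one context are cheap.
  int64_t Now() {
    if (!now_valid_) {
      now_ms_ = std::chrono::duration_cast<std::chrono::milliseconds>(
                    std::chrono::steady_clock::now().time_since_epoch())
                    .count();
      now_valid_ = true;
    }
    return now_ms_;
  }

  static ExecCtx* Get() { return current_; }

 private:
  static thread_local ExecCtx* current_;
  int64_t now_ms_ = 0;
  bool now_valid_ = false;
};

thread_local ExecCtx* ExecCtx::current_ = nullptr;

int64_t deadline_in(int64_t delay_ms) {
  // No context parameter needed: any callee reaches the thread's context.
  return ExecCtx::Get()->Now() + delay_ms;
}

int main() {
  ExecCtx ctx;  // established at the entry point, torn down on return
  std::printf("deadline: %lld\n", static_cast<long long>(deadline_in(250)));
}

The payoff visible in the diff is purely mechanical: dozens of signatures and the ref/unref debug macros shrink by one argument, while the behavior stays the same.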
diff --git a/src/core/ext/filters/client_channel/subchannel_index.cc b/src/core/ext/filters/client_channel/subchannel_index.cc
index 1624643..052b047 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.cc
+++ b/src/core/ext/filters/client_channel/subchannel_index.cc
@@ -81,16 +81,14 @@
   return grpc_channel_args_compare(a->args.args, b->args.args);
 }
 
-void grpc_subchannel_key_destroy(grpc_exec_ctx* exec_ctx,
-                                 grpc_subchannel_key* k) {
+void grpc_subchannel_key_destroy(grpc_subchannel_key* k) {
   gpr_free((grpc_channel_args*)k->args.filters);
-  grpc_channel_args_destroy(exec_ctx, (grpc_channel_args*)k->args.args);
+  grpc_channel_args_destroy((grpc_channel_args*)k->args.args);
   gpr_free(k);
 }
 
 static void sck_avl_destroy(void* p, void* user_data) {
-  grpc_exec_ctx* exec_ctx = (grpc_exec_ctx*)user_data;
-  grpc_subchannel_key_destroy(exec_ctx, (grpc_subchannel_key*)p);
+  grpc_subchannel_key_destroy((grpc_subchannel_key*)p);
 }
 
 static void* sck_avl_copy(void* p, void* unused) {
@@ -103,8 +101,7 @@
 }
 
 static void scv_avl_destroy(void* p, void* user_data) {
-  grpc_exec_ctx* exec_ctx = (grpc_exec_ctx*)user_data;
-  GRPC_SUBCHANNEL_WEAK_UNREF(exec_ctx, (grpc_subchannel*)p, "subchannel_index");
+  GRPC_SUBCHANNEL_WEAK_UNREF((grpc_subchannel*)p, "subchannel_index");
 }
 
 static void* scv_avl_copy(void* p, void* unused) {
@@ -135,32 +132,29 @@
 
 void grpc_subchannel_index_unref(void) {
   if (gpr_unref(&g_refcount)) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
     gpr_mu_destroy(&g_mu);
-    gpr_avl_unref(g_subchannel_index, &exec_ctx);
-    grpc_exec_ctx_finish(&exec_ctx);
+    gpr_avl_unref(g_subchannel_index, grpc_core::ExecCtx::Get());
   }
 }
 
 void grpc_subchannel_index_ref(void) { gpr_ref_non_zero(&g_refcount); }
 
-grpc_subchannel* grpc_subchannel_index_find(grpc_exec_ctx* exec_ctx,
-                                            grpc_subchannel_key* key) {
+grpc_subchannel* grpc_subchannel_index_find(grpc_subchannel_key* key) {
   // Lock, and take a reference to the subchannel index.
   // We don't need to do the search under a lock as avl's are immutable.
   gpr_mu_lock(&g_mu);
-  gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx);
+  gpr_avl index = gpr_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
   gpr_mu_unlock(&g_mu);
 
   grpc_subchannel* c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(
-      (grpc_subchannel*)gpr_avl_get(index, key, exec_ctx), "index_find");
-  gpr_avl_unref(index, exec_ctx);
+      (grpc_subchannel*)gpr_avl_get(index, key, grpc_core::ExecCtx::Get()),
+      "index_find");
+  gpr_avl_unref(index, grpc_core::ExecCtx::Get());
 
   return c;
 }
 
-grpc_subchannel* grpc_subchannel_index_register(grpc_exec_ctx* exec_ctx,
-                                                grpc_subchannel_key* key,
+grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
                                                 grpc_subchannel* constructed) {
   grpc_subchannel* c = nullptr;
   bool need_to_unref_constructed = false;
@@ -171,11 +165,11 @@
     // Compare and swap loop:
     // - take a reference to the current index
     gpr_mu_lock(&g_mu);
-    gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx);
+    gpr_avl index = gpr_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
     gpr_mu_unlock(&g_mu);
 
     // - Check to see if a subchannel already exists
-    c = (grpc_subchannel*)gpr_avl_get(index, key, exec_ctx);
+    c = (grpc_subchannel*)gpr_avl_get(index, key, grpc_core::ExecCtx::Get());
     if (c != nullptr) {
       c = GRPC_SUBCHANNEL_REF_FROM_WEAK_REF(c, "index_register");
     }
@@ -184,9 +178,11 @@
       need_to_unref_constructed = true;
     } else {
       // no -> update the avl and compare/swap
-      gpr_avl updated = gpr_avl_add(
-          gpr_avl_ref(index, exec_ctx), subchannel_key_copy(key),
-          GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"), exec_ctx);
+      gpr_avl updated =
+          gpr_avl_add(gpr_avl_ref(index, grpc_core::ExecCtx::Get()),
+                      subchannel_key_copy(key),
+                      GRPC_SUBCHANNEL_WEAK_REF(constructed, "index_register"),
+                      grpc_core::ExecCtx::Get());
 
       // it may happen (but it's expected to be unlikely)
       // that some other thread has changed the index:
@@ -198,41 +194,42 @@
       }
       gpr_mu_unlock(&g_mu);
 
-      gpr_avl_unref(updated, exec_ctx);
+      gpr_avl_unref(updated, grpc_core::ExecCtx::Get());
     }
-    gpr_avl_unref(index, exec_ctx);
+    gpr_avl_unref(index, grpc_core::ExecCtx::Get());
   }
 
   if (need_to_unref_constructed) {
-    GRPC_SUBCHANNEL_UNREF(exec_ctx, constructed, "index_register");
+    GRPC_SUBCHANNEL_UNREF(constructed, "index_register");
   }
 
   return c;
 }
 
-void grpc_subchannel_index_unregister(grpc_exec_ctx* exec_ctx,
-                                      grpc_subchannel_key* key,
+void grpc_subchannel_index_unregister(grpc_subchannel_key* key,
                                       grpc_subchannel* constructed) {
   bool done = false;
   while (!done) {
     // Compare and swap loop:
     // - take a reference to the current index
     gpr_mu_lock(&g_mu);
-    gpr_avl index = gpr_avl_ref(g_subchannel_index, exec_ctx);
+    gpr_avl index = gpr_avl_ref(g_subchannel_index, grpc_core::ExecCtx::Get());
     gpr_mu_unlock(&g_mu);
 
     // Check to see if this key still refers to the previously
     // registered subchannel
-    grpc_subchannel* c = (grpc_subchannel*)gpr_avl_get(index, key, exec_ctx);
+    grpc_subchannel* c =
+        (grpc_subchannel*)gpr_avl_get(index, key, grpc_core::ExecCtx::Get());
     if (c != constructed) {
-      gpr_avl_unref(index, exec_ctx);
+      gpr_avl_unref(index, grpc_core::ExecCtx::Get());
       break;
     }
 
     // compare and swap the update (some other thread may have
     // mutated the index behind us)
     gpr_avl updated =
-        gpr_avl_remove(gpr_avl_ref(index, exec_ctx), key, exec_ctx);
+        gpr_avl_remove(gpr_avl_ref(index, grpc_core::ExecCtx::Get()), key,
+                       grpc_core::ExecCtx::Get());
 
     gpr_mu_lock(&g_mu);
     if (index.root == g_subchannel_index.root) {
@@ -241,8 +238,8 @@
     }
     gpr_mu_unlock(&g_mu);
 
-    gpr_avl_unref(updated, exec_ctx);
-    gpr_avl_unref(index, exec_ctx);
+    gpr_avl_unref(updated, grpc_core::ExecCtx::Get());
+    gpr_avl_unref(index, grpc_core::ExecCtx::Get());
   }
 }
 
diff --git a/src/core/ext/filters/client_channel/subchannel_index.h b/src/core/ext/filters/client_channel/subchannel_index.h
index 47f9c7b..bd160a3 100644
--- a/src/core/ext/filters/client_channel/subchannel_index.h
+++ b/src/core/ext/filters/client_channel/subchannel_index.h
@@ -21,10 +21,6 @@
 
 #include "src/core/ext/filters/client_channel/subchannel.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** \file Provides an index of active subchannels so that they can be
     shared amongst channels */
 
@@ -33,26 +29,22 @@
     const grpc_subchannel_args* args);
 
 /** Destroy a subchannel key */
-void grpc_subchannel_key_destroy(grpc_exec_ctx* exec_ctx,
-                                 grpc_subchannel_key* key);
+void grpc_subchannel_key_destroy(grpc_subchannel_key* key);
 
 /** Given a subchannel key, find the subchannel registered for it.
     Returns NULL if no such channel exists.
     Thread-safe. */
-grpc_subchannel* grpc_subchannel_index_find(grpc_exec_ctx* exec_ctx,
-                                            grpc_subchannel_key* key);
+grpc_subchannel* grpc_subchannel_index_find(grpc_subchannel_key* key);
 
 /** Register a subchannel against a key.
     Takes ownership of \a constructed.
     Returns the registered subchannel. This may be different from
     \a constructed in the case of a registration race. */
-grpc_subchannel* grpc_subchannel_index_register(grpc_exec_ctx* exec_ctx,
-                                                grpc_subchannel_key* key,
+grpc_subchannel* grpc_subchannel_index_register(grpc_subchannel_key* key,
                                                 grpc_subchannel* constructed);
 
 /** Remove \a constructed as the registered subchannel for \a key. */
-void grpc_subchannel_index_unregister(grpc_exec_ctx* exec_ctx,
-                                      grpc_subchannel_key* key,
+void grpc_subchannel_index_unregister(grpc_subchannel_key* key,
                                       grpc_subchannel* constructed);
 
 int grpc_subchannel_key_compare(const grpc_subchannel_key* a,
@@ -82,8 +74,4 @@
  * force_creation set. */
 void grpc_subchannel_index_test_only_set_force_creation(bool force_creation);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_SUBCHANNEL_INDEX_H */
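
The subchannel_index changes above keep the same lock-snapshot / immutable-AVL / compare-and-swap registration loop; they merely pass grpc_core::ExecCtx::Get() where an exec_ctx user_data pointer used to flow. A standalone sketch of that register loop over an immutable index, using a copied std::map in place of the gpr_avl (illustrative names only, not gRPC's code):

// Standalone sketch of the compare-and-swap registration loop: snapshot an
// immutable index, derive an updated copy, and install it only if no other
// thread replaced the index in the meantime. Illustrative names only.
#include <map>
#include <memory>
#include <mutex>
#include <string>

using Index = std::map<std::string, int>;

std::mutex g_mu;
std::shared_ptr<const Index> g_index = std::make_shared<Index>();

void register_entry(const std::string& key, int value) {
  for (;;) {
    std::shared_ptr<const Index> snapshot;
    {
      std::lock_guard<std::mutex> lock(g_mu);
      snapshot = g_index;  // take a reference to the current index
    }
    if (snapshot->count(key)) return;  // someone already registered this key
    auto updated = std::make_shared<Index>(*snapshot);  // derive an updated copy
    (*updated)[key] = value;
    std::lock_guard<std::mutex> lock(g_mu);
    if (g_index == snapshot) {  // index unchanged since the snapshot?
      g_index = updated;        // yes: install the update
      return;
    }
    // no: another thread won the race; loop and retry against the new index
  }
}

int main() {
  register_entry("ipv4:127.0.0.1:443", 1);
  register_entry("ipv4:127.0.0.1:443", 2);  // loses the race check, no-op
  return g_index->at("ipv4:127.0.0.1:443") == 1 ? 0 : 1;
}

Because the snapshot is immutable, lookups need no lock at all, which is exactly the property the comment "We don't need to do the search under a lock as avl's are immutable" relies on.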
diff --git a/src/core/ext/filters/client_channel/uri_parser.cc b/src/core/ext/filters/client_channel/uri_parser.cc
index b76dcbe..3428f4b 100644
--- a/src/core/ext/filters/client_channel/uri_parser.cc
+++ b/src/core/ext/filters/client_channel/uri_parser.cc
@@ -56,8 +56,8 @@
 }
 
 /** Returns a copy of percent decoded \a src[begin, end) */
-static char* decode_and_copy_component(grpc_exec_ctx* exec_ctx, const char* src,
-                                       size_t begin, size_t end) {
+static char* decode_and_copy_component(const char* src, size_t begin,
+                                       size_t end) {
   grpc_slice component =
       (begin == NOT_SET || end == NOT_SET)
           ? grpc_empty_slice()
@@ -65,8 +65,8 @@
   grpc_slice decoded_component =
       grpc_permissive_percent_decode_slice(component);
   char* out = grpc_dump_slice(decoded_component, GPR_DUMP_ASCII);
-  grpc_slice_unref_internal(exec_ctx, component);
-  grpc_slice_unref_internal(exec_ctx, decoded_component);
+  grpc_slice_unref_internal(component);
+  grpc_slice_unref_internal(decoded_component);
   return out;
 }
 
@@ -184,8 +184,7 @@
   }
 }
 
-grpc_uri* grpc_uri_parse(grpc_exec_ctx* exec_ctx, const char* uri_text,
-                         bool suppress_errors) {
+grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors) {
   grpc_uri* uri;
   size_t scheme_begin = 0;
   size_t scheme_end = NOT_SET;
@@ -273,16 +272,13 @@
   }
 
   uri = (grpc_uri*)gpr_zalloc(sizeof(*uri));
-  uri->scheme =
-      decode_and_copy_component(exec_ctx, uri_text, scheme_begin, scheme_end);
-  uri->authority = decode_and_copy_component(exec_ctx, uri_text,
-                                             authority_begin, authority_end);
-  uri->path =
-      decode_and_copy_component(exec_ctx, uri_text, path_begin, path_end);
-  uri->query =
-      decode_and_copy_component(exec_ctx, uri_text, query_begin, query_end);
-  uri->fragment = decode_and_copy_component(exec_ctx, uri_text, fragment_begin,
-                                            fragment_end);
+  uri->scheme = decode_and_copy_component(uri_text, scheme_begin, scheme_end);
+  uri->authority =
+      decode_and_copy_component(uri_text, authority_begin, authority_end);
+  uri->path = decode_and_copy_component(uri_text, path_begin, path_end);
+  uri->query = decode_and_copy_component(uri_text, query_begin, query_end);
+  uri->fragment =
+      decode_and_copy_component(uri_text, fragment_begin, fragment_end);
   parse_query_parts(uri);
 
   return uri;
diff --git a/src/core/ext/filters/client_channel/uri_parser.h b/src/core/ext/filters/client_channel/uri_parser.h
index cd877ad..24ff06c 100644
--- a/src/core/ext/filters/client_channel/uri_parser.h
+++ b/src/core/ext/filters/client_channel/uri_parser.h
@@ -22,10 +22,6 @@
 #include <stddef.h>
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   char* scheme;
   char* authority;
@@ -41,8 +37,7 @@
 } grpc_uri;
 
 /** parse a uri, return NULL on failure */
-grpc_uri* grpc_uri_parse(grpc_exec_ctx* exec_ctx, const char* uri_text,
-                         bool suppress_errors);
+grpc_uri* grpc_uri_parse(const char* uri_text, bool suppress_errors);
 
 /** return the part of a query string after the '=' in "?key=xxx&...", or NULL
  * if key is not present */
@@ -51,8 +46,4 @@
 /** destroy a uri */
 void grpc_uri_destroy(grpc_uri* uri);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_CLIENT_CHANNEL_URI_PARSER_H */
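
decode_and_copy_component() in uri_parser.cc above copies a [begin, end) slice of the URI text and permissively percent-decodes it before storing it into the grpc_uri fields. As a rough standalone sketch of permissive percent-decoding, i.e. decoding well-formed %XY escapes and passing anything malformed through unchanged (illustrative only, not gRPC's grpc_permissive_percent_decode_slice):

// Standalone sketch of permissive percent-decoding of a URI component:
// "%XY" with two hex digits becomes the byte 0xXY; anything malformed is
// copied through unchanged. Illustrative only, not gRPC's implementation.
#include <cstddef>
#include <string>

static int hex_val(char c) {
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'a' && c <= 'f') return c - 'a' + 10;
  if (c >= 'A' && c <= 'F') return c - 'A' + 10;
  return -1;
}

std::string percent_decode(const std::string& in) {
  std::string out;
  out.reserve(in.size());
  for (size_t i = 0; i < in.size(); ++i) {
    if (in[i] == '%' && i + 2 < in.size() && hex_val(in[i + 1]) >= 0 &&
        hex_val(in[i + 2]) >= 0) {
      out.push_back(
          static_cast<char>(hex_val(in[i + 1]) * 16 + hex_val(in[i + 2])));
      i += 2;  // consumed the two hex digits
    } else {
      out.push_back(in[i]);  // permissive: keep malformed escapes as-is
    }
  }
  return out;
}

int main() {
  std::string decoded = percent_decode("dns:///grpc%2Eio");  // -> "dns:///grpc.io"
  return decoded == "dns:///grpc.io" ? 0 : 1;
}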
diff --git a/src/core/ext/filters/deadline/deadline_filter.cc b/src/core/ext/filters/deadline/deadline_filter.cc
index 849ce71..c430f3d 100644
--- a/src/core/ext/filters/deadline/deadline_filter.cc
+++ b/src/core/ext/filters/deadline/deadline_filter.cc
@@ -36,18 +36,16 @@
 
 // The on_complete callback used when sending a cancel_error batch down the
 // filter stack.  Yields the call combiner when the batch returns.
-static void yield_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
-                                grpc_error* ignored) {
+static void yield_call_combiner(void* arg, grpc_error* ignored) {
   grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
-  GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+  GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                           "got on_complete from cancel_stream batch");
-  GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack, "deadline_timer");
+  GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer");
 }
 
 // This is called via the call combiner, so access to deadline_state is
 // synchronized.
-static void send_cancel_op_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
-                                            grpc_error* error) {
+static void send_cancel_op_in_call_combiner(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
   grpc_transport_stream_op_batch* batch = grpc_make_transport_stream_op(
@@ -55,37 +53,34 @@
                         deadline_state, grpc_schedule_on_exec_ctx));
   batch->cancel_stream = true;
   batch->payload->cancel_stream.cancel_error = GRPC_ERROR_REF(error);
-  elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+  elem->filter->start_transport_stream_op_batch(elem, batch);
 }
 
 // Timer callback.
-static void timer_callback(grpc_exec_ctx* exec_ctx, void* arg,
-                           grpc_error* error) {
+static void timer_callback(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
   if (error != GRPC_ERROR_CANCELLED) {
     error = grpc_error_set_int(
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Deadline Exceeded"),
         GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_DEADLINE_EXCEEDED);
-    grpc_call_combiner_cancel(exec_ctx, deadline_state->call_combiner,
+    grpc_call_combiner_cancel(deadline_state->call_combiner,
                               GRPC_ERROR_REF(error));
     GRPC_CLOSURE_INIT(&deadline_state->timer_callback,
                       send_cancel_op_in_call_combiner, elem,
                       grpc_schedule_on_exec_ctx);
-    GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
+    GRPC_CALL_COMBINER_START(deadline_state->call_combiner,
                              &deadline_state->timer_callback, error,
                              "deadline exceeded -- sending cancel_stream op");
   } else {
-    GRPC_CALL_STACK_UNREF(exec_ctx, deadline_state->call_stack,
-                          "deadline_timer");
+    GRPC_CALL_STACK_UNREF(deadline_state->call_stack, "deadline_timer");
   }
 }
 
 // Starts the deadline timer.
 // This is called via the call combiner, so access to deadline_state is
 // synchronized.
-static void start_timer_if_needed(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static void start_timer_if_needed(grpc_call_element* elem,
                                   grpc_millis deadline) {
   if (deadline == GRPC_MILLIS_INF_FUTURE) {
     return;
@@ -113,17 +108,16 @@
   }
   GPR_ASSERT(closure != nullptr);
   GRPC_CALL_STACK_REF(deadline_state->call_stack, "deadline_timer");
-  grpc_timer_init(exec_ctx, &deadline_state->timer, deadline, closure);
+  grpc_timer_init(&deadline_state->timer, deadline, closure);
 }
 
 // Cancels the deadline timer.
 // This is called via the call combiner, so access to deadline_state is
 // synchronized.
-static void cancel_timer_if_needed(grpc_exec_ctx* exec_ctx,
-                                   grpc_deadline_state* deadline_state) {
+static void cancel_timer_if_needed(grpc_deadline_state* deadline_state) {
   if (deadline_state->timer_state == GRPC_DEADLINE_STATE_PENDING) {
     deadline_state->timer_state = GRPC_DEADLINE_STATE_FINISHED;
-    grpc_timer_cancel(exec_ctx, &deadline_state->timer);
+    grpc_timer_cancel(&deadline_state->timer);
   } else {
     // timer was either in STATE_INITIAL (nothing to cancel)
     // OR in STATE_FINISHED (again nothing to cancel)
@@ -131,12 +125,11 @@
 }
 
 // Callback run when the call is complete.
-static void on_complete(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_complete(void* arg, grpc_error* error) {
   grpc_deadline_state* deadline_state = (grpc_deadline_state*)arg;
-  cancel_timer_if_needed(exec_ctx, deadline_state);
+  cancel_timer_if_needed(deadline_state);
   // Invoke the next callback.
-  GRPC_CLOSURE_RUN(exec_ctx, deadline_state->next_on_complete,
-                   GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(deadline_state->next_on_complete, GRPC_ERROR_REF(error));
 }
 
 // Inject our own on_complete callback into op.
@@ -156,8 +149,7 @@
   grpc_millis deadline;
   grpc_closure closure;
 };
-static void start_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_error* error) {
+static void start_timer_after_init(void* arg, grpc_error* error) {
   struct start_timer_after_init_state* state =
       (struct start_timer_after_init_state*)arg;
   grpc_deadline_state* deadline_state =
@@ -166,18 +158,18 @@
     // We are initially called without holding the call combiner, so we
     // need to bounce ourselves into it.
     state->in_call_combiner = true;
-    GRPC_CALL_COMBINER_START(exec_ctx, deadline_state->call_combiner,
-                             &state->closure, GRPC_ERROR_REF(error),
+    GRPC_CALL_COMBINER_START(deadline_state->call_combiner, &state->closure,
+                             GRPC_ERROR_REF(error),
                              "scheduling deadline timer");
     return;
   }
-  start_timer_if_needed(exec_ctx, state->elem, state->deadline);
+  start_timer_if_needed(state->elem, state->deadline);
   gpr_free(state);
-  GRPC_CALL_COMBINER_STOP(exec_ctx, deadline_state->call_combiner,
+  GRPC_CALL_COMBINER_STOP(deadline_state->call_combiner,
                           "done scheduling deadline timer");
 }
 
-void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+void grpc_deadline_state_init(grpc_call_element* elem,
                               grpc_call_stack* call_stack,
                               grpc_call_combiner* call_combiner,
                               grpc_millis deadline) {
@@ -200,29 +192,27 @@
     state->deadline = deadline;
     GRPC_CLOSURE_INIT(&state->closure, start_timer_after_init, state,
                       grpc_schedule_on_exec_ctx);
-    GRPC_CLOSURE_SCHED(exec_ctx, &state->closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&state->closure, GRPC_ERROR_NONE);
   }
 }
 
-void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
-                                 grpc_call_element* elem) {
+void grpc_deadline_state_destroy(grpc_call_element* elem) {
   grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
-  cancel_timer_if_needed(exec_ctx, deadline_state);
+  cancel_timer_if_needed(deadline_state);
 }
 
-void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+void grpc_deadline_state_reset(grpc_call_element* elem,
                                grpc_millis new_deadline) {
   grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
-  cancel_timer_if_needed(exec_ctx, deadline_state);
-  start_timer_if_needed(exec_ctx, elem, new_deadline);
+  cancel_timer_if_needed(deadline_state);
+  start_timer_if_needed(elem, new_deadline);
 }
 
 void grpc_deadline_state_client_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
   grpc_deadline_state* deadline_state = (grpc_deadline_state*)elem->call_data;
   if (op->cancel_stream) {
-    cancel_timer_if_needed(exec_ctx, deadline_state);
+    cancel_timer_if_needed(deadline_state);
   } else {
     // Make sure we know when the call is complete, so that we can cancel
     // the timer.
@@ -237,16 +227,14 @@
 //
 
 // Constructor for channel_data.  Used for both client and server filters.
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   GPR_ASSERT(!args->is_last);
   return GRPC_ERROR_NONE;
 }
 
 // Destructor for channel_data.  Used for both client and server filters.
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 // Call data used for both client and server filter.
 typedef struct base_call_data {
@@ -266,50 +254,45 @@
 } server_call_data;
 
 // Constructor for call_data.  Used for both client and server filters.
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
-  grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
-                           args->call_combiner, args->deadline);
+  grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
+                           args->deadline);
   return GRPC_ERROR_NONE;
 }
 
 // Destructor for call_data.  Used for both client and server filters.
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
-  grpc_deadline_state_destroy(exec_ctx, elem);
+  grpc_deadline_state_destroy(elem);
 }
 
 // Method for starting a call op for client filter.
 static void client_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
-  grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
-                                                             op);
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
+  grpc_deadline_state_client_start_transport_stream_op_batch(elem, op);
   // Chain to next filter.
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 }
 
 // Callback for receiving initial metadata on the server.
-static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   server_call_data* calld = (server_call_data*)elem->call_data;
   // Get deadline from metadata and start the timer if needed.
-  start_timer_if_needed(exec_ctx, elem, calld->recv_initial_metadata->deadline);
+  start_timer_if_needed(elem, calld->recv_initial_metadata->deadline);
   // Invoke the next callback.
   calld->next_recv_initial_metadata_ready->cb(
-      exec_ctx, calld->next_recv_initial_metadata_ready->cb_arg, error);
+      calld->next_recv_initial_metadata_ready->cb_arg, error);
 }
 
 // Method for starting a call op for server filter.
 static void server_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
   server_call_data* calld = (server_call_data*)elem->call_data;
   if (op->cancel_stream) {
-    cancel_timer_if_needed(exec_ctx, &calld->base.deadline_state);
+    cancel_timer_if_needed(&calld->base.deadline_state);
   } else {
     // If we're receiving initial metadata, we need to get the deadline
     // from the recv_initial_metadata_ready callback.  So we inject our
@@ -335,7 +318,7 @@
     }
   }
   // Chain to next filter.
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 }
 
 const grpc_channel_filter grpc_client_deadline_filter = {
@@ -372,8 +355,7 @@
       !grpc_channel_args_want_minimal_stack(channel_args));
 }
 
-static bool maybe_add_deadline_filter(grpc_exec_ctx* exec_ctx,
-                                      grpc_channel_stack_builder* builder,
+static bool maybe_add_deadline_filter(grpc_channel_stack_builder* builder,
                                       void* arg) {
   return grpc_deadline_checking_enabled(
              grpc_channel_stack_builder_get_channel_arguments(builder))
@@ -382,7 +364,7 @@
              : true;
 }
 
-extern "C" void grpc_deadline_filter_init(void) {
+void grpc_deadline_filter_init(void) {
   grpc_channel_init_register_stage(
       GRPC_CLIENT_DIRECT_CHANNEL, GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
       maybe_add_deadline_filter, (void*)&grpc_client_deadline_filter);
@@ -391,4 +373,4 @@
       maybe_add_deadline_filter, (void*)&grpc_server_deadline_filter);
 }
 
-extern "C" void grpc_deadline_filter_shutdown(void) {}
+void grpc_deadline_filter_shutdown(void) {}
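
The change running through all of these filter diffs is the removal of the explicit grpc_exec_ctx* parameter from every callback and vtable entry. As a rough, self-contained sketch of that refactoring pattern (illustrative names only, not the real gRPC types), an explicitly threaded context can be replaced by a scoped, thread-local one that callees look up on demand:

#include <cassert>
#include <cstdio>

namespace demo {

// Stand-in for an execution context; the real gRPC type is richer.
class ExecCtx {
 public:
  ExecCtx() { current_ = this; }      // establish the ambient context
  ~ExecCtx() { current_ = nullptr; }  // tear it down when the scope ends
  static ExecCtx* Get() { return current_; }
  void Flush() { std::printf("flush pending work\n"); }

 private:
  static thread_local ExecCtx* current_;
};

thread_local ExecCtx* ExecCtx::current_ = nullptr;

// Old shape: the context is threaded through as the first parameter.
void do_work_old(ExecCtx* exec_ctx) { exec_ctx->Flush(); }

// New shape: the context is looked up from the thread-local slot.
void do_work_new() { ExecCtx::Get()->Flush(); }

}  // namespace demo

int main() {
  demo::ExecCtx exec_ctx;        // one scoped context per top-level call
  demo::do_work_old(&exec_ctx);  // both shapes reach the same object
  demo::do_work_new();
  assert(demo::ExecCtx::Get() == &exec_ctx);
  return 0;
}

Either shape keeps a single context per thread of execution; the new one simply saves every intermediate function from forwarding a parameter it never inspects.
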
diff --git a/src/core/ext/filters/deadline/deadline_filter.h b/src/core/ext/filters/deadline/deadline_filter.h
index e665dc5..4de817e 100644
--- a/src/core/ext/filters/deadline/deadline_filter.h
+++ b/src/core/ext/filters/deadline/deadline_filter.h
@@ -20,10 +20,6 @@
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/iomgr/timer.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum grpc_deadline_timer_state {
   GRPC_DEADLINE_STATE_INITIAL,
   GRPC_DEADLINE_STATE_PENDING,
@@ -53,13 +49,12 @@
 //
 
 // assumes elem->call_data is zeroed
-void grpc_deadline_state_init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+void grpc_deadline_state_init(grpc_call_element* elem,
                               grpc_call_stack* call_stack,
                               grpc_call_combiner* call_combiner,
                               grpc_millis deadline);
 
-void grpc_deadline_state_destroy(grpc_exec_ctx* exec_ctx,
-                                 grpc_call_element* elem);
+void grpc_deadline_state_destroy(grpc_call_element* elem);
 
 // Cancels the existing timer and starts a new one with new_deadline.
 //
@@ -70,7 +65,7 @@
 // deadline may result in the timer being called twice.
 //
 // Note: Must be called while holding the call combiner.
-void grpc_deadline_state_reset(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+void grpc_deadline_state_reset(grpc_call_element* elem,
                                grpc_millis new_deadline);
 
 // To be called from the client-side filter's start_transport_stream_op_batch()
@@ -82,8 +77,7 @@
 //
 // Note: Must be called while holding the call combiner.
 void grpc_deadline_state_client_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op);
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op);
 
 // Should deadline checking be performed (according to channel args)
 bool grpc_deadline_checking_enabled(const grpc_channel_args* args);
@@ -94,8 +88,4 @@
 extern const grpc_channel_filter grpc_client_deadline_filter;
 extern const grpc_channel_filter grpc_server_deadline_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_DEADLINE_DEADLINE_FILTER_H */
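
The header diffs also drop the #ifdef __cplusplus / extern "C" guards. As a generic illustration (nothing gRPC-specific), extern "C" gives a function C linkage, i.e. an unmangled symbol name that C translation units can link against; once every consumer of a header is compiled as C++, the guard no longer buys anything:

#include <cstdio>

// C linkage: unmangled symbol, callable and linkable from C and C++.
extern "C" void filter_init_c_linkage() { std::puts("init (C linkage)"); }

// C++ linkage: mangled symbol, linkable from C++ only, which is sufficient
// once the whole tree builds as C++.
void filter_init_cpp_linkage() { std::puts("init (C++ linkage)"); }

int main() {
  filter_init_c_linkage();
  filter_init_cpp_linkage();
  return 0;
}
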
diff --git a/src/core/ext/filters/http/client/http_client_filter.cc b/src/core/ext/filters/http/client/http_client_filter.cc
index a625369..a1fb10f 100644
--- a/src/core/ext/filters/http/client/http_client_filter.cc
+++ b/src/core/ext/filters/http/client/http_client_filter.cc
@@ -68,12 +68,11 @@
   size_t max_payload_size_for_get;
 } channel_data;
 
-static grpc_error* client_filter_incoming_metadata(grpc_exec_ctx* exec_ctx,
-                                                   grpc_call_element* elem,
+static grpc_error* client_filter_incoming_metadata(grpc_call_element* elem,
                                                    grpc_metadata_batch* b) {
   if (b->idx.named.status != nullptr) {
     if (grpc_mdelem_eq(b->idx.named.status->md, GRPC_MDELEM_STATUS_200)) {
-      grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.status);
+      grpc_metadata_batch_remove(b, b->idx.named.status);
     } else {
       char* val = grpc_dump_slice(GRPC_MDVALUE(b->idx.named.status->md),
                                   GPR_DUMP_ASCII);
@@ -98,10 +97,9 @@
         GRPC_MDVALUE(b->idx.named.grpc_message->md));
     if (grpc_slice_is_equivalent(pct_decoded_msg,
                                  GRPC_MDVALUE(b->idx.named.grpc_message->md))) {
-      grpc_slice_unref_internal(exec_ctx, pct_decoded_msg);
+      grpc_slice_unref_internal(pct_decoded_msg);
     } else {
-      grpc_metadata_batch_set_value(exec_ctx, b->idx.named.grpc_message,
-                                    pct_decoded_msg);
+      grpc_metadata_batch_set_value(b->idx.named.grpc_message, pct_decoded_msg);
     }
   }
 
@@ -131,60 +129,53 @@
         gpr_free(val);
       }
     }
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_type);
+    grpc_metadata_batch_remove(b, b->idx.named.content_type);
   }
 
   return GRPC_ERROR_NONE;
 }
 
-static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
-                                        void* user_data, grpc_error* error) {
+static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
   if (error == GRPC_ERROR_NONE) {
-    error = client_filter_incoming_metadata(exec_ctx, elem,
-                                            calld->recv_initial_metadata);
+    error = client_filter_incoming_metadata(elem, calld->recv_initial_metadata);
   } else {
     GRPC_ERROR_REF(error);
   }
-  GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready,
-                   error);
+  GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready, error);
 }
 
-static void recv_trailing_metadata_on_complete(grpc_exec_ctx* exec_ctx,
-                                               void* user_data,
+static void recv_trailing_metadata_on_complete(void* user_data,
                                                grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
   if (error == GRPC_ERROR_NONE) {
-    error = client_filter_incoming_metadata(exec_ctx, elem,
-                                            calld->recv_trailing_metadata);
+    error =
+        client_filter_incoming_metadata(elem, calld->recv_trailing_metadata);
   } else {
     GRPC_ERROR_REF(error);
   }
-  GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_trailing_metadata_on_complete,
-                   error);
+  GRPC_CLOSURE_RUN(calld->original_recv_trailing_metadata_on_complete, error);
 }
 
-static void send_message_on_complete(grpc_exec_ctx* exec_ctx, void* arg,
-                                     grpc_error* error) {
+static void send_message_on_complete(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
-  grpc_byte_stream_cache_destroy(exec_ctx, &calld->send_message_cache);
-  GRPC_CLOSURE_RUN(exec_ctx, calld->original_send_message_on_complete,
+  grpc_byte_stream_cache_destroy(&calld->send_message_cache);
+  GRPC_CLOSURE_RUN(calld->original_send_message_on_complete,
                    GRPC_ERROR_REF(error));
 }
 
 // Pulls a slice from the send_message byte stream, updating
 // calld->send_message_bytes_read.
-static grpc_error* pull_slice_from_send_message(grpc_exec_ctx* exec_ctx,
-                                                call_data* calld) {
+static grpc_error* pull_slice_from_send_message(call_data* calld) {
   grpc_slice incoming_slice;
   grpc_error* error = grpc_byte_stream_pull(
-      exec_ctx, &calld->send_message_caching_stream.base, &incoming_slice);
+      &calld->send_message_caching_stream.base, &incoming_slice);
   if (error == GRPC_ERROR_NONE) {
     calld->send_message_bytes_read += GRPC_SLICE_LENGTH(incoming_slice);
-    grpc_slice_unref_internal(exec_ctx, incoming_slice);
+    grpc_slice_unref_internal(incoming_slice);
   }
   return error;
 }
@@ -194,12 +185,10 @@
 // calld->send_message_caching_stream.base.length, then we have completed
 // reading from the byte stream; otherwise, an async read has been dispatched
 // and on_send_message_next_done() will be invoked when it is complete.
-static grpc_error* read_all_available_send_message_data(grpc_exec_ctx* exec_ctx,
-                                                        call_data* calld) {
-  while (grpc_byte_stream_next(exec_ctx,
-                               &calld->send_message_caching_stream.base,
+static grpc_error* read_all_available_send_message_data(call_data* calld) {
+  while (grpc_byte_stream_next(&calld->send_message_caching_stream.base,
                                ~(size_t)0, &calld->on_send_message_next_done)) {
-    grpc_error* error = pull_slice_from_send_message(exec_ctx, calld);
+    grpc_error* error = pull_slice_from_send_message(calld);
     if (error != GRPC_ERROR_NONE) return error;
     if (calld->send_message_bytes_read ==
         calld->send_message_caching_stream.base.length) {
@@ -210,19 +199,18 @@
 }
 
 // Async callback for grpc_byte_stream_next().
-static void on_send_message_next_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                      grpc_error* error) {
+static void on_send_message_next_done(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
   if (error != GRPC_ERROR_NONE) {
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, calld->send_message_batch, error, calld->call_combiner);
+        calld->send_message_batch, error, calld->call_combiner);
     return;
   }
-  error = pull_slice_from_send_message(exec_ctx, calld);
+  error = pull_slice_from_send_message(calld);
   if (error != GRPC_ERROR_NONE) {
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, calld->send_message_batch, error, calld->call_combiner);
+        calld->send_message_batch, error, calld->call_combiner);
     return;
   }
   // There may or may not be more to read, but we don't care.  If we got
@@ -230,7 +218,7 @@
   // synchronously, so we were not able to do a cached call.  Instead,
   // we just reset the byte stream and then send down the batch as-is.
   grpc_caching_byte_stream_reset(&calld->send_message_caching_stream);
-  grpc_call_next_op(exec_ctx, elem, calld->send_message_batch);
+  grpc_call_next_op(elem, calld->send_message_batch);
 }
 
 static char* slice_buffer_to_string(grpc_slice_buffer* slice_buffer) {
@@ -248,8 +236,7 @@
 
 // Modifies the path entry in the batch's send_initial_metadata to
 // append the base64-encoded query for a GET request.
-static grpc_error* update_path_for_get(grpc_exec_ctx* exec_ctx,
-                                       grpc_call_element* elem,
+static grpc_error* update_path_for_get(grpc_call_element* elem,
                                        grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   grpc_slice path_slice =
@@ -282,24 +269,22 @@
       grpc_slice_sub_no_ref(path_with_query_slice, 0, strlen(t));
   /* substitute previous path with the new path+query */
   grpc_mdelem mdelem_path_and_query =
-      grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH, path_with_query_slice);
+      grpc_mdelem_from_slices(GRPC_MDSTR_PATH, path_with_query_slice);
   grpc_metadata_batch* b =
       batch->payload->send_initial_metadata.send_initial_metadata;
-  return grpc_metadata_batch_substitute(exec_ctx, b, b->idx.named.path,
+  return grpc_metadata_batch_substitute(b, b->idx.named.path,
                                         mdelem_path_and_query);
 }
 
-static void remove_if_present(grpc_exec_ctx* exec_ctx,
-                              grpc_metadata_batch* batch,
+static void remove_if_present(grpc_metadata_batch* batch,
                               grpc_metadata_batch_callouts_index idx) {
   if (batch->idx.array[idx] != nullptr) {
-    grpc_metadata_batch_remove(exec_ctx, batch, batch->idx.array[idx]);
+    grpc_metadata_batch_remove(batch, batch->idx.array[idx]);
   }
 }
 
 static void hc_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* batch) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* channeld = (channel_data*)elem->channel_data;
   GPR_TIMER_BEGIN("hc_start_transport_stream_op_batch", 0);
@@ -345,17 +330,16 @@
       calld->original_send_message_on_complete = batch->on_complete;
       batch->on_complete = &calld->send_message_on_complete;
       calld->send_message_batch = batch;
-      error = read_all_available_send_message_data(exec_ctx, calld);
+      error = read_all_available_send_message_data(calld);
       if (error != GRPC_ERROR_NONE) goto done;
       // If all the data has been read, then we can use GET.
       if (calld->send_message_bytes_read ==
           calld->send_message_caching_stream.base.length) {
         method = GRPC_MDELEM_METHOD_GET;
-        error = update_path_for_get(exec_ctx, elem, batch);
+        error = update_path_for_get(elem, batch);
         if (error != GRPC_ERROR_NONE) goto done;
         batch->send_message = false;
-        grpc_byte_stream_destroy(exec_ctx,
-                                 &calld->send_message_caching_stream.base);
+        grpc_byte_stream_destroy(&calld->send_message_caching_stream.base);
       } else {
         // Not all data is available.  The batch will be sent down
         // asynchronously in on_send_message_next_done().
@@ -372,41 +356,41 @@
     }
 
     remove_if_present(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         GRPC_BATCH_METHOD);
     remove_if_present(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         GRPC_BATCH_SCHEME);
     remove_if_present(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         GRPC_BATCH_TE);
     remove_if_present(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         GRPC_BATCH_CONTENT_TYPE);
     remove_if_present(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         GRPC_BATCH_USER_AGENT);
 
    /* Send ':'-prefixed headers, which have to come before any
       application-layer headers. */
     error = grpc_metadata_batch_add_head(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         &calld->method, method);
     if (error != GRPC_ERROR_NONE) goto done;
     error = grpc_metadata_batch_add_head(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         &calld->scheme, channeld->static_scheme);
     if (error != GRPC_ERROR_NONE) goto done;
     error = grpc_metadata_batch_add_tail(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         &calld->te_trailers, GRPC_MDELEM_TE_TRAILERS);
     if (error != GRPC_ERROR_NONE) goto done;
     error = grpc_metadata_batch_add_tail(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         &calld->content_type, GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC);
     if (error != GRPC_ERROR_NONE) goto done;
     error = grpc_metadata_batch_add_tail(
-        exec_ctx, batch->payload->send_initial_metadata.send_initial_metadata,
+        batch->payload->send_initial_metadata.send_initial_metadata,
         &calld->user_agent, GRPC_MDELEM_REF(channeld->user_agent));
     if (error != GRPC_ERROR_NONE) goto done;
   }
@@ -414,16 +398,15 @@
 done:
   if (error != GRPC_ERROR_NONE) {
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, calld->send_message_batch, error, calld->call_combiner);
+        calld->send_message_batch, error, calld->call_combiner);
   } else if (!batch_will_be_handled_asynchronously) {
-    grpc_call_next_op(exec_ctx, elem, batch);
+    grpc_call_next_op(elem, batch);
   }
   GPR_TIMER_END("hc_start_transport_stream_op_batch", 0);
 }
 
 /* Constructor for call_data */
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   calld->call_combiner = args->call_combiner;
@@ -441,7 +424,7 @@
 }
 
 /* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {}
 
@@ -533,8 +516,7 @@
 }
 
 /* Constructor for channel_data */
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   channel_data* chand = (channel_data*)elem->channel_data;
   GPR_ASSERT(!args->is_last);
@@ -543,17 +525,16 @@
   chand->max_payload_size_for_get =
       max_payload_size_from_args(args->channel_args);
   chand->user_agent = grpc_mdelem_from_slices(
-      exec_ctx, GRPC_MDSTR_USER_AGENT,
+      GRPC_MDSTR_USER_AGENT,
       user_agent_from_args(args->channel_args,
                            args->optional_transport->vtable->name));
   return GRPC_ERROR_NONE;
 }
 
 /* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {
+static void destroy_channel_elem(grpc_channel_element* elem) {
   channel_data* chand = (channel_data*)elem->channel_data;
-  GRPC_MDELEM_UNREF(exec_ctx, chand->user_agent);
+  GRPC_MDELEM_UNREF(chand->user_agent);
 }
 
 const grpc_channel_filter grpc_http_client_filter = {
diff --git a/src/core/ext/filters/http/client/http_client_filter.h b/src/core/ext/filters/http/client/http_client_filter.h
index 9ed8e76..ec8177c 100644
--- a/src/core/ext/filters/http/client/http_client_filter.h
+++ b/src/core/ext/filters/http/client/http_client_filter.h
@@ -20,18 +20,10 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Processes metadata on the client side for HTTP2 transports */
 extern const grpc_channel_filter grpc_http_client_filter;
 
 /* Channel arg to determine maximum size of payload eligible for GET request */
 #define GRPC_ARG_MAX_PAYLOAD_SIZE_FOR_GET "grpc.max_payload_size_for_get"
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_HTTP_CLIENT_HTTP_CLIENT_FILTER_H */
diff --git a/src/core/ext/filters/http/http_filters_plugin.cc b/src/core/ext/filters/http/http_filters_plugin.cc
index ac31ace..deec77c 100644
--- a/src/core/ext/filters/http/http_filters_plugin.cc
+++ b/src/core/ext/filters/http/http_filters_plugin.cc
@@ -40,8 +40,7 @@
   return t != nullptr && strstr(t->vtable->name, "http");
 }
 
-static bool maybe_add_optional_filter(grpc_exec_ctx* exec_ctx,
-                                      grpc_channel_stack_builder* builder,
+static bool maybe_add_optional_filter(grpc_channel_stack_builder* builder,
                                       void* arg) {
   if (!is_building_http_like_transport(builder)) return true;
   optional_filter* filtarg = (optional_filter*)arg;
@@ -55,8 +54,7 @@
                 : true;
 }
 
-static bool maybe_add_required_filter(grpc_exec_ctx* exec_ctx,
-                                      grpc_channel_stack_builder* builder,
+static bool maybe_add_required_filter(grpc_channel_stack_builder* builder,
                                       void* arg) {
   return is_building_http_like_transport(builder)
              ? grpc_channel_stack_builder_prepend_filter(
@@ -64,7 +62,7 @@
              : true;
 }
 
-extern "C" void grpc_http_filters_init(void) {
+void grpc_http_filters_init(void) {
   grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                    GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                    maybe_add_optional_filter, &compress_filter);
@@ -85,4 +83,4 @@
       maybe_add_required_filter, (void*)&grpc_http_server_filter);
 }
 
-extern "C" void grpc_http_filters_shutdown(void) {}
+void grpc_http_filters_shutdown(void) {}
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.cc b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
index d070b56..9ae13d2 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.cc
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.cc
@@ -100,12 +100,11 @@
 
 /** Filter initial metadata */
 static grpc_error* process_send_initial_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_metadata_batch* initial_metadata,
+    grpc_call_element* elem, grpc_metadata_batch* initial_metadata,
     bool* has_compression_algorithm) GRPC_MUST_USE_RESULT;
 static grpc_error* process_send_initial_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_metadata_batch* initial_metadata, bool* has_compression_algorithm) {
+    grpc_call_element* elem, grpc_metadata_batch* initial_metadata,
+    bool* has_compression_algorithm) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* channeld = (channel_data*)elem->channel_data;
   *has_compression_algorithm = false;
@@ -137,13 +136,13 @@
     }
     *has_compression_algorithm = true;
     grpc_metadata_batch_remove(
-        exec_ctx, initial_metadata,
+        initial_metadata,
         initial_metadata->idx.named.grpc_internal_stream_encoding_request);
     /* Disable message-wise compression */
     calld->compression_algorithm = GRPC_COMPRESS_NONE;
     if (initial_metadata->idx.named.grpc_internal_encoding_request != nullptr) {
       grpc_metadata_batch_remove(
-          exec_ctx, initial_metadata,
+          initial_metadata,
           initial_metadata->idx.named.grpc_internal_encoding_request);
     }
   } else if (initial_metadata->idx.named.grpc_internal_encoding_request !=
@@ -160,7 +159,7 @@
     }
     *has_compression_algorithm = true;
     grpc_metadata_batch_remove(
-        exec_ctx, initial_metadata,
+        initial_metadata,
         initial_metadata->idx.named.grpc_internal_encoding_request);
   } else {
     /* If no algorithm was found in the metadata and we aren't
@@ -181,12 +180,11 @@
   /* hint compression algorithm */
   if (stream_compression_algorithm != GRPC_STREAM_COMPRESS_NONE) {
     error = grpc_metadata_batch_add_tail(
-        exec_ctx, initial_metadata,
-        &calld->stream_compression_algorithm_storage,
+        initial_metadata, &calld->stream_compression_algorithm_storage,
         grpc_stream_compression_encoding_mdelem(stream_compression_algorithm));
   } else if (calld->compression_algorithm != GRPC_COMPRESS_NONE) {
     error = grpc_metadata_batch_add_tail(
-        exec_ctx, initial_metadata, &calld->compression_algorithm_storage,
+        initial_metadata, &calld->compression_algorithm_storage,
         grpc_compression_encoding_mdelem(calld->compression_algorithm));
   }
 
@@ -194,7 +192,7 @@
 
   /* convey supported compression algorithms */
   error = grpc_metadata_batch_add_tail(
-      exec_ctx, initial_metadata, &calld->accept_encoding_storage,
+      initial_metadata, &calld->accept_encoding_storage,
       GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(
           channeld->supported_compression_algorithms));
 
@@ -203,7 +201,7 @@
   /* Do not overwrite the accept-encoding header if it is already present. */
   if (!initial_metadata->idx.named.accept_encoding) {
     error = grpc_metadata_batch_add_tail(
-        exec_ctx, initial_metadata, &calld->accept_stream_encoding_storage,
+        initial_metadata, &calld->accept_stream_encoding_storage,
         GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(
             channeld->supported_stream_compression_algorithms));
   }
@@ -211,17 +209,15 @@
   return error;
 }
 
-static void send_message_on_complete(grpc_exec_ctx* exec_ctx, void* arg,
-                                     grpc_error* error) {
+static void send_message_on_complete(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &calld->slices);
-  GRPC_CLOSURE_RUN(exec_ctx, calld->original_send_message_on_complete,
+  grpc_slice_buffer_reset_and_unref_internal(&calld->slices);
+  GRPC_CLOSURE_RUN(calld->original_send_message_on_complete,
                    GRPC_ERROR_REF(error));
 }
 
-static void send_message_batch_continue(grpc_exec_ctx* exec_ctx,
-                                        grpc_call_element* elem) {
+static void send_message_batch_continue(grpc_call_element* elem) {
   call_data* calld = (call_data*)elem->call_data;
   // Note: The call to grpc_call_next_op() results in yielding the
   // call combiner, so we need to clear calld->send_message_batch
@@ -229,19 +225,18 @@
   grpc_transport_stream_op_batch* send_message_batch =
       calld->send_message_batch;
   calld->send_message_batch = nullptr;
-  grpc_call_next_op(exec_ctx, elem, send_message_batch);
+  grpc_call_next_op(elem, send_message_batch);
 }
 
-static void finish_send_message(grpc_exec_ctx* exec_ctx,
-                                grpc_call_element* elem) {
+static void finish_send_message(grpc_call_element* elem) {
   call_data* calld = (call_data*)elem->call_data;
   // Compress the data if appropriate.
   grpc_slice_buffer tmp;
   grpc_slice_buffer_init(&tmp);
   uint32_t send_flags =
       calld->send_message_batch->payload->send_message.send_message->flags;
-  bool did_compress = grpc_msg_compress(exec_ctx, calld->compression_algorithm,
-                                        &calld->slices, &tmp);
+  bool did_compress =
+      grpc_msg_compress(calld->compression_algorithm, &calld->slices, &tmp);
   if (did_compress) {
     if (grpc_compression_trace.enabled()) {
       const char* algo_name;
@@ -268,11 +263,11 @@
               algo_name, calld->slices.length);
     }
   }
-  grpc_slice_buffer_destroy_internal(exec_ctx, &tmp);
+  grpc_slice_buffer_destroy_internal(&tmp);
   // Swap out the original byte stream with our new one and send the
   // batch down.
   grpc_byte_stream_destroy(
-      exec_ctx, calld->send_message_batch->payload->send_message.send_message);
+      calld->send_message_batch->payload->send_message.send_message);
   grpc_slice_buffer_stream_init(&calld->replacement_stream, &calld->slices,
                                 send_flags);
   calld->send_message_batch->payload->send_message.send_message =
@@ -280,27 +275,24 @@
   calld->original_send_message_on_complete =
       calld->send_message_batch->on_complete;
   calld->send_message_batch->on_complete = &calld->send_message_on_complete;
-  send_message_batch_continue(exec_ctx, elem);
+  send_message_batch_continue(elem);
 }
 
-static void fail_send_message_batch_in_call_combiner(grpc_exec_ctx* exec_ctx,
-                                                     void* arg,
+static void fail_send_message_batch_in_call_combiner(void* arg,
                                                      grpc_error* error) {
   call_data* calld = (call_data*)arg;
   if (calld->send_message_batch != nullptr) {
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, calld->send_message_batch, GRPC_ERROR_REF(error),
-        calld->call_combiner);
+        calld->send_message_batch, GRPC_ERROR_REF(error), calld->call_combiner);
     calld->send_message_batch = nullptr;
   }
 }
 
 // Pulls a slice from the send_message byte stream and adds it to calld->slices.
-static grpc_error* pull_slice_from_send_message(grpc_exec_ctx* exec_ctx,
-                                                call_data* calld) {
+static grpc_error* pull_slice_from_send_message(call_data* calld) {
   grpc_slice incoming_slice;
   grpc_error* error = grpc_byte_stream_pull(
-      exec_ctx, calld->send_message_batch->payload->send_message.send_message,
+      calld->send_message_batch->payload->send_message.send_message,
       &incoming_slice);
   if (error == GRPC_ERROR_NONE) {
     grpc_slice_buffer_add(&calld->slices, incoming_slice);
@@ -312,69 +304,65 @@
 // If all data has been read, invokes finish_send_message().  Otherwise,
 // an async call to grpc_byte_stream_next() has been started, which will
 // eventually result in calling on_send_message_next_done().
-static void continue_reading_send_message(grpc_exec_ctx* exec_ctx,
-                                          grpc_call_element* elem) {
+static void continue_reading_send_message(grpc_call_element* elem) {
   call_data* calld = (call_data*)elem->call_data;
   while (grpc_byte_stream_next(
-      exec_ctx, calld->send_message_batch->payload->send_message.send_message,
-      ~(size_t)0, &calld->on_send_message_next_done)) {
-    grpc_error* error = pull_slice_from_send_message(exec_ctx, calld);
+      calld->send_message_batch->payload->send_message.send_message, ~(size_t)0,
+      &calld->on_send_message_next_done)) {
+    grpc_error* error = pull_slice_from_send_message(calld);
     if (error != GRPC_ERROR_NONE) {
       // Closure callback; does not take ownership of error.
-      fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+      fail_send_message_batch_in_call_combiner(calld, error);
       GRPC_ERROR_UNREF(error);
       return;
     }
     if (calld->slices.length ==
         calld->send_message_batch->payload->send_message.send_message->length) {
-      finish_send_message(exec_ctx, elem);
+      finish_send_message(elem);
       break;
     }
   }
 }
 
 // Async callback for grpc_byte_stream_next().
-static void on_send_message_next_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                      grpc_error* error) {
+static void on_send_message_next_done(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
   if (error != GRPC_ERROR_NONE) {
     // Closure callback; does not take ownership of error.
-    fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+    fail_send_message_batch_in_call_combiner(calld, error);
     return;
   }
-  error = pull_slice_from_send_message(exec_ctx, calld);
+  error = pull_slice_from_send_message(calld);
   if (error != GRPC_ERROR_NONE) {
     // Closure callback; does not take ownership of error.
-    fail_send_message_batch_in_call_combiner(exec_ctx, calld, error);
+    fail_send_message_batch_in_call_combiner(calld, error);
     GRPC_ERROR_UNREF(error);
     return;
   }
   if (calld->slices.length ==
       calld->send_message_batch->payload->send_message.send_message->length) {
-    finish_send_message(exec_ctx, elem);
+    finish_send_message(elem);
   } else {
-    continue_reading_send_message(exec_ctx, elem);
+    continue_reading_send_message(elem);
   }
 }
 
-static void start_send_message_batch(grpc_exec_ctx* exec_ctx, void* arg,
-                                     grpc_error* unused) {
+static void start_send_message_batch(void* arg, grpc_error* unused) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
   if (skip_compression(
           elem,
           calld->send_message_batch->payload->send_message.send_message->flags,
           calld->send_initial_metadata_state == HAS_COMPRESSION_ALGORITHM)) {
-    send_message_batch_continue(exec_ctx, elem);
+    send_message_batch_continue(elem);
   } else {
-    continue_reading_send_message(exec_ctx, elem);
+    continue_reading_send_message(elem);
   }
 }
 
 static void compress_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* batch) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   GPR_TIMER_BEGIN("compress_start_transport_stream_op_batch", 0);
   // Handle cancel_stream.
@@ -385,21 +373,19 @@
     if (calld->send_message_batch != nullptr) {
       if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
         GRPC_CALL_COMBINER_START(
-            exec_ctx, calld->call_combiner,
+            calld->call_combiner,
             GRPC_CLOSURE_CREATE(fail_send_message_batch_in_call_combiner, calld,
                                 grpc_schedule_on_exec_ctx),
             GRPC_ERROR_REF(calld->cancel_error), "failing send_message op");
       } else {
         grpc_byte_stream_shutdown(
-            exec_ctx,
             calld->send_message_batch->payload->send_message.send_message,
             GRPC_ERROR_REF(calld->cancel_error));
       }
     }
   } else if (calld->cancel_error != GRPC_ERROR_NONE) {
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, batch, GRPC_ERROR_REF(calld->cancel_error),
-        calld->call_combiner);
+        batch, GRPC_ERROR_REF(calld->cancel_error), calld->call_combiner);
     goto done;
   }
   // Handle send_initial_metadata.
@@ -407,11 +393,10 @@
     GPR_ASSERT(calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN);
     bool has_compression_algorithm;
     grpc_error* error = process_send_initial_metadata(
-        exec_ctx, elem,
-        batch->payload->send_initial_metadata.send_initial_metadata,
+        elem, batch->payload->send_initial_metadata.send_initial_metadata,
         &has_compression_algorithm);
     if (error != GRPC_ERROR_NONE) {
-      grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+      grpc_transport_stream_op_batch_finish_with_failure(batch, error,
                                                          calld->call_combiner);
       goto done;
     }
@@ -425,7 +410,7 @@
     // the call stack) will release the call combiner for each batch it sees.
     if (calld->send_message_batch != nullptr) {
       GRPC_CALL_COMBINER_START(
-          exec_ctx, calld->call_combiner,
+          calld->call_combiner,
           &calld->start_send_message_batch_in_call_combiner, GRPC_ERROR_NONE,
           "starting send_message after send_initial_metadata");
     }
@@ -440,22 +425,21 @@
     // send_initial_metadata.
     if (calld->send_initial_metadata_state == INITIAL_METADATA_UNSEEN) {
       GRPC_CALL_COMBINER_STOP(
-          exec_ctx, calld->call_combiner,
+          calld->call_combiner,
           "send_message batch pending send_initial_metadata");
       goto done;
     }
-    start_send_message_batch(exec_ctx, elem, GRPC_ERROR_NONE);
+    start_send_message_batch(elem, GRPC_ERROR_NONE);
   } else {
     // Pass control down the stack.
-    grpc_call_next_op(exec_ctx, elem, batch);
+    grpc_call_next_op(elem, batch);
   }
 done:
   GPR_TIMER_END("compress_start_transport_stream_op_batch", 0);
 }
 
 /* Constructor for call_data */
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   calld->call_combiner = args->call_combiner;
@@ -471,17 +455,16 @@
 }
 
 /* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = (call_data*)elem->call_data;
-  grpc_slice_buffer_destroy_internal(exec_ctx, &calld->slices);
+  grpc_slice_buffer_destroy_internal(&calld->slices);
   GRPC_ERROR_UNREF(calld->cancel_error);
 }
 
 /* Constructor for channel_data */
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   channel_data* channeld = (channel_data*)elem->channel_data;
 
@@ -531,8 +514,7 @@
 }
 
 /* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 const grpc_channel_filter grpc_message_compress_filter = {
     compress_start_transport_stream_op_batch,
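
In the compress filter above, the compressed slices replace the original message only when grpc_msg_compress() reports that compression happened (did_compress); otherwise the original bytes go down unchanged. A toy, self-contained model of that decision, with run-length encoding standing in for the real compressor (all names here are illustrative):

#include <cstdio>
#include <string>

// Toy "compressor": run-length encoding, standing in for grpc_msg_compress().
static std::string rle_compress(const std::string& in) {
  std::string out;
  for (size_t i = 0; i < in.size();) {
    size_t j = i;
    while (j < in.size() && in[j] == in[i]) ++j;
    out += in[i];
    out += std::to_string(j - i);
    i = j;
  }
  return out;
}

// Only swap in the compressed payload when it is genuinely smaller; otherwise
// keep the original message untouched.
static std::string maybe_compress(const std::string& payload,
                                  bool* did_compress) {
  std::string candidate = rle_compress(payload);
  *did_compress = candidate.size() < payload.size();
  return *did_compress ? candidate : payload;
}

int main() {
  bool did_compress = false;
  std::string a = maybe_compress("aaaaaaaaaabbbbbbbbbb", &did_compress);
  std::printf("compressed=%d payload=%s\n", did_compress, a.c_str());
  std::string b = maybe_compress("abcd", &did_compress);
  std::printf("compressed=%d payload=%s\n", did_compress, b.c_str());
  return 0;
}
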
diff --git a/src/core/ext/filters/http/message_compress/message_compress_filter.h b/src/core/ext/filters/http/message_compress/message_compress_filter.h
index 79a2815..6220791 100644
--- a/src/core/ext/filters/http/message_compress/message_compress_filter.h
+++ b/src/core/ext/filters/http/message_compress/message_compress_filter.h
@@ -23,10 +23,6 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Compression filter for outgoing data.
  *
  * See <grpc/compression.h> for the available compression settings.
@@ -51,9 +47,5 @@
 
 extern const grpc_channel_filter grpc_message_compress_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_HTTP_MESSAGE_COMPRESS_MESSAGE_COMPRESS_FILTER_H \
         */
diff --git a/src/core/ext/filters/http/server/http_server_filter.cc b/src/core/ext/filters/http/server/http_server_filter.cc
index 4f38979..b872dc9 100644
--- a/src/core/ext/filters/http/server/http_server_filter.cc
+++ b/src/core/ext/filters/http/server/http_server_filter.cc
@@ -66,8 +66,7 @@
   uint8_t unused;
 } channel_data;
 
-static grpc_error* server_filter_outgoing_metadata(grpc_exec_ctx* exec_ctx,
-                                                   grpc_call_element* elem,
+static grpc_error* server_filter_outgoing_metadata(grpc_call_element* elem,
                                                    grpc_metadata_batch* b) {
   if (b->idx.named.grpc_message != nullptr) {
     grpc_slice pct_encoded_msg = grpc_percent_encode_slice(
@@ -75,10 +74,9 @@
         grpc_compatible_percent_encoding_unreserved_bytes);
     if (grpc_slice_is_equivalent(pct_encoded_msg,
                                  GRPC_MDVALUE(b->idx.named.grpc_message->md))) {
-      grpc_slice_unref_internal(exec_ctx, pct_encoded_msg);
+      grpc_slice_unref_internal(pct_encoded_msg);
     } else {
-      grpc_metadata_batch_set_value(exec_ctx, b->idx.named.grpc_message,
-                                    pct_encoded_msg);
+      grpc_metadata_batch_set_value(b->idx.named.grpc_message, pct_encoded_msg);
     }
   }
   return GRPC_ERROR_NONE;
@@ -93,8 +91,7 @@
   *cumulative = grpc_error_add_child(*cumulative, new_err);
 }
 
-static grpc_error* server_filter_incoming_metadata(grpc_exec_ctx* exec_ctx,
-                                                   grpc_call_element* elem,
+static grpc_error* server_filter_incoming_metadata(grpc_call_element* elem,
                                                    grpc_metadata_batch* b) {
   call_data* calld = (call_data*)elem->call_data;
   grpc_error* error = GRPC_ERROR_NONE;
@@ -123,7 +120,7 @@
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad header"),
                     b->idx.named.method->md));
     }
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.method);
+    grpc_metadata_batch_remove(b, b->idx.named.method);
   } else {
     add_error(
         error_name, &error,
@@ -139,7 +136,7 @@
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad header"),
                     b->idx.named.te->md));
     }
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.te);
+    grpc_metadata_batch_remove(b, b->idx.named.te);
   } else {
     add_error(error_name, &error,
               grpc_error_set_str(
@@ -156,7 +153,7 @@
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("Bad header"),
                     b->idx.named.scheme->md));
     }
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.scheme);
+    grpc_metadata_batch_remove(b, b->idx.named.scheme);
   } else {
     add_error(
         error_name, &error,
@@ -191,7 +188,7 @@
         gpr_free(val);
       }
     }
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_type);
+    grpc_metadata_batch_remove(b, b->idx.named.content_type);
   }
 
   if (b->idx.named.path == nullptr) {
@@ -218,22 +215,21 @@
 
       /* substitute path metadata with just the path (not query) */
       grpc_mdelem mdelem_path_without_query = grpc_mdelem_from_slices(
-          exec_ctx, GRPC_MDSTR_PATH, grpc_slice_sub(path_slice, 0, offset));
+          GRPC_MDSTR_PATH, grpc_slice_sub(path_slice, 0, offset));
 
-      grpc_metadata_batch_substitute(exec_ctx, b, b->idx.named.path,
+      grpc_metadata_batch_substitute(b, b->idx.named.path,
                                      mdelem_path_without_query);
 
       /* decode payload from query and add to the slice buffer to be returned */
       const int k_url_safe = 1;
-      grpc_slice_buffer_add(
-          &calld->read_slice_buffer,
-          grpc_base64_decode_with_len(
-              exec_ctx, (const char*)GRPC_SLICE_START_PTR(query_slice),
-              GRPC_SLICE_LENGTH(query_slice), k_url_safe));
+      grpc_slice_buffer_add(&calld->read_slice_buffer,
+                            grpc_base64_decode_with_len(
+                                (const char*)GRPC_SLICE_START_PTR(query_slice),
+                                GRPC_SLICE_LENGTH(query_slice), k_url_safe));
       grpc_slice_buffer_stream_init(&calld->read_stream,
                                     &calld->read_slice_buffer, 0);
       calld->seen_path_with_query = true;
-      grpc_slice_unref_internal(exec_ctx, query_slice);
+      grpc_slice_unref_internal(query_slice);
     } else {
       gpr_log(GPR_ERROR, "GET request without QUERY");
     }
@@ -242,14 +238,14 @@
   if (b->idx.named.host != nullptr && b->idx.named.authority == nullptr) {
     grpc_linked_mdelem* el = b->idx.named.host;
     grpc_mdelem md = GRPC_MDELEM_REF(el->md);
-    grpc_metadata_batch_remove(exec_ctx, b, el);
+    grpc_metadata_batch_remove(b, el);
     add_error(error_name, &error,
               grpc_metadata_batch_add_head(
-                  exec_ctx, b, el,
+                  b, el,
                   grpc_mdelem_from_slices(
-                      exec_ctx, GRPC_MDSTR_AUTHORITY,
+                      GRPC_MDSTR_AUTHORITY,
                       grpc_slice_ref_internal(GRPC_MDVALUE(md)))));
-    GRPC_MDELEM_UNREF(exec_ctx, md);
+    GRPC_MDELEM_UNREF(md);
   }
 
   if (b->idx.named.authority == nullptr) {
@@ -263,21 +259,18 @@
   return error;
 }
 
-static void hs_on_recv(grpc_exec_ctx* exec_ctx, void* user_data,
-                       grpc_error* err) {
+static void hs_on_recv(void* user_data, grpc_error* err) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
   if (err == GRPC_ERROR_NONE) {
-    err = server_filter_incoming_metadata(exec_ctx, elem,
-                                          calld->recv_initial_metadata);
+    err = server_filter_incoming_metadata(elem, calld->recv_initial_metadata);
   } else {
     GRPC_ERROR_REF(err);
   }
-  GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv, err);
+  GRPC_CLOSURE_RUN(calld->on_done_recv, err);
 }
 
-static void hs_on_complete(grpc_exec_ctx* exec_ctx, void* user_data,
-                           grpc_error* err) {
+static void hs_on_complete(void* user_data, grpc_error* err) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
   /* Call recv_message_ready if we got the payload via the path field */
@@ -287,17 +280,16 @@
                                   : (grpc_byte_stream*)&calld->read_stream;
     // Re-enter call combiner for recv_message_ready, since the surface
     // code will release the call combiner for each callback it receives.
-    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
-                             calld->recv_message_ready, GRPC_ERROR_REF(err),
+    GRPC_CALL_COMBINER_START(calld->call_combiner, calld->recv_message_ready,
+                             GRPC_ERROR_REF(err),
                              "resuming recv_message_ready from on_complete");
     calld->recv_message_ready = nullptr;
     calld->payload_bin_delivered = true;
   }
-  GRPC_CLOSURE_RUN(exec_ctx, calld->on_complete, GRPC_ERROR_REF(err));
+  GRPC_CLOSURE_RUN(calld->on_complete, GRPC_ERROR_REF(err));
 }
 
-static void hs_recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
-                                  grpc_error* err) {
+static void hs_recv_message_ready(void* user_data, grpc_error* err) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
   if (calld->seen_path_with_query) {
@@ -305,15 +297,14 @@
     // returned in hs_on_complete callback.
     // Note that we release the call combiner here, so that other
     // callbacks can run.
-    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                             "pausing recv_message_ready until on_complete");
   } else {
-    GRPC_CLOSURE_RUN(exec_ctx, calld->recv_message_ready, GRPC_ERROR_REF(err));
+    GRPC_CLOSURE_RUN(calld->recv_message_ready, GRPC_ERROR_REF(err));
   }
 }
 
-static grpc_error* hs_mutate_op(grpc_exec_ctx* exec_ctx,
-                                grpc_call_element* elem,
+static grpc_error* hs_mutate_op(grpc_call_element* elem,
                                 grpc_transport_stream_op_batch* op) {
   /* grab pointers to our data from the call element */
   call_data* calld = (call_data*)elem->call_data;
@@ -321,21 +312,19 @@
   if (op->send_initial_metadata) {
     grpc_error* error = GRPC_ERROR_NONE;
     static const char* error_name = "Failed sending initial metadata";
-    add_error(
-        error_name, &error,
-        grpc_metadata_batch_add_head(
-            exec_ctx, op->payload->send_initial_metadata.send_initial_metadata,
-            &calld->status, GRPC_MDELEM_STATUS_200));
-    add_error(
-        error_name, &error,
-        grpc_metadata_batch_add_tail(
-            exec_ctx, op->payload->send_initial_metadata.send_initial_metadata,
-            &calld->content_type,
-            GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC));
     add_error(error_name, &error,
-              server_filter_outgoing_metadata(
-                  exec_ctx, elem,
-                  op->payload->send_initial_metadata.send_initial_metadata));
+              grpc_metadata_batch_add_head(
+                  op->payload->send_initial_metadata.send_initial_metadata,
+                  &calld->status, GRPC_MDELEM_STATUS_200));
+    add_error(error_name, &error,
+              grpc_metadata_batch_add_tail(
+                  op->payload->send_initial_metadata.send_initial_metadata,
+                  &calld->content_type,
+                  GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC));
+    add_error(
+        error_name, &error,
+        server_filter_outgoing_metadata(
+            elem, op->payload->send_initial_metadata.send_initial_metadata));
     if (error != GRPC_ERROR_NONE) return error;
   }
 
@@ -367,8 +356,7 @@
 
   if (op->send_trailing_metadata) {
     grpc_error* error = server_filter_outgoing_metadata(
-        exec_ctx, elem,
-        op->payload->send_trailing_metadata.send_trailing_metadata);
+        elem, op->payload->send_trailing_metadata.send_trailing_metadata);
     if (error != GRPC_ERROR_NONE) return error;
   }
 
@@ -376,23 +364,21 @@
 }
 
 static void hs_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
   call_data* calld = (call_data*)elem->call_data;
   GPR_TIMER_BEGIN("hs_start_transport_stream_op_batch", 0);
-  grpc_error* error = hs_mutate_op(exec_ctx, elem, op);
+  grpc_error* error = hs_mutate_op(elem, op);
   if (error != GRPC_ERROR_NONE) {
-    grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, op, error,
+    grpc_transport_stream_op_batch_finish_with_failure(op, error,
                                                        calld->call_combiner);
   } else {
-    grpc_call_next_op(exec_ctx, elem, op);
+    grpc_call_next_op(elem, op);
   }
   GPR_TIMER_END("hs_start_transport_stream_op_batch", 0);
 }
 
 /* Constructor for call_data */
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   /* grab pointers to our data from the call element */
   call_data* calld = (call_data*)elem->call_data;
@@ -409,24 +395,22 @@
 }
 
 /* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = (call_data*)elem->call_data;
-  grpc_slice_buffer_destroy_internal(exec_ctx, &calld->read_slice_buffer);
+  grpc_slice_buffer_destroy_internal(&calld->read_slice_buffer);
 }
 
 /* Constructor for channel_data */
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   GPR_ASSERT(!args->is_last);
   return GRPC_ERROR_NONE;
 }
 
 /* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 const grpc_channel_filter grpc_http_server_filter = {
     hs_start_transport_stream_op_batch,
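
For the GET-with-query case handled above, the server filter splits the ':path' value at '?', keeps the bare path in the metadata, and treats the query portion as a base64url-encoded payload. A minimal sketch of just the splitting step (illustrative only; the base64 decode and slice plumbing are omitted):

#include <cstdio>
#include <string>
#include <utility>

// Split "/service/method?<encoded-payload>" into the bare path and the still
// encoded query. An empty query mirrors the "GET request without QUERY"
// error branch in the filter.
static std::pair<std::string, std::string> split_path_and_query(
    const std::string& path_with_query) {
  size_t offset = path_with_query.find('?');
  if (offset == std::string::npos) return {path_with_query, ""};
  return {path_with_query.substr(0, offset),
          path_with_query.substr(offset + 1)};
}

int main() {
  auto parts = split_path_and_query("/pkg.Service/Method?AQIDBA");
  std::printf("path=%s query=%s\n", parts.first.c_str(), parts.second.c_str());
  return 0;
}
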
diff --git a/src/core/ext/filters/http/server/http_server_filter.h b/src/core/ext/filters/http/server/http_server_filter.h
index 4b38cc5..c0f678a 100644
--- a/src/core/ext/filters/http/server/http_server_filter.h
+++ b/src/core/ext/filters/http/server/http_server_filter.h
@@ -21,15 +21,7 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Processes metadata on the server side for HTTP2 transports */
 extern const grpc_channel_filter grpc_http_server_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_HTTP_SERVER_HTTP_SERVER_FILTER_H */
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
index 762198f..f50a928 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.cc
@@ -54,8 +54,7 @@
   intptr_t id; /**< an id unique to the channel */
 } channel_data;
 
-static void on_initial_md_ready(grpc_exec_ctx* exec_ctx, void* user_data,
-                                grpc_error* err) {
+static void on_initial_md_ready(void* user_data, grpc_error* err) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
 
@@ -73,20 +72,19 @@
           GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.lb_token->md));
       calld->have_initial_md_string = true;
       grpc_metadata_batch_remove(
-          exec_ctx, calld->recv_initial_metadata,
+          calld->recv_initial_metadata,
           calld->recv_initial_metadata->idx.named.lb_token);
     }
   } else {
     GRPC_ERROR_REF(err);
   }
   calld->ops_recv_initial_metadata_ready->cb(
-      exec_ctx, calld->ops_recv_initial_metadata_ready->cb_arg, err);
+      calld->ops_recv_initial_metadata_ready->cb_arg, err);
   GRPC_ERROR_UNREF(err);
 }
 
 /* Constructor for call_data */
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   calld->id = (intptr_t)args->call_stack;
@@ -108,7 +106,7 @@
 }
 
 /* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = (call_data*)elem->call_data;
@@ -125,19 +123,18 @@
   */
 
   if (calld->have_initial_md_string) {
-    grpc_slice_unref_internal(exec_ctx, calld->initial_md_string);
+    grpc_slice_unref_internal(calld->initial_md_string);
   }
   if (calld->have_trailing_md_string) {
-    grpc_slice_unref_internal(exec_ctx, calld->trailing_md_string);
+    grpc_slice_unref_internal(calld->trailing_md_string);
   }
   if (calld->have_service_method) {
-    grpc_slice_unref_internal(exec_ctx, calld->service_method);
+    grpc_slice_unref_internal(calld->service_method);
   }
 }
 
 /* Constructor for channel_data */
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   GPR_ASSERT(!args->is_last);
 
@@ -158,8 +155,7 @@
 }
 
 /* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {
+static void destroy_channel_elem(grpc_channel_element* elem) {
   /* TODO(dgq): do something with the data
   channel_data *chand = elem->channel_data;
   grpc_load_reporting_call_data lr_call_data = {
@@ -173,8 +169,7 @@
   */
 }
 
-static grpc_filtered_mdelem lr_trailing_md_filter(grpc_exec_ctx* exec_ctx,
-                                                  void* user_data,
+static grpc_filtered_mdelem lr_trailing_md_filter(void* user_data,
                                                   grpc_mdelem md) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
@@ -186,8 +181,7 @@
 }
 
 static void lr_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
   GPR_TIMER_BEGIN("lr_start_transport_stream_op_batch", 0);
   call_data* calld = (call_data*)elem->call_data;
 
@@ -203,12 +197,11 @@
     GRPC_LOG_IF_ERROR(
         "grpc_metadata_batch_filter",
         grpc_metadata_batch_filter(
-            exec_ctx,
             op->payload->send_trailing_metadata.send_trailing_metadata,
             lr_trailing_md_filter, elem,
             "LR trailing metadata filtering error"));
   }
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 
   GPR_TIMER_END("lr_start_transport_stream_op_batch", 0);
 }
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_filter.h b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
index 356f8b8..1baee5e 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_filter.h
@@ -22,15 +22,7 @@
 #include "src/core/ext/filters/load_reporting/server_load_reporting_plugin.h"
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_server_load_reporting_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_FILTER_H \
         */
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
index ab83879..9d1dfcb 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc
@@ -38,7 +38,7 @@
 }
 
 static bool maybe_add_server_load_reporting_filter(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* arg) {
   const grpc_channel_args* args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   const grpc_channel_filter* filter = (const grpc_channel_filter*)arg;
@@ -61,10 +61,10 @@
 
 /* Plugin registration */
 
-extern "C" void grpc_server_load_reporting_plugin_init(void) {
+void grpc_server_load_reporting_plugin_init(void) {
   grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
                                    maybe_add_server_load_reporting_filter,
                                    (void*)&grpc_server_load_reporting_filter);
 }
 
-extern "C" void grpc_server_load_reporting_plugin_shutdown() {}
+void grpc_server_load_reporting_plugin_shutdown() {}
diff --git a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
index a6448ce..4b694d3 100644
--- a/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
+++ b/src/core/ext/filters/load_reporting/server_load_reporting_plugin.h
@@ -23,10 +23,6 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Identifiers for the invocation point of the users LR callback */
 typedef enum grpc_load_reporting_source {
   GRPC_LR_POINT_UNKNOWN = 0,
@@ -59,9 +55,5 @@
 /** Return a \a grpc_arg enabling load reporting */
 grpc_arg grpc_load_reporting_enable_arg();
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_LOAD_REPORTING_SERVER_LOAD_REPORTING_PLUGIN_H \
         */
diff --git a/src/core/ext/filters/max_age/max_age_filter.cc b/src/core/ext/filters/max_age/max_age_filter.cc
index 1387e0f..0499c6e 100644
--- a/src/core/ext/filters/max_age/max_age_filter.cc
+++ b/src/core/ext/filters/max_age/max_age_filter.cc
@@ -88,73 +88,69 @@
 
 /* Increase the number of active calls. Before the increment, if there are no
    calls, the max_idle_timer should be cancelled. */
-static void increase_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
+static void increase_call_count(channel_data* chand) {
   if (gpr_atm_full_fetch_add(&chand->call_count, 1) == 0) {
-    grpc_timer_cancel(exec_ctx, &chand->max_idle_timer);
+    grpc_timer_cancel(&chand->max_idle_timer);
   }
 }
 
 /* Decrease the number of active calls. After the decrement, if there are no
    calls, the max_idle_timer should be started. */
-static void decrease_call_count(grpc_exec_ctx* exec_ctx, channel_data* chand) {
+static void decrease_call_count(channel_data* chand) {
   if (gpr_atm_full_fetch_add(&chand->call_count, -1) == 1) {
     GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_idle_timer");
-    grpc_timer_init(exec_ctx, &chand->max_idle_timer,
-                    grpc_exec_ctx_now(exec_ctx) + chand->max_connection_idle,
-                    &chand->close_max_idle_channel);
+    grpc_timer_init(
+        &chand->max_idle_timer,
+        grpc_core::ExecCtx::Get()->Now() + chand->max_connection_idle,
+        &chand->close_max_idle_channel);
   }
 }
 
-static void start_max_idle_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
-                                            grpc_error* error) {
+static void start_max_idle_timer_after_init(void* arg, grpc_error* error) {
   channel_data* chand = (channel_data*)arg;
   /* Decrease call_count. If there are no active calls at this time,
      max_idle_timer will start here. If the number of active calls is not 0,
      max_idle_timer will start after all the active calls end. */
-  decrease_call_count(exec_ctx, chand);
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
+  decrease_call_count(chand);
+  GRPC_CHANNEL_STACK_UNREF(chand->channel_stack,
                            "max_age start_max_idle_timer_after_init");
 }
 
-static void start_max_age_timer_after_init(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void start_max_age_timer_after_init(void* arg, grpc_error* error) {
   channel_data* chand = (channel_data*)arg;
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_timer_pending = true;
   GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_timer");
-  grpc_timer_init(exec_ctx, &chand->max_age_timer,
-                  grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age,
+  grpc_timer_init(&chand->max_age_timer,
+                  grpc_core::ExecCtx::Get()->Now() + chand->max_connection_age,
                   &chand->close_max_age_channel);
   gpr_mu_unlock(&chand->max_age_timer_mu);
   grpc_transport_op* op = grpc_make_transport_op(nullptr);
   op->on_connectivity_state_change = &chand->channel_connectivity_changed;
   op->connectivity_state = &chand->connectivity_state;
-  grpc_channel_next_op(exec_ctx,
-                       grpc_channel_stack_element(chand->channel_stack, 0), op);
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
+  grpc_channel_next_op(grpc_channel_stack_element(chand->channel_stack, 0), op);
+  GRPC_CHANNEL_STACK_UNREF(chand->channel_stack,
                            "max_age start_max_age_timer_after_init");
 }
 
-static void start_max_age_grace_timer_after_goaway_op(grpc_exec_ctx* exec_ctx,
-                                                      void* arg,
+static void start_max_age_grace_timer_after_goaway_op(void* arg,
                                                       grpc_error* error) {
   channel_data* chand = (channel_data*)arg;
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_grace_timer_pending = true;
   GRPC_CHANNEL_STACK_REF(chand->channel_stack, "max_age max_age_grace_timer");
   grpc_timer_init(
-      exec_ctx, &chand->max_age_grace_timer,
+      &chand->max_age_grace_timer,
       chand->max_connection_age_grace == GRPC_MILLIS_INF_FUTURE
           ? GRPC_MILLIS_INF_FUTURE
-          : grpc_exec_ctx_now(exec_ctx) + chand->max_connection_age_grace,
+          : grpc_core::ExecCtx::Get()->Now() + chand->max_connection_age_grace,
       &chand->force_close_max_age_channel);
   gpr_mu_unlock(&chand->max_age_timer_mu);
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
+  GRPC_CHANNEL_STACK_UNREF(chand->channel_stack,
                            "max_age start_max_age_grace_timer_after_goaway_op");
 }
 
-static void close_max_idle_channel(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_error* error) {
+static void close_max_idle_channel(void* arg, grpc_error* error) {
   channel_data* chand = (channel_data*)arg;
   if (error == GRPC_ERROR_NONE) {
     /* Prevent the max idle timer from being set again */
@@ -165,16 +161,14 @@
                            GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_NO_ERROR);
     grpc_channel_element* elem =
         grpc_channel_stack_element(chand->channel_stack, 0);
-    elem->filter->start_transport_op(exec_ctx, elem, op);
+    elem->filter->start_transport_op(elem, op);
   } else if (error != GRPC_ERROR_CANCELLED) {
     GRPC_LOG_IF_ERROR("close_max_idle_channel", error);
   }
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
-                           "max_age max_idle_timer");
+  GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age max_idle_timer");
 }
 
-static void close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
-                                  grpc_error* error) {
+static void close_max_age_channel(void* arg, grpc_error* error) {
   channel_data* chand = (channel_data*)arg;
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_timer_pending = false;
@@ -189,16 +183,14 @@
                            GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_NO_ERROR);
     grpc_channel_element* elem =
         grpc_channel_stack_element(chand->channel_stack, 0);
-    elem->filter->start_transport_op(exec_ctx, elem, op);
+    elem->filter->start_transport_op(elem, op);
   } else if (error != GRPC_ERROR_CANCELLED) {
     GRPC_LOG_IF_ERROR("close_max_age_channel", error);
   }
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
-                           "max_age max_age_timer");
+  GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age max_age_timer");
 }
 
-static void force_close_max_age_channel(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void force_close_max_age_channel(void* arg, grpc_error* error) {
   channel_data* chand = (channel_data*)arg;
   gpr_mu_lock(&chand->max_age_timer_mu);
   chand->max_age_grace_timer_pending = false;
@@ -209,38 +201,36 @@
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel reaches max age");
     grpc_channel_element* elem =
         grpc_channel_stack_element(chand->channel_stack, 0);
-    elem->filter->start_transport_op(exec_ctx, elem, op);
+    elem->filter->start_transport_op(elem, op);
   } else if (error != GRPC_ERROR_CANCELLED) {
     GRPC_LOG_IF_ERROR("force_close_max_age_channel", error);
   }
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->channel_stack,
-                           "max_age max_age_grace_timer");
+  GRPC_CHANNEL_STACK_UNREF(chand->channel_stack, "max_age max_age_grace_timer");
 }
 
-static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* arg,
-                                         grpc_error* error) {
+static void channel_connectivity_changed(void* arg, grpc_error* error) {
   channel_data* chand = (channel_data*)arg;
   if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
     grpc_transport_op* op = grpc_make_transport_op(nullptr);
     op->on_connectivity_state_change = &chand->channel_connectivity_changed;
     op->connectivity_state = &chand->connectivity_state;
-    grpc_channel_next_op(
-        exec_ctx, grpc_channel_stack_element(chand->channel_stack, 0), op);
+    grpc_channel_next_op(grpc_channel_stack_element(chand->channel_stack, 0),
+                         op);
   } else {
     gpr_mu_lock(&chand->max_age_timer_mu);
     if (chand->max_age_timer_pending) {
-      grpc_timer_cancel(exec_ctx, &chand->max_age_timer);
+      grpc_timer_cancel(&chand->max_age_timer);
       chand->max_age_timer_pending = false;
     }
     if (chand->max_age_grace_timer_pending) {
-      grpc_timer_cancel(exec_ctx, &chand->max_age_grace_timer);
+      grpc_timer_cancel(&chand->max_age_grace_timer);
       chand->max_age_grace_timer_pending = false;
     }
     gpr_mu_unlock(&chand->max_age_timer_mu);
     /* If there are no active calls, this increment will cancel
        max_idle_timer, and prevent max_idle_timer from being started in the
        future. */
-    increase_call_count(exec_ctx, chand);
+    increase_call_count(chand);
   }
 }
 
@@ -263,25 +253,23 @@
 }
 
 /* Constructor for call_data. */
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   channel_data* chand = (channel_data*)elem->channel_data;
-  increase_call_count(exec_ctx, chand);
+  increase_call_count(chand);
   return GRPC_ERROR_NONE;
 }
 
 /* Destructor for call_data. */
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   channel_data* chand = (channel_data*)elem->channel_data;
-  decrease_call_count(exec_ctx, chand);
+  decrease_call_count(chand);
 }
 
 /* Constructor for channel_data. */
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   channel_data* chand = (channel_data*)elem->channel_data;
   gpr_mu_init(&chand->max_age_timer_mu);
@@ -351,8 +339,7 @@
        initialization is done. */
     GRPC_CHANNEL_STACK_REF(chand->channel_stack,
                            "max_age start_max_age_timer_after_init");
-    GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_age_timer_after_init,
-                       GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&chand->start_max_age_timer_after_init, GRPC_ERROR_NONE);
   }
 
   /* Initialize the number of calls as 1, so that the max_idle_timer will not
@@ -361,15 +348,14 @@
   if (chand->max_connection_idle != GRPC_MILLIS_INF_FUTURE) {
     GRPC_CHANNEL_STACK_REF(chand->channel_stack,
                            "max_age start_max_idle_timer_after_init");
-    GRPC_CLOSURE_SCHED(exec_ctx, &chand->start_max_idle_timer_after_init,
+    GRPC_CLOSURE_SCHED(&chand->start_max_idle_timer_after_init,
                        GRPC_ERROR_NONE);
   }
   return GRPC_ERROR_NONE;
 }
 
 /* Destructor for channel_data. */
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 const grpc_channel_filter grpc_max_age_filter = {
     grpc_call_next_op,
@@ -384,8 +370,7 @@
     grpc_channel_next_get_info,
     "max_age"};
 
-static bool maybe_add_max_age_filter(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_stack_builder* builder,
+static bool maybe_add_max_age_filter(grpc_channel_stack_builder* builder,
                                      void* arg) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
@@ -404,10 +389,10 @@
   }
 }
 
-extern "C" void grpc_max_age_filter_init(void) {
+void grpc_max_age_filter_init(void) {
   grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL,
                                    GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                    maybe_add_max_age_filter, nullptr);
 }
 
-extern "C" void grpc_max_age_filter_shutdown(void) {}
+void grpc_max_age_filter_shutdown(void) {}
diff --git a/src/core/ext/filters/max_age/max_age_filter.h b/src/core/ext/filters/max_age/max_age_filter.h
index eeeefd6..68fb4a4 100644
--- a/src/core/ext/filters/max_age/max_age_filter.h
+++ b/src/core/ext/filters/max_age/max_age_filter.h
@@ -19,14 +19,6 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_max_age_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_MAX_AGE_MAX_AGE_FILTER_H */
diff --git a/src/core/ext/filters/message_size/message_size_filter.cc b/src/core/ext/filters/message_size/message_size_filter.cc
index 2e81d09..f8487f9 100644
--- a/src/core/ext/filters/message_size/message_size_filter.cc
+++ b/src/core/ext/filters/message_size/message_size_filter.cc
@@ -47,8 +47,7 @@
   return value;
 }
 
-static void refcounted_message_size_limits_unref(grpc_exec_ctx* exec_ctx,
-                                                 void* value) {
+static void refcounted_message_size_limits_unref(void* value) {
   refcounted_message_size_limits* limits =
       (refcounted_message_size_limits*)value;
   if (gpr_unref(&limits->refs)) {
@@ -108,8 +107,7 @@
 
 // Callback invoked when we receive a message.  Here we check the max
 // receive message size.
-static void recv_message_ready(grpc_exec_ctx* exec_ctx, void* user_data,
-                               grpc_error* error) {
+static void recv_message_ready(void* user_data, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
   if (*calld->recv_message != nullptr && calld->limits.max_recv_size >= 0 &&
@@ -132,13 +130,12 @@
     GRPC_ERROR_REF(error);
   }
   // Invoke the next callback.
-  GRPC_CLOSURE_RUN(exec_ctx, calld->next_recv_message_ready, error);
+  GRPC_CLOSURE_RUN(calld->next_recv_message_ready, error);
 }
 
 // Start transport stream op.
 static void start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
   call_data* calld = (call_data*)elem->call_data;
   // Check max send message size.
   if (op->send_message && calld->limits.max_send_size >= 0 &&
@@ -149,7 +146,7 @@
                  op->payload->send_message.send_message->length,
                  calld->limits.max_send_size);
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, op,
+        op,
         grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(message_string),
                            GRPC_ERROR_INT_GRPC_STATUS,
                            GRPC_STATUS_RESOURCE_EXHAUSTED),
@@ -165,12 +162,11 @@
     op->payload->recv_message.recv_message_ready = &calld->recv_message_ready;
   }
   // Chain to the next filter.
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 }
 
 // Constructor for call_data.
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
@@ -186,7 +182,7 @@
   if (chand->method_limit_table != nullptr) {
     refcounted_message_size_limits* limits =
         (refcounted_message_size_limits*)grpc_method_config_table_get(
-            exec_ctx, chand->method_limit_table, args->path);
+            chand->method_limit_table, args->path);
     if (limits != nullptr) {
       if (limits->limits.max_send_size >= 0 &&
           (limits->limits.max_send_size < calld->limits.max_send_size ||
@@ -204,7 +200,7 @@
 }
 
 // Destructor for call_data.
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {}
 
@@ -241,8 +237,7 @@
 }
 
 // Constructor for channel_data.
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   GPR_ASSERT(!args->is_last);
   channel_data* chand = (channel_data*)elem->channel_data;
@@ -257,8 +252,7 @@
     if (service_config != nullptr) {
       chand->method_limit_table =
           grpc_service_config_create_method_config_table(
-              exec_ctx, service_config,
-              refcounted_message_size_limits_create_from_json,
+              service_config, refcounted_message_size_limits_create_from_json,
               refcounted_message_size_limits_ref,
               refcounted_message_size_limits_unref);
       grpc_service_config_destroy(service_config);
@@ -268,10 +262,9 @@
 }
 
 // Destructor for channel_data.
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {
+static void destroy_channel_elem(grpc_channel_element* elem) {
   channel_data* chand = (channel_data*)elem->channel_data;
-  grpc_slice_hash_table_unref(exec_ctx, chand->method_limit_table);
+  grpc_slice_hash_table_unref(chand->method_limit_table);
 }
 
 const grpc_channel_filter grpc_message_size_filter = {
@@ -287,8 +280,7 @@
     grpc_channel_next_get_info,
     "message_size"};
 
-static bool maybe_add_message_size_filter(grpc_exec_ctx* exec_ctx,
-                                          grpc_channel_stack_builder* builder,
+static bool maybe_add_message_size_filter(grpc_channel_stack_builder* builder,
                                           void* arg) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
@@ -310,7 +302,7 @@
   }
 }
 
-extern "C" void grpc_message_size_filter_init(void) {
+void grpc_message_size_filter_init(void) {
   grpc_channel_init_register_stage(GRPC_CLIENT_SUBCHANNEL,
                                    GRPC_CHANNEL_INIT_BUILTIN_PRIORITY,
                                    maybe_add_message_size_filter, nullptr);
@@ -322,4 +314,4 @@
                                    maybe_add_message_size_filter, nullptr);
 }
 
-extern "C" void grpc_message_size_filter_shutdown(void) {}
+void grpc_message_size_filter_shutdown(void) {}
diff --git a/src/core/ext/filters/message_size/message_size_filter.h b/src/core/ext/filters/message_size/message_size_filter.h
index da325d6..d3667f7 100644
--- a/src/core/ext/filters/message_size/message_size_filter.h
+++ b/src/core/ext/filters/message_size/message_size_filter.h
@@ -19,14 +19,6 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_message_size_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_MESSAGE_SIZE_MESSAGE_SIZE_FILTER_H */
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
index 390da52..555a913 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc
@@ -50,8 +50,7 @@
 }
 
 // Callback invoked when we receive initial metadata.
-static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
-                                        void* user_data, grpc_error* error) {
+static void recv_initial_metadata_ready(void* user_data, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
 
@@ -67,14 +66,13 @@
   }
 
   // Invoke the next callback.
-  GRPC_CLOSURE_RUN(exec_ctx, calld->next_recv_initial_metadata_ready,
+  GRPC_CLOSURE_RUN(calld->next_recv_initial_metadata_ready,
                    GRPC_ERROR_REF(error));
 }
 
 // Start transport stream op.
 static void start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
   call_data* calld = (call_data*)elem->call_data;
 
   // Inject callback for receiving initial metadata
@@ -96,12 +94,11 @@
   }
 
   // Chain to the next filter.
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 }
 
 // Constructor for call_data.
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   calld->next_recv_initial_metadata_ready = nullptr;
@@ -113,20 +110,18 @@
 }
 
 // Destructor for call_data.
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {}
 
 // Constructor for channel_data.
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
 // Destructor for channel_data.
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 // Parse the user agent
 static bool parse_user_agent(grpc_mdelem md) {
@@ -181,7 +176,7 @@
     "workaround_cronet_compression"};
 
 static bool register_workaround_cronet_compression(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* arg) {
   const grpc_channel_args* channel_args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   const grpc_arg* a = grpc_channel_args_find(
@@ -196,7 +191,7 @@
       builder, &grpc_workaround_cronet_compression_filter, nullptr, nullptr);
 }
 
-extern "C" void grpc_workaround_cronet_compression_filter_init(void) {
+void grpc_workaround_cronet_compression_filter_init(void) {
   grpc_channel_init_register_stage(
       GRPC_SERVER_CHANNEL, GRPC_WORKAROUND_PRIORITY_HIGH,
       register_workaround_cronet_compression, nullptr);
@@ -204,4 +199,4 @@
                            parse_user_agent);
 }
 
-extern "C" void grpc_workaround_cronet_compression_filter_shutdown(void) {}
+void grpc_workaround_cronet_compression_filter_shutdown(void) {}
diff --git a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h
index c8b07df..9dae4f0 100644
--- a/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h
+++ b/src/core/ext/filters/workarounds/workaround_cronet_compression_filter.h
@@ -19,15 +19,7 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_workaround_cronet_compression_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_CRONET_COMPRESSION_FILTER_H \
         */
diff --git a/src/core/ext/filters/workarounds/workaround_utils.h b/src/core/ext/filters/workarounds/workaround_utils.h
index a954ad4..d6ef5e8 100644
--- a/src/core/ext/filters/workarounds/workaround_utils.h
+++ b/src/core/ext/filters/workarounds/workaround_utils.h
@@ -24,10 +24,6 @@
 #define GRPC_WORKAROUND_PRIORITY_HIGH 10001
 #define GRPC_WORKAROUND_PROIRITY_LOW 9999
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_workaround_user_agent_md {
   bool workaround_active[GRPC_MAX_WORKAROUND_ID];
 } grpc_workaround_user_agent_md;
@@ -38,8 +34,4 @@
 
 void grpc_register_workaround(uint32_t id, user_agent_parser parser);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_FILTERS_WORKAROUNDS_WORKAROUND_UTILS_H */
diff --git a/src/core/ext/transport/chttp2/alpn/alpn.h b/src/core/ext/transport/chttp2/alpn/alpn.h
index 4a420e8..fd7513c 100644
--- a/src/core/ext/transport/chttp2/alpn/alpn.h
+++ b/src/core/ext/transport/chttp2/alpn/alpn.h
@@ -21,10 +21,6 @@
 
 #include <string.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Returns 1 if the version is supported, 0 otherwise. */
 int grpc_chttp2_is_alpn_version_supported(const char* version, size_t size);
 
@@ -35,8 +31,4 @@
  * grpc_chttp2_num_alpn_versions()) */
 const char* grpc_chttp2_get_alpn_version_index(size_t i);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_ALPN_ALPN_H */
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.cc b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
index 7b2bb7d..db5962e 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.cc
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.cc
@@ -61,38 +61,34 @@
   gpr_ref(&c->refs);
 }
 
-static void chttp2_connector_unref(grpc_exec_ctx* exec_ctx,
-                                   grpc_connector* con) {
+static void chttp2_connector_unref(grpc_connector* con) {
   chttp2_connector* c = (chttp2_connector*)con;
   if (gpr_unref(&c->refs)) {
     gpr_mu_destroy(&c->mu);
     // If handshaking is not yet in progress, destroy the endpoint.
     // Otherwise, the handshaker will do this for us.
-    if (c->endpoint != nullptr) grpc_endpoint_destroy(exec_ctx, c->endpoint);
+    if (c->endpoint != nullptr) grpc_endpoint_destroy(c->endpoint);
     gpr_free(c);
   }
 }
 
-static void chttp2_connector_shutdown(grpc_exec_ctx* exec_ctx,
-                                      grpc_connector* con, grpc_error* why) {
+static void chttp2_connector_shutdown(grpc_connector* con, grpc_error* why) {
   chttp2_connector* c = (chttp2_connector*)con;
   gpr_mu_lock(&c->mu);
   c->shutdown = true;
   if (c->handshake_mgr != nullptr) {
-    grpc_handshake_manager_shutdown(exec_ctx, c->handshake_mgr,
-                                    GRPC_ERROR_REF(why));
+    grpc_handshake_manager_shutdown(c->handshake_mgr, GRPC_ERROR_REF(why));
   }
   // If handshaking is not yet in progress, shut down the endpoint.
   // Otherwise, the handshaker will do this for us.
   if (!c->connecting && c->endpoint != nullptr) {
-    grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(why));
+    grpc_endpoint_shutdown(c->endpoint, GRPC_ERROR_REF(why));
   }
   gpr_mu_unlock(&c->mu);
   GRPC_ERROR_UNREF(why);
 }
 
-static void on_handshake_done(grpc_exec_ctx* exec_ctx, void* arg,
-                              grpc_error* error) {
+static void on_handshake_done(void* arg, grpc_error* error) {
   grpc_handshaker_args* args = (grpc_handshaker_args*)arg;
   chttp2_connector* c = (chttp2_connector*)args->user_data;
   gpr_mu_lock(&c->mu);
@@ -105,20 +101,20 @@
       // before destroying them, even if we know that there are no
       // pending read/write callbacks.  This should be fixed, at which
       // point this can be removed.
-      grpc_endpoint_shutdown(exec_ctx, args->endpoint, GRPC_ERROR_REF(error));
-      grpc_endpoint_destroy(exec_ctx, args->endpoint);
-      grpc_channel_args_destroy(exec_ctx, args->args);
-      grpc_slice_buffer_destroy_internal(exec_ctx, args->read_buffer);
+      grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_REF(error));
+      grpc_endpoint_destroy(args->endpoint);
+      grpc_channel_args_destroy(args->args);
+      grpc_slice_buffer_destroy_internal(args->read_buffer);
       gpr_free(args->read_buffer);
     } else {
       error = GRPC_ERROR_REF(error);
     }
     memset(c->result, 0, sizeof(*c->result));
   } else {
-    grpc_endpoint_delete_from_pollset_set(exec_ctx, args->endpoint,
+    grpc_endpoint_delete_from_pollset_set(args->endpoint,
                                           c->args.interested_parties);
-    c->result->transport = grpc_create_chttp2_transport(exec_ctx, args->args,
-                                                        args->endpoint, true);
+    c->result->transport =
+        grpc_create_chttp2_transport(args->args, args->endpoint, true);
     GPR_ASSERT(c->result->transport);
     // TODO(roth): We ideally want to wait until we receive HTTP/2
     // settings from the server before we consider the connection
@@ -144,33 +140,32 @@
     // so until after transparent retries is implemented.  Otherwise, any
     // RPC that we attempt to send on the connection before the timeout
     // would fail instead of being retried on a subsequent attempt.
-    grpc_chttp2_transport_start_reading(exec_ctx, c->result->transport,
-                                        args->read_buffer, nullptr);
+    grpc_chttp2_transport_start_reading(c->result->transport, args->read_buffer,
+                                        nullptr);
     c->result->channel_args = args->args;
   }
   grpc_closure* notify = c->notify;
   c->notify = nullptr;
-  GRPC_CLOSURE_SCHED(exec_ctx, notify, error);
-  grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
+  GRPC_CLOSURE_SCHED(notify, error);
+  grpc_handshake_manager_destroy(c->handshake_mgr);
   c->handshake_mgr = nullptr;
   gpr_mu_unlock(&c->mu);
-  chttp2_connector_unref(exec_ctx, (grpc_connector*)c);
+  chttp2_connector_unref((grpc_connector*)c);
 }
 
-static void start_handshake_locked(grpc_exec_ctx* exec_ctx,
-                                   chttp2_connector* c) {
+static void start_handshake_locked(chttp2_connector* c) {
   c->handshake_mgr = grpc_handshake_manager_create();
-  grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, c->args.channel_args,
+  grpc_handshakers_add(HANDSHAKER_CLIENT, c->args.channel_args,
                        c->handshake_mgr);
-  grpc_endpoint_add_to_pollset_set(exec_ctx, c->endpoint,
-                                   c->args.interested_parties);
+  grpc_endpoint_add_to_pollset_set(c->endpoint, c->args.interested_parties);
   grpc_handshake_manager_do_handshake(
-      exec_ctx, c->handshake_mgr, c->endpoint, c->args.channel_args,
-      c->args.deadline, nullptr /* acceptor */, on_handshake_done, c);
+      c->handshake_mgr, c->args.interested_parties, c->endpoint,
+      c->args.channel_args, c->args.deadline, nullptr /* acceptor */,
+      on_handshake_done, c);
   c->endpoint = nullptr;  // Endpoint handed off to handshake manager.
 }
 
-static void connected(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void connected(void* arg, grpc_error* error) {
   chttp2_connector* c = (chttp2_connector*)arg;
   gpr_mu_lock(&c->mu);
   GPR_ASSERT(c->connecting);
@@ -184,27 +179,26 @@
     memset(c->result, 0, sizeof(*c->result));
     grpc_closure* notify = c->notify;
     c->notify = nullptr;
-    GRPC_CLOSURE_SCHED(exec_ctx, notify, error);
+    GRPC_CLOSURE_SCHED(notify, error);
     if (c->endpoint != nullptr) {
-      grpc_endpoint_shutdown(exec_ctx, c->endpoint, GRPC_ERROR_REF(error));
+      grpc_endpoint_shutdown(c->endpoint, GRPC_ERROR_REF(error));
     }
     gpr_mu_unlock(&c->mu);
-    chttp2_connector_unref(exec_ctx, (grpc_connector*)arg);
+    chttp2_connector_unref((grpc_connector*)arg);
   } else {
     GPR_ASSERT(c->endpoint != nullptr);
-    start_handshake_locked(exec_ctx, c);
+    start_handshake_locked(c);
     gpr_mu_unlock(&c->mu);
   }
 }
 
-static void chttp2_connector_connect(grpc_exec_ctx* exec_ctx,
-                                     grpc_connector* con,
+static void chttp2_connector_connect(grpc_connector* con,
                                      const grpc_connect_in_args* args,
                                      grpc_connect_out_args* result,
                                      grpc_closure* notify) {
   chttp2_connector* c = (chttp2_connector*)con;
   grpc_resolved_address addr;
-  grpc_get_subchannel_address_arg(exec_ctx, args->channel_args, &addr);
+  grpc_get_subchannel_address_arg(args->channel_args, &addr);
   gpr_mu_lock(&c->mu);
   GPR_ASSERT(c->notify == nullptr);
   c->notify = notify;
@@ -215,9 +209,8 @@
   GRPC_CLOSURE_INIT(&c->connected, connected, c, grpc_schedule_on_exec_ctx);
   GPR_ASSERT(!c->connecting);
   c->connecting = true;
-  grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint,
-                          args->interested_parties, args->channel_args, &addr,
-                          args->deadline);
+  grpc_tcp_client_connect(&c->connected, &c->endpoint, args->interested_parties,
+                          args->channel_args, &addr, args->deadline);
   gpr_mu_unlock(&c->mu);
 }
 
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.h b/src/core/ext/transport/chttp2/client/chttp2_connector.h
index 63f264e..e258892 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.h
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.h
@@ -19,16 +19,8 @@
 #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H
 #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #include "src/core/ext/filters/client_channel/connector.h"
 
 grpc_connector* grpc_chttp2_connector_create();
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_CLIENT_CHTTP2_CONNECTOR_H */
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create.cc b/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
index 028b69e..6a1b709 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create.cc
@@ -34,21 +34,19 @@
     grpc_client_channel_factory* cc_factory) {}
 
 static void client_channel_factory_unref(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory) {}
+    grpc_client_channel_factory* cc_factory) {}
 
 static grpc_subchannel* client_channel_factory_create_subchannel(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory,
-    const grpc_subchannel_args* args) {
+    grpc_client_channel_factory* cc_factory, const grpc_subchannel_args* args) {
   grpc_connector* connector = grpc_chttp2_connector_create();
-  grpc_subchannel* s = grpc_subchannel_create(exec_ctx, connector, args);
-  grpc_connector_unref(exec_ctx, connector);
+  grpc_subchannel* s = grpc_subchannel_create(connector, args);
+  grpc_connector_unref(connector);
   return s;
 }
 
 static grpc_channel* client_channel_factory_create_channel(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory,
-    const char* target, grpc_client_channel_type type,
-    const grpc_channel_args* args) {
+    grpc_client_channel_factory* cc_factory, const char* target,
+    grpc_client_channel_type type, const grpc_channel_args* args) {
   if (target == nullptr) {
     gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
     return nullptr;
@@ -56,14 +54,14 @@
   // Add channel arg containing the server URI.
   grpc_arg arg = grpc_channel_arg_string_create(
       (char*)GRPC_ARG_SERVER_URI,
-      grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
+      grpc_resolver_factory_add_default_prefix_if_needed(target));
   const char* to_remove[] = {GRPC_ARG_SERVER_URI};
   grpc_channel_args* new_args =
       grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
   gpr_free(arg.value.string);
-  grpc_channel* channel = grpc_channel_create(exec_ctx, target, new_args,
-                                              GRPC_CLIENT_CHANNEL, nullptr);
-  grpc_channel_args_destroy(exec_ctx, new_args);
+  grpc_channel* channel =
+      grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr);
+  grpc_channel_args_destroy(new_args);
   return channel;
 }
 
@@ -82,7 +80,7 @@
 grpc_channel* grpc_insecure_channel_create(const char* target,
                                            const grpc_channel_args* args,
                                            void* reserved) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GRPC_API_TRACE(
       "grpc_insecure_channel_create(target=%s, args=%p, reserved=%p)", 3,
       (target, args, reserved));
@@ -93,11 +91,11 @@
   grpc_channel_args* new_args = grpc_channel_args_copy_and_add(args, &arg, 1);
   // Create channel.
   grpc_channel* channel = client_channel_factory_create_channel(
-      &exec_ctx, &client_channel_factory, target,
-      GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args);
+      &client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR,
+      new_args);
   // Clean up.
-  grpc_channel_args_destroy(&exec_ctx, new_args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_channel_args_destroy(new_args);
+
   return channel != nullptr ? channel
                             : grpc_lame_client_channel_create(
                                   target, GRPC_STATUS_INTERNAL,
diff --git a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
index c6b149d..0cdea5a 100644
--- a/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
+++ b/src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc
@@ -37,7 +37,7 @@
 
 grpc_channel* grpc_insecure_channel_create_from_fd(
     const char* target, int fd, const grpc_channel_args* args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GRPC_API_TRACE("grpc_insecure_channel_create(target=%p, fd=%d, args=%p)", 3,
                  (target, fd, args));
 
@@ -50,17 +50,17 @@
   GPR_ASSERT(fcntl(fd, F_SETFL, flags | O_NONBLOCK) == 0);
 
   grpc_endpoint* client = grpc_tcp_client_create_from_fd(
-      &exec_ctx, grpc_fd_create(fd, "client"), args, "fd-client");
+      grpc_fd_create(fd, "client"), args, "fd-client");
 
   grpc_transport* transport =
-      grpc_create_chttp2_transport(&exec_ctx, final_args, client, true);
+      grpc_create_chttp2_transport(final_args, client, true);
   GPR_ASSERT(transport);
   grpc_channel* channel = grpc_channel_create(
-      &exec_ctx, target, final_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
-  grpc_channel_args_destroy(&exec_ctx, final_args);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
+      target, final_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
+  grpc_channel_args_destroy(final_args);
+  grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
 
   return channel != nullptr ? channel
                             : grpc_lame_client_channel_create(
@@ -73,7 +73,7 @@
 grpc_channel* grpc_insecure_channel_create_from_fd(
     const char* target, int fd, const grpc_channel_args* args) {
   GPR_ASSERT(0);
-  return NULL;
+  return nullptr;
 }
 
 #endif  // GPR_SUPPORT_CHANNELS_FROM_FD
diff --git a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
index dd2bc42..27c5b96 100644
--- a/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
+++ b/src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc
@@ -41,10 +41,10 @@
     grpc_client_channel_factory* cc_factory) {}
 
 static void client_channel_factory_unref(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory) {}
+    grpc_client_channel_factory* cc_factory) {}
 
 static grpc_subchannel_args* get_secure_naming_subchannel_args(
-    grpc_exec_ctx* exec_ctx, const grpc_subchannel_args* args) {
+    const grpc_subchannel_args* args) {
   grpc_channel_credentials* channel_credentials =
       grpc_channel_credentials_find_in_args(args->args);
   if (channel_credentials == nullptr) {
@@ -68,7 +68,7 @@
   const char* server_uri_str = server_uri_arg->value.string;
   GPR_ASSERT(server_uri_str != nullptr);
   grpc_uri* server_uri =
-      grpc_uri_parse(exec_ctx, server_uri_str, true /* supress errors */);
+      grpc_uri_parse(server_uri_str, true /* supress errors */);
   GPR_ASSERT(server_uri != nullptr);
   const char* server_uri_path;
   server_uri_path =
@@ -81,7 +81,7 @@
     const char* target_uri_str =
         grpc_get_subchannel_address_uri_arg(args->args);
     grpc_uri* target_uri =
-        grpc_uri_parse(exec_ctx, target_uri_str, false /* suppress errors */);
+        grpc_uri_parse(target_uri_str, false /* suppress errors */);
     GPR_ASSERT(target_uri != nullptr);
     if (target_uri->path[0] != '\0') {  // "path" may be empty
       const grpc_slice key = grpc_slice_from_static_string(
@@ -89,7 +89,7 @@
       const char* value =
           (const char*)grpc_slice_hash_table_get(targets_info, key);
       if (value != nullptr) target_name_to_check = gpr_strdup(value);
-      grpc_slice_unref_internal(exec_ctx, key);
+      grpc_slice_unref_internal(key);
     }
     if (target_name_to_check == nullptr) {
       // If the target name to check hasn't already been set, fall back to using
@@ -107,7 +107,7 @@
   grpc_channel_args* new_args_from_connector = nullptr;
   const grpc_security_status security_status =
       grpc_channel_credentials_create_security_connector(
-          exec_ctx, channel_credentials, target_name_to_check, args->args,
+          channel_credentials, target_name_to_check, args->args,
           &subchannel_security_connector, &new_args_from_connector);
   if (security_status != GRPC_SECURITY_OK) {
     gpr_log(GPR_ERROR,
@@ -123,10 +123,10 @@
   grpc_channel_args* new_args = grpc_channel_args_copy_and_add(
       new_args_from_connector != nullptr ? new_args_from_connector : args->args,
       &new_security_connector_arg, 1);
-  GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &subchannel_security_connector->base,
+  GRPC_SECURITY_CONNECTOR_UNREF(&subchannel_security_connector->base,
                                 "lb_channel_create");
   if (new_args_from_connector != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, new_args_from_connector);
+    grpc_channel_args_destroy(new_args_from_connector);
   }
   grpc_subchannel_args* final_sc_args =
       (grpc_subchannel_args*)gpr_malloc(sizeof(*final_sc_args));
@@ -136,10 +136,9 @@
 }
 
 static grpc_subchannel* client_channel_factory_create_subchannel(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory,
-    const grpc_subchannel_args* args) {
+    grpc_client_channel_factory* cc_factory, const grpc_subchannel_args* args) {
   grpc_subchannel_args* subchannel_args =
-      get_secure_naming_subchannel_args(exec_ctx, args);
+      get_secure_naming_subchannel_args(args);
   if (subchannel_args == nullptr) {
     gpr_log(
         GPR_ERROR,
@@ -147,19 +146,16 @@
     return nullptr;
   }
   grpc_connector* connector = grpc_chttp2_connector_create();
-  grpc_subchannel* s =
-      grpc_subchannel_create(exec_ctx, connector, subchannel_args);
-  grpc_connector_unref(exec_ctx, connector);
-  grpc_channel_args_destroy(exec_ctx,
-                            (grpc_channel_args*)subchannel_args->args);
+  grpc_subchannel* s = grpc_subchannel_create(connector, subchannel_args);
+  grpc_connector_unref(connector);
+  grpc_channel_args_destroy((grpc_channel_args*)subchannel_args->args);
   gpr_free(subchannel_args);
   return s;
 }
 
 static grpc_channel* client_channel_factory_create_channel(
-    grpc_exec_ctx* exec_ctx, grpc_client_channel_factory* cc_factory,
-    const char* target, grpc_client_channel_type type,
-    const grpc_channel_args* args) {
+    grpc_client_channel_factory* cc_factory, const char* target,
+    grpc_client_channel_type type, const grpc_channel_args* args) {
   if (target == nullptr) {
     gpr_log(GPR_ERROR, "cannot create channel with NULL target name");
     return nullptr;
@@ -167,14 +163,14 @@
   // Add channel arg containing the server URI.
   grpc_arg arg = grpc_channel_arg_string_create(
       (char*)GRPC_ARG_SERVER_URI,
-      grpc_resolver_factory_add_default_prefix_if_needed(exec_ctx, target));
+      grpc_resolver_factory_add_default_prefix_if_needed(target));
   const char* to_remove[] = {GRPC_ARG_SERVER_URI};
   grpc_channel_args* new_args =
       grpc_channel_args_copy_and_add_and_remove(args, to_remove, 1, &arg, 1);
   gpr_free(arg.value.string);
-  grpc_channel* channel = grpc_channel_create(exec_ctx, target, new_args,
-                                              GRPC_CLIENT_CHANNEL, nullptr);
-  grpc_channel_args_destroy(exec_ctx, new_args);
+  grpc_channel* channel =
+      grpc_channel_create(target, new_args, GRPC_CLIENT_CHANNEL, nullptr);
+  grpc_channel_args_destroy(new_args);
   return channel;
 }
 
@@ -194,7 +190,7 @@
                                          const char* target,
                                          const grpc_channel_args* args,
                                          void* reserved) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GRPC_API_TRACE(
       "grpc_secure_channel_create(creds=%p, target=%s, args=%p, "
       "reserved=%p)",
@@ -211,11 +207,10 @@
         args, args_to_add, GPR_ARRAY_SIZE(args_to_add));
     // Create channel.
     channel = client_channel_factory_create_channel(
-        &exec_ctx, &client_channel_factory, target,
-        GRPC_CLIENT_CHANNEL_TYPE_REGULAR, new_args);
+        &client_channel_factory, target, GRPC_CLIENT_CHANNEL_TYPE_REGULAR,
+        new_args);
     // Clean up.
-    grpc_channel_args_destroy(&exec_ctx, new_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_channel_args_destroy(new_args);
   }
   return channel != nullptr ? channel
                             : grpc_lame_client_channel_create(
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.cc b/src/core/ext/transport/chttp2/server/chttp2_server.cc
index 1f4517a..5669fa4 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.cc
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.cc
@@ -69,17 +69,17 @@
 } server_connection_state;
 
 static void server_connection_state_unref(
-    grpc_exec_ctx* exec_ctx, server_connection_state* connection_state) {
+    server_connection_state* connection_state) {
   if (gpr_unref(&connection_state->refs)) {
     if (connection_state->transport != nullptr) {
-      GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, connection_state->transport,
+      GRPC_CHTTP2_UNREF_TRANSPORT(connection_state->transport,
                                   "receive settings timeout");
     }
     gpr_free(connection_state);
   }
 }
 
-static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_timeout(void* arg, grpc_error* error) {
   server_connection_state* connection_state = (server_connection_state*)arg;
   // Note that we may be called with GRPC_ERROR_NONE when the timer fires
   // or with an error indicating that the timer system is being shut down.
@@ -87,22 +87,20 @@
     grpc_transport_op* op = grpc_make_transport_op(nullptr);
     op->disconnect_with_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "Did not receive HTTP/2 settings before handshake timeout");
-    grpc_transport_perform_op(exec_ctx, &connection_state->transport->base, op);
+    grpc_transport_perform_op(&connection_state->transport->base, op);
   }
-  server_connection_state_unref(exec_ctx, connection_state);
+  server_connection_state_unref(connection_state);
 }
 
-static void on_receive_settings(grpc_exec_ctx* exec_ctx, void* arg,
-                                grpc_error* error) {
+static void on_receive_settings(void* arg, grpc_error* error) {
   server_connection_state* connection_state = (server_connection_state*)arg;
   if (error == GRPC_ERROR_NONE) {
-    grpc_timer_cancel(exec_ctx, &connection_state->timer);
+    grpc_timer_cancel(&connection_state->timer);
   }
-  server_connection_state_unref(exec_ctx, connection_state);
+  server_connection_state_unref(connection_state);
 }
 
-static void on_handshake_done(grpc_exec_ctx* exec_ctx, void* arg,
-                              grpc_error* error) {
+static void on_handshake_done(void* arg, grpc_error* error) {
   grpc_handshaker_args* args = (grpc_handshaker_args*)arg;
   server_connection_state* connection_state =
       (server_connection_state*)args->user_data;
@@ -117,10 +115,10 @@
       // before destroying them, even if we know that there are no
       // pending read/write callbacks.  This should be fixed, at which
       // point this can be removed.
-      grpc_endpoint_shutdown(exec_ctx, args->endpoint, GRPC_ERROR_NONE);
-      grpc_endpoint_destroy(exec_ctx, args->endpoint);
-      grpc_channel_args_destroy(exec_ctx, args->args);
-      grpc_slice_buffer_destroy_internal(exec_ctx, args->read_buffer);
+      grpc_endpoint_shutdown(args->endpoint, GRPC_ERROR_NONE);
+      grpc_endpoint_destroy(args->endpoint);
+      grpc_channel_args_destroy(args->args);
+      grpc_slice_buffer_destroy_internal(args->read_buffer);
       gpr_free(args->read_buffer);
     }
   } else {
@@ -128,10 +126,10 @@
     // handshaker may have handed off the connection to some external
     // code, so we can just clean up here without creating a transport.
     if (args->endpoint != nullptr) {
-      grpc_transport* transport = grpc_create_chttp2_transport(
-          exec_ctx, args->args, args->endpoint, false);
+      grpc_transport* transport =
+          grpc_create_chttp2_transport(args->args, args->endpoint, false);
       grpc_server_setup_transport(
-          exec_ctx, connection_state->svr_state->server, transport,
+          connection_state->svr_state->server, transport,
           connection_state->accepting_pollset, args->args);
       // Use notify_on_receive_settings callback to enforce the
       // handshake deadline.
@@ -141,16 +139,14 @@
                         on_receive_settings, connection_state,
                         grpc_schedule_on_exec_ctx);
       grpc_chttp2_transport_start_reading(
-          exec_ctx, transport, args->read_buffer,
-          &connection_state->on_receive_settings);
-      grpc_channel_args_destroy(exec_ctx, args->args);
+          transport, args->read_buffer, &connection_state->on_receive_settings);
+      grpc_channel_args_destroy(args->args);
       gpr_ref(&connection_state->refs);
       GRPC_CHTTP2_REF_TRANSPORT((grpc_chttp2_transport*)transport,
                                 "receive settings timeout");
       GRPC_CLOSURE_INIT(&connection_state->on_timeout, on_timeout,
                         connection_state, grpc_schedule_on_exec_ctx);
-      grpc_timer_init(exec_ctx, &connection_state->timer,
-                      connection_state->deadline,
+      grpc_timer_init(&connection_state->timer, connection_state->deadline,
                       &connection_state->on_timeout);
     }
   }
@@ -158,21 +154,21 @@
       &connection_state->svr_state->pending_handshake_mgrs,
       connection_state->handshake_mgr);
   gpr_mu_unlock(&connection_state->svr_state->mu);
-  grpc_handshake_manager_destroy(exec_ctx, connection_state->handshake_mgr);
+  grpc_handshake_manager_destroy(connection_state->handshake_mgr);
   gpr_free(connection_state->acceptor);
-  grpc_tcp_server_unref(exec_ctx, connection_state->svr_state->tcp_server);
-  server_connection_state_unref(exec_ctx, connection_state);
+  grpc_tcp_server_unref(connection_state->svr_state->tcp_server);
+  server_connection_state_unref(connection_state);
 }
 
-static void on_accept(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* tcp,
+static void on_accept(void* arg, grpc_endpoint* tcp,
                       grpc_pollset* accepting_pollset,
                       grpc_tcp_server_acceptor* acceptor) {
   server_state* state = (server_state*)arg;
   gpr_mu_lock(&state->mu);
   if (state->shutdown) {
     gpr_mu_unlock(&state->mu);
-    grpc_endpoint_shutdown(exec_ctx, tcp, GRPC_ERROR_NONE);
-    grpc_endpoint_destroy(exec_ctx, tcp);
+    grpc_endpoint_shutdown(tcp, GRPC_ERROR_NONE);
+    grpc_endpoint_destroy(tcp);
     gpr_free(acceptor);
     return;
   }
@@ -188,58 +184,56 @@
   connection_state->accepting_pollset = accepting_pollset;
   connection_state->acceptor = acceptor;
   connection_state->handshake_mgr = handshake_mgr;
-  grpc_handshakers_add(exec_ctx, HANDSHAKER_SERVER, state->args,
+  grpc_handshakers_add(HANDSHAKER_SERVER, state->args,
                        connection_state->handshake_mgr);
   const grpc_arg* timeout_arg =
       grpc_channel_args_find(state->args, GRPC_ARG_SERVER_HANDSHAKE_TIMEOUT_MS);
   connection_state->deadline =
-      grpc_exec_ctx_now(exec_ctx) +
+      grpc_core::ExecCtx::Get()->Now() +
       grpc_channel_arg_get_integer(timeout_arg,
                                    {120 * GPR_MS_PER_SEC, 1, INT_MAX});
-  grpc_handshake_manager_do_handshake(exec_ctx, connection_state->handshake_mgr,
-                                      tcp, state->args,
-                                      connection_state->deadline, acceptor,
-                                      on_handshake_done, connection_state);
+  grpc_handshake_manager_do_handshake(
+      connection_state->handshake_mgr, nullptr /* interested_parties */, tcp,
+      state->args, connection_state->deadline, acceptor, on_handshake_done,
+      connection_state);
 }
 
 /* Server callback: start listening on our ports */
-static void server_start_listener(grpc_exec_ctx* exec_ctx, grpc_server* server,
-                                  void* arg, grpc_pollset** pollsets,
+static void server_start_listener(grpc_server* server, void* arg,
+                                  grpc_pollset** pollsets,
                                   size_t pollset_count) {
   server_state* state = (server_state*)arg;
   gpr_mu_lock(&state->mu);
   state->shutdown = false;
   gpr_mu_unlock(&state->mu);
-  grpc_tcp_server_start(exec_ctx, state->tcp_server, pollsets, pollset_count,
-                        on_accept, state);
+  grpc_tcp_server_start(state->tcp_server, pollsets, pollset_count, on_accept,
+                        state);
 }
 
-static void tcp_server_shutdown_complete(grpc_exec_ctx* exec_ctx, void* arg,
-                                         grpc_error* error) {
+static void tcp_server_shutdown_complete(void* arg, grpc_error* error) {
   server_state* state = (server_state*)arg;
   /* ensure all threads have unlocked */
   gpr_mu_lock(&state->mu);
   grpc_closure* destroy_done = state->server_destroy_listener_done;
   GPR_ASSERT(state->shutdown);
   grpc_handshake_manager_pending_list_shutdown_all(
-      exec_ctx, state->pending_handshake_mgrs, GRPC_ERROR_REF(error));
+      state->pending_handshake_mgrs, GRPC_ERROR_REF(error));
   gpr_mu_unlock(&state->mu);
   // Flush queued work before destroying handshaker factory, since that
   // may do a synchronous unref.
-  grpc_exec_ctx_flush(exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   if (destroy_done != nullptr) {
-    destroy_done->cb(exec_ctx, destroy_done->cb_arg, GRPC_ERROR_REF(error));
-    grpc_exec_ctx_flush(exec_ctx);
+    destroy_done->cb(destroy_done->cb_arg, GRPC_ERROR_REF(error));
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_channel_args_destroy(exec_ctx, state->args);
+  grpc_channel_args_destroy(state->args);
   gpr_mu_destroy(&state->mu);
   gpr_free(state);
 }
 
 /* Server callback: destroy the tcp listener (so we don't generate further
    callbacks) */
-static void server_destroy_listener(grpc_exec_ctx* exec_ctx,
-                                    grpc_server* server, void* arg,
+static void server_destroy_listener(grpc_server* server, void* arg,
                                     grpc_closure* destroy_done) {
   server_state* state = (server_state*)arg;
   gpr_mu_lock(&state->mu);
@@ -247,12 +241,11 @@
   state->server_destroy_listener_done = destroy_done;
   grpc_tcp_server* tcp_server = state->tcp_server;
   gpr_mu_unlock(&state->mu);
-  grpc_tcp_server_shutdown_listeners(exec_ctx, tcp_server);
-  grpc_tcp_server_unref(exec_ctx, tcp_server);
+  grpc_tcp_server_shutdown_listeners(tcp_server);
+  grpc_tcp_server_unref(tcp_server);
 }
 
-grpc_error* grpc_chttp2_server_add_port(grpc_exec_ctx* exec_ctx,
-                                        grpc_server* server, const char* addr,
+grpc_error* grpc_chttp2_server_add_port(grpc_server* server, const char* addr,
                                         grpc_channel_args* args,
                                         int* port_num) {
   grpc_resolved_addresses* resolved = nullptr;
@@ -276,8 +269,8 @@
   GRPC_CLOSURE_INIT(&state->tcp_server_shutdown_complete,
                     tcp_server_shutdown_complete, state,
                     grpc_schedule_on_exec_ctx);
-  err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete,
-                               args, &tcp_server);
+  err = grpc_tcp_server_create(&state->tcp_server_shutdown_complete, args,
+                               &tcp_server);
   if (err != GRPC_ERROR_NONE) {
     goto error;
   }
@@ -326,7 +319,7 @@
   grpc_resolved_addresses_destroy(resolved);
 
   /* Register with the server only upon success */
-  grpc_server_add_listener(exec_ctx, server, state, server_start_listener,
+  grpc_server_add_listener(server, state, server_start_listener,
                            server_destroy_listener);
   goto done;
 
@@ -337,9 +330,9 @@
     grpc_resolved_addresses_destroy(resolved);
   }
   if (tcp_server) {
-    grpc_tcp_server_unref(exec_ctx, tcp_server);
+    grpc_tcp_server_unref(tcp_server);
   } else {
-    grpc_channel_args_destroy(exec_ctx, args);
+    grpc_channel_args_destroy(args);
     gpr_free(state);
   }
   *port_num = 0;
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.h b/src/core/ext/transport/chttp2/server/chttp2_server.h
index 4e0e7aa..7de859d 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.h
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.h
@@ -23,18 +23,9 @@
 
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /// Adds a port to \a server.  Sets \a port_num to the port number.
 /// Takes ownership of \a args.
-grpc_error* grpc_chttp2_server_add_port(grpc_exec_ctx* exec_ctx,
-                                        grpc_server* server, const char* addr,
+grpc_error* grpc_chttp2_server_add_port(grpc_server* server, const char* addr,
                                         grpc_channel_args* args, int* port_num);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_SERVER_CHTTP2_SERVER_H */
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
index 8984896..52c42d0 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc
@@ -26,12 +26,12 @@
 #include "src/core/lib/surface/server.h"
 
 int grpc_server_add_insecure_http2_port(grpc_server* server, const char* addr) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   int port_num = 0;
   GRPC_API_TRACE("grpc_server_add_insecure_http2_port(server=%p, addr=%s)", 2,
                  (server, addr));
   grpc_error* err = grpc_chttp2_server_add_port(
-      &exec_ctx, server, addr,
+      server, addr,
       grpc_channel_args_copy(grpc_server_get_channel_args(server)), &port_num);
   if (err != GRPC_ERROR_NONE) {
     const char* msg = grpc_error_string(err);
@@ -39,6 +39,6 @@
 
     GRPC_ERROR_UNREF(err);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return port_num;
 }
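
The hunk above shows the pattern this merge applies throughout the tree: the hand-threaded grpc_exec_ctx* parameter disappears, a scoped grpc_core::ExecCtx is declared at the public API surface, and core code reaches the context through grpc_core::ExecCtx::Get(). Below is a minimal sketch of the resulting shape, assuming the exec_ctx header already referenced in this diff; the function name, the deadline arithmetic, and the 1000 ms literal are illustrative only, not taken from the patch.

#include "src/core/lib/iomgr/exec_ctx.h"

// Hypothetical entry point illustrating the post-merge pattern.
void hypothetical_api_entry_point() {
  grpc_core::ExecCtx exec_ctx;  // scoped context, replaces GRPC_EXEC_CTX_INIT
  // Core code no longer threads an exec_ctx argument; it reads the
  // thread-local instance instead, e.g. to compute a deadline:
  grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 1000;
  (void)deadline;
  // Queued closures can still be drained explicitly where ordering matters...
  grpc_core::ExecCtx::Get()->Flush();
  // ...and are otherwise drained when exec_ctx leaves scope, which is
  // consistent with the grpc_exec_ctx_finish(&exec_ctx) deletions above.
}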
diff --git a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
index 3fe05ce..dafd4af 100644
--- a/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
+++ b/src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc
@@ -38,32 +38,29 @@
                                               void* reserved, int fd) {
   GPR_ASSERT(reserved == nullptr);
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   char* name;
   gpr_asprintf(&name, "fd:%d", fd);
 
-  grpc_endpoint* server_endpoint =
-      grpc_tcp_create(&exec_ctx, grpc_fd_create(fd, name),
-                      grpc_server_get_channel_args(server), name);
+  grpc_endpoint* server_endpoint = grpc_tcp_create(
+      grpc_fd_create(fd, name), grpc_server_get_channel_args(server), name);
 
   gpr_free(name);
 
   const grpc_channel_args* server_args = grpc_server_get_channel_args(server);
   grpc_transport* transport = grpc_create_chttp2_transport(
-      &exec_ctx, server_args, server_endpoint, false /* is_client */);
+      server_args, server_endpoint, false /* is_client */);
 
   grpc_pollset** pollsets;
   size_t num_pollsets = 0;
   grpc_server_get_pollsets(server, &pollsets, &num_pollsets);
 
   for (size_t i = 0; i < num_pollsets; i++) {
-    grpc_endpoint_add_to_pollset(&exec_ctx, server_endpoint, pollsets[i]);
+    grpc_endpoint_add_to_pollset(server_endpoint, pollsets[i]);
   }
 
-  grpc_server_setup_transport(&exec_ctx, server, transport, nullptr,
-                              server_args);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_server_setup_transport(server, transport, nullptr, server_args);
+  grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 }
 
 #else  // !GPR_SUPPORT_CHANNELS_FROM_FD
diff --git a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc
index ac3ea40..723af97 100644
--- a/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc
+++ b/src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc
@@ -36,7 +36,7 @@
 
 int grpc_server_add_secure_http2_port(grpc_server* server, const char* addr,
                                       grpc_server_credentials* creds) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_error* err = GRPC_ERROR_NONE;
   grpc_server_security_connector* sc = nullptr;
   int port_num = 0;
@@ -52,8 +52,7 @@
         "No credentials specified for secure server port (creds==NULL)");
     goto done;
   }
-  status =
-      grpc_server_credentials_create_security_connector(&exec_ctx, creds, &sc);
+  status = grpc_server_credentials_create_security_connector(creds, &sc);
   if (status != GRPC_SECURITY_OK) {
     char* msg;
     gpr_asprintf(&msg,
@@ -72,12 +71,12 @@
       grpc_channel_args_copy_and_add(grpc_server_get_channel_args(server),
                                      args_to_add, GPR_ARRAY_SIZE(args_to_add));
   // Add server port.
-  err = grpc_chttp2_server_add_port(&exec_ctx, server, addr, args, &port_num);
+  err = grpc_chttp2_server_add_port(server, addr, args, &port_num);
 done:
   if (sc != nullptr) {
-    GRPC_SECURITY_CONNECTOR_UNREF(&exec_ctx, &sc->base, "server");
+    GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "server");
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   if (err != GRPC_ERROR_NONE) {
     const char* msg = grpc_error_string(err);
     gpr_log(GPR_ERROR, "%s", msg);
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.cc b/src/core/ext/transport/chttp2/transport/bin_decoder.cc
index 3ccae7a..984cd4c 100644
--- a/src/core/ext/transport/chttp2/transport/bin_decoder.cc
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.cc
@@ -130,8 +130,7 @@
   return true;
 }
 
-grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx* exec_ctx,
-                                     grpc_slice input) {
+grpc_slice grpc_chttp2_base64_decode(grpc_slice input) {
   size_t input_length = GRPC_SLICE_LENGTH(input);
   size_t output_length = input_length / 4 * 3;
   struct grpc_base64_decode_context ctx;
@@ -167,7 +166,7 @@
     char* s = grpc_slice_to_c_string(input);
     gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
     gpr_free(s);
-    grpc_slice_unref_internal(exec_ctx, output);
+    grpc_slice_unref_internal(output);
     return grpc_empty_slice();
   }
   GPR_ASSERT(ctx.output_cur == GRPC_SLICE_END_PTR(output));
@@ -175,8 +174,7 @@
   return output;
 }
 
-grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx* exec_ctx,
-                                                 grpc_slice input,
+grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input,
                                                  size_t output_length) {
   size_t input_length = GRPC_SLICE_LENGTH(input);
   grpc_slice output = GRPC_SLICE_MALLOC(output_length);
@@ -189,7 +187,7 @@
             "grpc_chttp2_base64_decode_with_length has a length of %d, which "
             "has a tail of 1 byte.\n",
             (int)input_length);
-    grpc_slice_unref_internal(exec_ctx, output);
+    grpc_slice_unref_internal(output);
     return grpc_empty_slice();
   }
 
@@ -199,7 +197,7 @@
             "than the max possible output length %d.\n",
             (int)output_length,
             (int)(input_length / 4 * 3 + tail_xtra[input_length % 4]));
-    grpc_slice_unref_internal(exec_ctx, output);
+    grpc_slice_unref_internal(output);
     return grpc_empty_slice();
   }
 
@@ -213,7 +211,7 @@
     char* s = grpc_slice_to_c_string(input);
     gpr_log(GPR_ERROR, "Base64 decoding failed, input string:\n%s\n", s);
     gpr_free(s);
-    grpc_slice_unref_internal(exec_ctx, output);
+    grpc_slice_unref_internal(output);
     return grpc_empty_slice();
   }
   GPR_ASSERT(ctx.output_cur == GRPC_SLICE_END_PTR(output));
diff --git a/src/core/ext/transport/chttp2/transport/bin_decoder.h b/src/core/ext/transport/chttp2/transport/bin_decoder.h
index a9c4c9a..9cb75cc 100644
--- a/src/core/ext/transport/chttp2/transport/bin_decoder.h
+++ b/src/core/ext/transport/chttp2/transport/bin_decoder.h
@@ -22,10 +22,6 @@
 #include <grpc/slice.h>
 #include <stdbool.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 struct grpc_base64_decode_context {
   /* input/output: */
   uint8_t* input_cur;
@@ -44,17 +40,12 @@
 
 /* base64 decode a slice with pad chars. Returns a new slice, does not take
    ownership of the input. Returns an empty slice if decoding fails. */
-grpc_slice grpc_chttp2_base64_decode(grpc_exec_ctx* exec_ctx, grpc_slice input);
+grpc_slice grpc_chttp2_base64_decode(grpc_slice input);
 
 /* base64 decode a slice without pad chars; the data length is needed. Returns
    a new slice, does not take ownership of the input. Returns an empty slice
    if decoding fails. */
-grpc_slice grpc_chttp2_base64_decode_with_length(grpc_exec_ctx* exec_ctx,
-                                                 grpc_slice input,
+grpc_slice grpc_chttp2_base64_decode_with_length(grpc_slice input,
                                                  size_t output_length);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_DECODER_H */
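
The comments above state the contract for both decoders: a new slice is returned, the input is not consumed, and failure yields an empty slice. A hypothetical caller under the new exec_ctx-free signatures might look like the sketch below; decode_padded is an invented name, the length check is just one way to honor the stated empty-slice-on-failure contract, and it assumes a grpc_core::ExecCtx is already live on the calling thread since the decoder unrefs slices internally.

#include <grpc/slice.h>

#include "src/core/ext/transport/chttp2/transport/bin_decoder.h"

// Hypothetical wrapper around the padded-input decoder declared above.
grpc_slice decode_padded(grpc_slice input) {
  grpc_slice out = grpc_chttp2_base64_decode(input);  // no exec_ctx argument
  if (GRPC_SLICE_LENGTH(out) == 0) {
    // Per the header comment, an empty slice signals a decoding failure
    // (the decoder has already logged the offending input).
  }
  return out;
}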
diff --git a/src/core/ext/transport/chttp2/transport/bin_encoder.h b/src/core/ext/transport/chttp2/transport/bin_encoder.h
index 0be3633..93ad0df 100644
--- a/src/core/ext/transport/chttp2/transport/bin_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/bin_encoder.h
@@ -21,10 +21,6 @@
 
 #include <grpc/slice.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* base64 encode a slice. Returns a new slice, does not take ownership of the
    input */
 grpc_slice grpc_chttp2_base64_encode(grpc_slice input);
@@ -36,12 +32,8 @@
 /* equivalent to:
    grpc_slice x = grpc_chttp2_base64_encode(input);
    grpc_slice y = grpc_chttp2_huffman_compress(x);
-   grpc_slice_unref_internal(exec_ctx, x);
+   grpc_slice_unref_internal(x);
    return y; */
 grpc_slice grpc_chttp2_base64_encode_and_huffman_compress(grpc_slice input);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_BIN_ENCODER_H */
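
The equivalence spelled out in the comment above can be written down directly under the post-merge signatures. The sketch below is illustrative rather than the library's actual implementation; base64_then_huffman is an invented name, and the slice_internal.h path for grpc_slice_unref_internal is assumed from the surrounding tree.

#include <grpc/slice.h>

#include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
#include "src/core/lib/slice/slice_internal.h"  // grpc_slice_unref_internal (assumed path)

// Hand-written composition that the fused helper above is documented to equal.
grpc_slice base64_then_huffman(grpc_slice input) {
  grpc_slice x = grpc_chttp2_base64_encode(input);  // new slice, input untouched
  grpc_slice y = grpc_chttp2_huffman_compress(x);   // new slice
  grpc_slice_unref_internal(x);                     // drop the intermediate
  return y;
}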
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc b/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc
index 2569347..97c1878 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_plugin.cc
@@ -20,6 +20,6 @@
 #include "src/core/lib/debug/trace.h"
 #include "src/core/lib/transport/metadata.h"
 
-extern "C" void grpc_chttp2_plugin_init(void) {}
+void grpc_chttp2_plugin_init(void) {}
 
-extern "C" void grpc_chttp2_plugin_shutdown(void) {}
+void grpc_chttp2_plugin_shutdown(void) {}
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
index 3a2c4b6..7c77de2 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.cc
@@ -79,7 +79,9 @@
     DEFAULT_SERVER_KEEPALIVE_TIME_MS;
 static int g_default_server_keepalive_timeout_ms =
     DEFAULT_SERVER_KEEPALIVE_TIMEOUT_MS;
-static bool g_default_keepalive_permit_without_calls =
+static bool g_default_client_keepalive_permit_without_calls =
+    DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS;
+static bool g_default_server_keepalive_permit_without_calls =
     DEFAULT_KEEPALIVE_PERMIT_WITHOUT_CALLS;
 
 static int g_default_min_sent_ping_interval_without_data_ms =
@@ -95,105 +97,77 @@
                                                          "chttp2_refcount");
 
 /* forward declarations of various callbacks that we'll build closures around */
-static void write_action_begin_locked(grpc_exec_ctx* exec_ctx, void* t,
-                                      grpc_error* error);
-static void write_action(grpc_exec_ctx* exec_ctx, void* t, grpc_error* error);
-static void write_action_end_locked(grpc_exec_ctx* exec_ctx, void* t,
-                                    grpc_error* error);
+static void write_action_begin_locked(void* t, grpc_error* error);
+static void write_action(void* t, grpc_error* error);
+static void write_action_end_locked(void* t, grpc_error* error);
 
-static void read_action_locked(grpc_exec_ctx* exec_ctx, void* t,
-                               grpc_error* error);
+static void read_action_locked(void* t, grpc_error* error);
 
-static void complete_fetch_locked(grpc_exec_ctx* exec_ctx, void* gs,
-                                  grpc_error* error);
+static void complete_fetch_locked(void* gs, grpc_error* error);
 /** Set a transport level setting, and push it to our peer */
-static void queue_setting_update(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_transport* t,
+static void queue_setting_update(grpc_chttp2_transport* t,
                                  grpc_chttp2_setting_id id, uint32_t value);
 
-static void close_from_api(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                           grpc_chttp2_stream* s, grpc_error* error);
+static void close_from_api(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+                           grpc_error* error);
 
 /** Start new streams that have been created if we can */
-static void maybe_start_some_streams(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t);
+static void maybe_start_some_streams(grpc_chttp2_transport* t);
 
-static void connectivity_state_set(grpc_exec_ctx* exec_ctx,
-                                   grpc_chttp2_transport* t,
+static void connectivity_state_set(grpc_chttp2_transport* t,
                                    grpc_connectivity_state state,
                                    grpc_error* error, const char* reason);
 
-static void incoming_byte_stream_destroy_locked(grpc_exec_ctx* exec_ctx,
-                                                void* byte_stream,
+static void incoming_byte_stream_destroy_locked(void* byte_stream,
                                                 grpc_error* error_ignored);
 static void incoming_byte_stream_publish_error(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
-    grpc_error* error);
-static void incoming_byte_stream_unref(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_incoming_byte_stream* bs);
+    grpc_chttp2_incoming_byte_stream* bs, grpc_error* error);
+static void incoming_byte_stream_unref(grpc_chttp2_incoming_byte_stream* bs);
 
-static void benign_reclaimer_locked(grpc_exec_ctx* exec_ctx, void* t,
-                                    grpc_error* error);
-static void destructive_reclaimer_locked(grpc_exec_ctx* exec_ctx, void* t,
-                                         grpc_error* error);
+static void benign_reclaimer_locked(void* t, grpc_error* error);
+static void destructive_reclaimer_locked(void* t, grpc_error* error);
 
-static void post_benign_reclaimer(grpc_exec_ctx* exec_ctx,
-                                  grpc_chttp2_transport* t);
-static void post_destructive_reclaimer(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_transport* t);
+static void post_benign_reclaimer(grpc_chttp2_transport* t);
+static void post_destructive_reclaimer(grpc_chttp2_transport* t);
 
-static void close_transport_locked(grpc_exec_ctx* exec_ctx,
-                                   grpc_chttp2_transport* t, grpc_error* error);
-static void end_all_the_calls(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                              grpc_error* error);
+static void close_transport_locked(grpc_chttp2_transport* t, grpc_error* error);
+static void end_all_the_calls(grpc_chttp2_transport* t, grpc_error* error);
 
-static void schedule_bdp_ping_locked(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t);
-static void start_bdp_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
-                                  grpc_error* error);
-static void finish_bdp_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
-                                   grpc_error* error);
-static void next_bdp_ping_timer_expired_locked(grpc_exec_ctx* exec_ctx,
-                                               void* tp, grpc_error* error);
+static void schedule_bdp_ping_locked(grpc_chttp2_transport* t);
+static void start_bdp_ping_locked(void* tp, grpc_error* error);
+static void finish_bdp_ping_locked(void* tp, grpc_error* error);
+static void next_bdp_ping_timer_expired_locked(void* tp, grpc_error* error);
 
-static void cancel_pings(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                         grpc_error* error);
-static void send_ping_locked(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+static void cancel_pings(grpc_chttp2_transport* t, grpc_error* error);
+static void send_ping_locked(grpc_chttp2_transport* t,
                              grpc_closure* on_initiate,
                              grpc_closure* on_complete);
-static void retry_initiate_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
-                                       grpc_error* error);
+static void retry_initiate_ping_locked(void* tp, grpc_error* error);
 
 /** keepalive-relevant functions */
-static void init_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                       grpc_error* error);
-static void start_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error);
-static void finish_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                         grpc_error* error);
-static void keepalive_watchdog_fired_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                            grpc_error* error);
+static void init_keepalive_ping_locked(void* arg, grpc_error* error);
+static void start_keepalive_ping_locked(void* arg, grpc_error* error);
+static void finish_keepalive_ping_locked(void* arg, grpc_error* error);
+static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error);
 
-static void reset_byte_stream(grpc_exec_ctx* exec_ctx, void* arg,
-                              grpc_error* error);
+static void reset_byte_stream(void* arg, grpc_error* error);
 
 /*******************************************************************************
  * CONSTRUCTION/DESTRUCTION/REFCOUNTING
  */
 
-static void destruct_transport(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_transport* t) {
+static void destruct_transport(grpc_chttp2_transport* t) {
   size_t i;
 
-  grpc_endpoint_destroy(exec_ctx, t->ep);
+  grpc_endpoint_destroy(t->ep);
 
-  grpc_slice_buffer_destroy_internal(exec_ctx, &t->qbuf);
+  grpc_slice_buffer_destroy_internal(&t->qbuf);
 
-  grpc_slice_buffer_destroy_internal(exec_ctx, &t->outbuf);
-  grpc_chttp2_hpack_compressor_destroy(exec_ctx, &t->hpack_compressor);
+  grpc_slice_buffer_destroy_internal(&t->outbuf);
+  grpc_chttp2_hpack_compressor_destroy(&t->hpack_compressor);
 
-  grpc_slice_buffer_destroy_internal(exec_ctx, &t->read_buffer);
-  grpc_chttp2_hpack_parser_destroy(exec_ctx, &t->hpack_parser);
+  grpc_slice_buffer_destroy_internal(&t->read_buffer);
+  grpc_chttp2_hpack_parser_destroy(&t->hpack_parser);
   grpc_chttp2_goaway_parser_destroy(&t->goaway_parser);
 
   for (i = 0; i < STREAM_LIST_COUNT; i++) {
@@ -206,12 +180,11 @@
   GPR_ASSERT(grpc_chttp2_stream_map_size(&t->stream_map) == 0);
 
   grpc_chttp2_stream_map_destroy(&t->stream_map);
-  grpc_connectivity_state_destroy(exec_ctx, &t->channel_callback.state_tracker);
+  grpc_connectivity_state_destroy(&t->channel_callback.state_tracker);
 
-  GRPC_COMBINER_UNREF(exec_ctx, t->combiner, "chttp2_transport");
+  GRPC_COMBINER_UNREF(t->combiner, "chttp2_transport");
 
-  cancel_pings(exec_ctx, t,
-               GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"));
+  cancel_pings(t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"));
 
   while (t->write_cb_pool) {
     grpc_chttp2_write_cb* next = t->write_cb_pool->next;
@@ -228,8 +201,7 @@
 }
 
 #ifndef NDEBUG
-void grpc_chttp2_unref_transport(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_transport* t, const char* reason,
+void grpc_chttp2_unref_transport(grpc_chttp2_transport* t, const char* reason,
                                  const char* file, int line) {
   if (grpc_trace_chttp2_refcount.enabled()) {
     gpr_atm val = gpr_atm_no_barrier_load(&t->refs.count);
@@ -237,7 +209,7 @@
             t, val, val - 1, reason, file, line);
   }
   if (!gpr_unref(&t->refs)) return;
-  destruct_transport(exec_ctx, t);
+  destruct_transport(t);
 }
 
 void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason,
@@ -250,10 +222,9 @@
   gpr_ref(&t->refs);
 }
 #else
-void grpc_chttp2_unref_transport(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_transport* t) {
+void grpc_chttp2_unref_transport(grpc_chttp2_transport* t) {
   if (!gpr_unref(&t->refs)) return;
-  destruct_transport(exec_ctx, t);
+  destruct_transport(t);
 }
 
 void grpc_chttp2_ref_transport(grpc_chttp2_transport* t) { gpr_ref(&t->refs); }
@@ -261,7 +232,7 @@
 
 static const grpc_transport_vtable* get_vtable(void);
 
-static void init_transport(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+static void init_transport(grpc_chttp2_transport* t,
                            const grpc_channel_args* channel_args,
                            grpc_endpoint* ep, bool is_client) {
   size_t i;
@@ -320,7 +291,7 @@
 
   t->goaway_error = GRPC_ERROR_NONE;
   grpc_chttp2_goaway_parser_init(&t->goaway_parser);
-  grpc_chttp2_hpack_parser_init(exec_ctx, &t->hpack_parser);
+  grpc_chttp2_hpack_parser_init(&t->hpack_parser);
 
   grpc_slice_buffer_init(&t->read_buffer);
 
@@ -351,14 +322,13 @@
 
   /* configure http2 the way we like it */
   if (is_client) {
-    queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
-    queue_setting_update(exec_ctx, t,
-                         GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
+    queue_setting_update(t, GRPC_CHTTP2_SETTINGS_ENABLE_PUSH, 0);
+    queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS, 0);
   }
-  queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
+  queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE,
                        DEFAULT_MAX_HEADER_LIST_SIZE);
-  queue_setting_update(exec_ctx, t,
-                       GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA, 1);
+  queue_setting_update(t, GRPC_CHTTP2_SETTINGS_GRPC_ALLOW_TRUE_BINARY_METADATA,
+                       1);
 
   t->ping_policy.max_pings_without_data = g_default_max_pings_without_data;
   t->ping_policy.min_sent_ping_interval_without_data =
@@ -375,6 +345,8 @@
     t->keepalive_timeout = g_default_client_keepalive_timeout_ms == INT_MAX
                                ? GRPC_MILLIS_INF_FUTURE
                                : g_default_client_keepalive_timeout_ms;
+    t->keepalive_permit_without_calls =
+        g_default_client_keepalive_permit_without_calls;
   } else {
     t->keepalive_time = g_default_server_keepalive_time_ms == INT_MAX
                             ? GRPC_MILLIS_INF_FUTURE
@@ -382,8 +354,9 @@
     t->keepalive_timeout = g_default_server_keepalive_timeout_ms == INT_MAX
                                ? GRPC_MILLIS_INF_FUTURE
                                : g_default_server_keepalive_timeout_ms;
+    t->keepalive_permit_without_calls =
+        g_default_server_keepalive_permit_without_calls;
   }
-  t->keepalive_permit_without_calls = g_default_keepalive_permit_without_calls;
 
   t->opt_target = GRPC_CHTTP2_OPTIMIZE_FOR_LATENCY;
 
@@ -533,7 +506,7 @@
               int value = grpc_channel_arg_get_integer(
                   &channel_args->args[i], settings_map[j].integer_options);
               if (value >= 0) {
-                queue_setting_update(exec_ctx, t, settings_map[j].setting_id,
+                queue_setting_update(t, settings_map[j].setting_id,
                                      (uint32_t)value);
               }
             }
@@ -544,11 +517,12 @@
     }
   }
 
-  t->flow_control.Init(exec_ctx, t, enable_bdp);
+  t->flow_control.Init(t, enable_bdp);
 
   /* No pings allowed before receiving a header or data frame. */
   t->ping_state.pings_before_data_required = 0;
   t->ping_state.is_delayed_ping_timer_set = false;
+  t->ping_state.last_ping_sent_time = GRPC_MILLIS_INF_PAST;
 
   t->ping_recv_state.last_ping_recv_time = GRPC_MILLIS_INF_PAST;
   t->ping_recv_state.ping_strikes = 0;
@@ -557,8 +531,8 @@
   if (t->keepalive_time != GRPC_MILLIS_INF_FUTURE) {
     t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
     GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
-    grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
-                    grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+    grpc_timer_init(&t->keepalive_ping_timer,
+                    grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                     &t->init_keepalive_ping_locked);
   } else {
     /* Use GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED to indicate there are no
@@ -568,42 +542,37 @@
 
   if (enable_bdp) {
     GRPC_CHTTP2_REF_TRANSPORT(t, "bdp_ping");
-    schedule_bdp_ping_locked(exec_ctx, t);
+    schedule_bdp_ping_locked(t);
 
-    grpc_chttp2_act_on_flowctl_action(
-        exec_ctx, t->flow_control->PeriodicUpdate(exec_ctx), t, nullptr);
+    grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t,
+                                      nullptr);
   }
 
-  grpc_chttp2_initiate_write(exec_ctx, t,
-                             GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
-  post_benign_reclaimer(exec_ctx, t);
+  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE);
+  post_benign_reclaimer(t);
 }
 
-static void destroy_transport_locked(grpc_exec_ctx* exec_ctx, void* tp,
-                                     grpc_error* error) {
+static void destroy_transport_locked(void* tp, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
   t->destroying = 1;
   close_transport_locked(
-      exec_ctx, t,
-      grpc_error_set_int(
-          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"),
-          GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
-  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destroy");
+      t, grpc_error_set_int(
+             GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport destroyed"),
+             GRPC_ERROR_INT_OCCURRED_DURING_WRITE, t->write_state));
+  GRPC_CHTTP2_UNREF_TRANSPORT(t, "destroy");
 }
 
-static void destroy_transport(grpc_exec_ctx* exec_ctx, grpc_transport* gt) {
+static void destroy_transport(grpc_transport* gt) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
-  GRPC_CLOSURE_SCHED(exec_ctx,
-                     GRPC_CLOSURE_CREATE(destroy_transport_locked, t,
+  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(destroy_transport_locked, t,
                                          grpc_combiner_scheduler(t->combiner)),
                      GRPC_ERROR_NONE);
 }
 
-static void close_transport_locked(grpc_exec_ctx* exec_ctx,
-                                   grpc_chttp2_transport* t,
+static void close_transport_locked(grpc_chttp2_transport* t,
                                    grpc_error* error) {
-  end_all_the_calls(exec_ctx, t, GRPC_ERROR_REF(error));
-  cancel_pings(exec_ctx, t, GRPC_ERROR_REF(error));
+  end_all_the_calls(t, GRPC_ERROR_REF(error));
+  cancel_pings(t, GRPC_ERROR_REF(error));
   if (t->closed_with_error == GRPC_ERROR_NONE) {
     if (!grpc_error_has_clear_grpc_status(error)) {
       error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
@@ -621,21 +590,21 @@
     }
     GPR_ASSERT(error != GRPC_ERROR_NONE);
     t->closed_with_error = GRPC_ERROR_REF(error);
-    connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_SHUTDOWN,
-                           GRPC_ERROR_REF(error), "close_transport");
+    connectivity_state_set(t, GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
+                           "close_transport");
     if (t->ping_state.is_delayed_ping_timer_set) {
-      grpc_timer_cancel(exec_ctx, &t->ping_state.delayed_ping_timer);
+      grpc_timer_cancel(&t->ping_state.delayed_ping_timer);
     }
     if (t->have_next_bdp_ping_timer) {
-      grpc_timer_cancel(exec_ctx, &t->next_bdp_ping_timer);
+      grpc_timer_cancel(&t->next_bdp_ping_timer);
     }
     switch (t->keepalive_state) {
       case GRPC_CHTTP2_KEEPALIVE_STATE_WAITING:
-        grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
+        grpc_timer_cancel(&t->keepalive_ping_timer);
         break;
       case GRPC_CHTTP2_KEEPALIVE_STATE_PINGING:
-        grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
-        grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer);
+        grpc_timer_cancel(&t->keepalive_ping_timer);
+        grpc_timer_cancel(&t->keepalive_watchdog_timer);
         break;
       case GRPC_CHTTP2_KEEPALIVE_STATE_DYING:
       case GRPC_CHTTP2_KEEPALIVE_STATE_DISABLED:
@@ -646,14 +615,13 @@
     /* flush writable stream list to avoid dangling references */
     grpc_chttp2_stream* s;
     while (grpc_chttp2_list_pop_writable_stream(t, &s)) {
-      GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:close");
+      GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:close");
     }
     GPR_ASSERT(t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE);
-    grpc_endpoint_shutdown(exec_ctx, t->ep, GRPC_ERROR_REF(error));
+    grpc_endpoint_shutdown(t->ep, GRPC_ERROR_REF(error));
   }
   if (t->notify_on_receive_settings != nullptr) {
-    GRPC_CLOSURE_SCHED(exec_ctx, t->notify_on_receive_settings,
-                       GRPC_ERROR_CANCELLED);
+    GRPC_CLOSURE_SCHED(t->notify_on_receive_settings, GRPC_ERROR_CANCELLED);
     t->notify_on_receive_settings = nullptr;
   }
   GRPC_ERROR_UNREF(error);
@@ -663,22 +631,21 @@
 void grpc_chttp2_stream_ref(grpc_chttp2_stream* s, const char* reason) {
   grpc_stream_ref(s->refcount, reason);
 }
-void grpc_chttp2_stream_unref(grpc_exec_ctx* exec_ctx, grpc_chttp2_stream* s,
-                              const char* reason) {
-  grpc_stream_unref(exec_ctx, s->refcount, reason);
+void grpc_chttp2_stream_unref(grpc_chttp2_stream* s, const char* reason) {
+  grpc_stream_unref(s->refcount, reason);
 }
 #else
 void grpc_chttp2_stream_ref(grpc_chttp2_stream* s) {
   grpc_stream_ref(s->refcount);
 }
-void grpc_chttp2_stream_unref(grpc_exec_ctx* exec_ctx, grpc_chttp2_stream* s) {
-  grpc_stream_unref(exec_ctx, s->refcount);
+void grpc_chttp2_stream_unref(grpc_chttp2_stream* s) {
+  grpc_stream_unref(s->refcount);
 }
 #endif
 
-static int init_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                       grpc_stream* gs, grpc_stream_refcount* refcount,
-                       const void* server_data, gpr_arena* arena) {
+static int init_stream(grpc_transport* gt, grpc_stream* gs,
+                       grpc_stream_refcount* refcount, const void* server_data,
+                       gpr_arena* arena) {
   GPR_TIMER_BEGIN("init_stream", 0);
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
   grpc_chttp2_stream* s = (grpc_chttp2_stream*)gs;
@@ -712,7 +679,7 @@
     s->id = (uint32_t)(uintptr_t)server_data;
     *t->accepting_stream = s;
     grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
-    post_destructive_reclaimer(exec_ctx, t);
+    post_destructive_reclaimer(t);
   }
 
   s->flow_control.Init(t->flow_control.get(), s);
@@ -721,8 +688,7 @@
   return 0;
 }
 
-static void destroy_stream_locked(grpc_exec_ctx* exec_ctx, void* sp,
-                                  grpc_error* error) {
+static void destroy_stream_locked(void* sp, grpc_error* error) {
   grpc_chttp2_stream* s = (grpc_chttp2_stream*)sp;
   grpc_chttp2_transport* t = s->t;
 
@@ -733,11 +699,10 @@
     GPR_ASSERT(grpc_chttp2_stream_map_find(&t->stream_map, s->id) == nullptr);
   }
 
-  grpc_slice_buffer_destroy_internal(exec_ctx,
-                                     &s->unprocessed_incoming_frames_buffer);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &s->frame_storage);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &s->compressed_data_buffer);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &s->decompressed_data_buffer);
+  grpc_slice_buffer_destroy_internal(&s->unprocessed_incoming_frames_buffer);
+  grpc_slice_buffer_destroy_internal(&s->frame_storage);
+  grpc_slice_buffer_destroy_internal(&s->compressed_data_buffer);
+  grpc_slice_buffer_destroy_internal(&s->decompressed_data_buffer);
 
   grpc_chttp2_list_remove_stalled_by_transport(t, s);
   grpc_chttp2_list_remove_stalled_by_stream(t, s);
@@ -756,27 +721,24 @@
   GPR_ASSERT(s->recv_initial_metadata_ready == nullptr);
   GPR_ASSERT(s->recv_message_ready == nullptr);
   GPR_ASSERT(s->recv_trailing_metadata_finished == nullptr);
-  grpc_chttp2_data_parser_destroy(exec_ctx, &s->data_parser);
-  grpc_chttp2_incoming_metadata_buffer_destroy(exec_ctx,
-                                               &s->metadata_buffer[0]);
-  grpc_chttp2_incoming_metadata_buffer_destroy(exec_ctx,
-                                               &s->metadata_buffer[1]);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &s->flow_controlled_buffer);
+  grpc_chttp2_data_parser_destroy(&s->data_parser);
+  grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[0]);
+  grpc_chttp2_incoming_metadata_buffer_destroy(&s->metadata_buffer[1]);
+  grpc_slice_buffer_destroy_internal(&s->flow_controlled_buffer);
   GRPC_ERROR_UNREF(s->read_closed_error);
   GRPC_ERROR_UNREF(s->write_closed_error);
   GRPC_ERROR_UNREF(s->byte_stream_error);
 
   s->flow_control.Destroy();
 
-  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "stream");
+  GRPC_CHTTP2_UNREF_TRANSPORT(t, "stream");
 
   GPR_TIMER_END("destroy_stream", 0);
 
-  GRPC_CLOSURE_SCHED(exec_ctx, s->destroy_stream_arg, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(s->destroy_stream_arg, GRPC_ERROR_NONE);
 }
 
-static void destroy_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                           grpc_stream* gs,
+static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
                            grpc_closure* then_schedule_closure) {
   GPR_TIMER_BEGIN("destroy_stream", 0);
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
@@ -793,7 +755,6 @@
 
   s->destroy_stream_arg = then_schedule_closure;
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       GRPC_CLOSURE_INIT(&s->destroy_stream, destroy_stream_locked, s,
                         grpc_combiner_scheduler(t->combiner)),
       GRPC_ERROR_NONE);
@@ -805,8 +766,7 @@
   return (grpc_chttp2_stream*)grpc_chttp2_stream_map_find(&t->stream_map, id);
 }
 
-grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_exec_ctx* exec_ctx,
-                                                      grpc_chttp2_transport* t,
+grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
                                                       uint32_t id) {
   if (t->channel_callback.accept_stream == nullptr) {
     return nullptr;
@@ -814,8 +774,7 @@
   grpc_chttp2_stream* accepting;
   GPR_ASSERT(t->accepting_stream == nullptr);
   t->accepting_stream = &accepting;
-  t->channel_callback.accept_stream(exec_ctx,
-                                    t->channel_callback.accept_stream_user_data,
+  t->channel_callback.accept_stream(t->channel_callback.accept_stream_user_data,
                                     &t->base, (void*)(uintptr_t)id);
   t->accepting_stream = nullptr;
   return accepting;
@@ -837,7 +796,7 @@
   GPR_UNREACHABLE_CODE(return "UNKNOWN");
 }
 
-static void set_write_state(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+static void set_write_state(grpc_chttp2_transport* t,
                             grpc_chttp2_write_state st, const char* reason) {
   GRPC_CHTTP2_IF_TRACING(gpr_log(GPR_DEBUG, "W:%p %s state %s -> %s [%s]", t,
                                  t->is_client ? "CLIENT" : "SERVER",
@@ -845,108 +804,100 @@
                                  write_state_name(st), reason));
   t->write_state = st;
   if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
-    GRPC_CLOSURE_LIST_SCHED(exec_ctx, &t->run_after_write);
+    GRPC_CLOSURE_LIST_SCHED(&t->run_after_write);
     if (t->close_transport_on_writes_finished != nullptr) {
       grpc_error* err = t->close_transport_on_writes_finished;
       t->close_transport_on_writes_finished = nullptr;
-      close_transport_locked(exec_ctx, t, err);
+      close_transport_locked(t, err);
     }
   }
 }
 
 static void inc_initiate_write_reason(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_initiate_write_reason reason) {
+    grpc_chttp2_initiate_write_reason reason) {
   switch (reason) {
     case GRPC_CHTTP2_INITIATE_WRITE_INITIAL_WRITE:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA(
-          exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA(
-          exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL(
-          exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING(
-          exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE(
-          exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED(
-          exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE();
       break;
     case GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM:
-      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx);
+      GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM();
       break;
   }
 }
 
-void grpc_chttp2_initiate_write(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_transport* t,
+void grpc_chttp2_initiate_write(grpc_chttp2_transport* t,
                                 grpc_chttp2_initiate_write_reason reason) {
   GPR_TIMER_BEGIN("grpc_chttp2_initiate_write", 0);
 
   switch (t->write_state) {
     case GRPC_CHTTP2_WRITE_STATE_IDLE:
-      inc_initiate_write_reason(exec_ctx, reason);
-      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
+      inc_initiate_write_reason(reason);
+      set_write_state(t, GRPC_CHTTP2_WRITE_STATE_WRITING,
                       grpc_chttp2_initiate_write_reason_string(reason));
       t->is_first_write_in_batch = true;
       GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
       GRPC_CLOSURE_SCHED(
-          exec_ctx,
           GRPC_CLOSURE_INIT(&t->write_action_begin_locked,
                             write_action_begin_locked, t,
                             grpc_combiner_finally_scheduler(t->combiner)),
           GRPC_ERROR_NONE);
       break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING:
-      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
+      set_write_state(t, GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE,
                       grpc_chttp2_initiate_write_reason_string(reason));
       break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
@@ -955,8 +906,7 @@
   GPR_TIMER_END("grpc_chttp2_initiate_write", 0);
 }
 
-void grpc_chttp2_mark_stream_writable(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_transport* t,
+void grpc_chttp2_mark_stream_writable(grpc_chttp2_transport* t,
                                       grpc_chttp2_stream* s) {
   if (t->closed_with_error == GRPC_ERROR_NONE &&
       grpc_chttp2_list_add_writable_stream(t, s)) {
@@ -1006,8 +956,7 @@
   GPR_UNREACHABLE_CODE(return "bad state tuple");
 }
 
-static void write_action_begin_locked(grpc_exec_ctx* exec_ctx, void* gt,
-                                      grpc_error* error_ignored) {
+static void write_action_begin_locked(void* gt, grpc_error* error_ignored) {
   GPR_TIMER_BEGIN("write_action_begin_locked", 0);
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
   GPR_ASSERT(t->write_state != GRPC_CHTTP2_WRITE_STATE_IDLE);
@@ -1015,62 +964,59 @@
   if (t->closed_with_error != GRPC_ERROR_NONE) {
     r.writing = false;
   } else {
-    r = grpc_chttp2_begin_write(exec_ctx, t);
+    r = grpc_chttp2_begin_write(t);
   }
   if (r.writing) {
     if (r.partial) {
-      GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx);
+      GRPC_STATS_INC_HTTP2_PARTIAL_WRITES();
     }
     if (!t->is_first_write_in_batch) {
-      GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx);
+      GRPC_STATS_INC_HTTP2_WRITES_CONTINUED();
     }
     grpc_closure_scheduler* scheduler =
         write_scheduler(t, r.early_results_scheduled, r.partial);
     if (scheduler != grpc_schedule_on_exec_ctx) {
-      GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx);
+      GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED();
     }
     set_write_state(
-        exec_ctx, t,
+        t,
         r.partial ? GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE
                   : GRPC_CHTTP2_WRITE_STATE_WRITING,
         begin_writing_desc(r.partial, scheduler == grpc_schedule_on_exec_ctx));
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_INIT(&t->write_action, write_action, t, scheduler),
         GRPC_ERROR_NONE);
   } else {
-    GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx);
-    set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
-                    "begin writing nothing");
-    GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
+    GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN();
+    set_write_state(t, GRPC_CHTTP2_WRITE_STATE_IDLE, "begin writing nothing");
+    GRPC_CHTTP2_UNREF_TRANSPORT(t, "writing");
   }
   GPR_TIMER_END("write_action_begin_locked", 0);
 }
 
-static void write_action(grpc_exec_ctx* exec_ctx, void* gt, grpc_error* error) {
+static void write_action(void* gt, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
   GPR_TIMER_BEGIN("write_action", 0);
   grpc_endpoint_write(
-      exec_ctx, t->ep, &t->outbuf,
+      t->ep, &t->outbuf,
       GRPC_CLOSURE_INIT(&t->write_action_end_locked, write_action_end_locked, t,
                         grpc_combiner_scheduler(t->combiner)));
   GPR_TIMER_END("write_action", 0);
 }
 
-static void write_action_end_locked(grpc_exec_ctx* exec_ctx, void* tp,
-                                    grpc_error* error) {
+static void write_action_end_locked(void* tp, grpc_error* error) {
   GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
 
   if (error != GRPC_ERROR_NONE) {
-    close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
+    close_transport_locked(t, GRPC_ERROR_REF(error));
   }
 
   if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED) {
     t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SENT;
     if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
       close_transport_locked(
-          exec_ctx, t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("goaway sent"));
+          t, GRPC_ERROR_CREATE_FROM_STATIC_STRING("goaway sent"));
     }
   }
 
@@ -1079,17 +1025,14 @@
       GPR_UNREACHABLE_CODE(break);
     case GRPC_CHTTP2_WRITE_STATE_WRITING:
       GPR_TIMER_MARK("state=writing", 0);
-      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
-                      "finish writing");
+      set_write_state(t, GRPC_CHTTP2_WRITE_STATE_IDLE, "finish writing");
       break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE:
       GPR_TIMER_MARK("state=writing_stale_no_poller", 0);
-      set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
-                      "continue writing");
+      set_write_state(t, GRPC_CHTTP2_WRITE_STATE_WRITING, "continue writing");
       t->is_first_write_in_batch = false;
       GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
       GRPC_CLOSURE_RUN(
-          exec_ctx,
           GRPC_CLOSURE_INIT(&t->write_action_begin_locked,
                             write_action_begin_locked, t,
                             grpc_combiner_finally_scheduler(t->combiner)),
@@ -1097,16 +1040,15 @@
       break;
   }
 
-  grpc_chttp2_end_write(exec_ctx, t, GRPC_ERROR_REF(error));
+  grpc_chttp2_end_write(t, GRPC_ERROR_REF(error));
 
-  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "writing");
+  GRPC_CHTTP2_UNREF_TRANSPORT(t, "writing");
   GPR_TIMER_END("terminate_writing_with_lock", 0);
 }
 
 // Dirties an HTTP2 setting to be sent out next time a writing path occurs.
 // If the change needs to occur immediately, manually initiate a write.
-static void queue_setting_update(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_transport* t,
+static void queue_setting_update(grpc_chttp2_transport* t,
                                  grpc_chttp2_setting_id id, uint32_t value) {
   const grpc_chttp2_setting_parameters* sp =
       &grpc_chttp2_settings_parameters[id];
@@ -1121,8 +1063,7 @@
   }
 }
 
-void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t,
+void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
                                      uint32_t goaway_error,
                                      grpc_slice goaway_text) {
   // GRPC_CHTTP2_IF_TRACING(
@@ -1157,12 +1098,11 @@
 
   /* lie: use transient failure from the transport to indicate goaway has been
    * received */
-  connectivity_state_set(exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
+  connectivity_state_set(t, GRPC_CHANNEL_TRANSIENT_FAILURE,
                          GRPC_ERROR_REF(t->goaway_error), "got_goaway");
 }
 
-static void maybe_start_some_streams(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t) {
+static void maybe_start_some_streams(grpc_chttp2_transport* t) {
   grpc_chttp2_stream* s;
   /* start streams where we have free grpc_chttp2_stream ids and free
    * concurrency */
@@ -1182,22 +1122,21 @@
 
     if (t->next_stream_id >= MAX_CLIENT_STREAM_ID) {
       connectivity_state_set(
-          exec_ctx, t, GRPC_CHANNEL_TRANSIENT_FAILURE,
+          t, GRPC_CHANNEL_TRANSIENT_FAILURE,
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream IDs exhausted"),
           "no_more_stream_ids");
     }
 
     grpc_chttp2_stream_map_add(&t->stream_map, s->id, s);
-    post_destructive_reclaimer(exec_ctx, t);
-    grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
-    grpc_chttp2_initiate_write(exec_ctx, t,
-                               GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM);
+    post_destructive_reclaimer(t);
+    grpc_chttp2_mark_stream_writable(t, s);
+    grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_START_NEW_STREAM);
   }
   /* cancel out streams that will never be started */
   while (t->next_stream_id >= MAX_CLIENT_STREAM_ID &&
          grpc_chttp2_list_pop_waiting_for_concurrency(t, &s)) {
     grpc_chttp2_cancel_stream(
-        exec_ctx, t, s,
+        t, s,
         grpc_error_set_int(
             GRPC_ERROR_CREATE_FROM_STATIC_STRING("Stream IDs exhausted"),
             GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
@@ -1219,15 +1158,13 @@
   return closure;
 }
 
-static void null_then_run_closure(grpc_exec_ctx* exec_ctx,
-                                  grpc_closure** closure, grpc_error* error) {
+static void null_then_run_closure(grpc_closure** closure, grpc_error* error) {
   grpc_closure* c = *closure;
   *closure = nullptr;
-  GRPC_CLOSURE_RUN(exec_ctx, c, error);
+  GRPC_CLOSURE_RUN(c, error);
 }
 
-void grpc_chttp2_complete_closure_step(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_transport* t,
+void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
                                        grpc_chttp2_stream* s,
                                        grpc_closure** pclosure,
                                        grpc_error* error, const char* desc) {
@@ -1267,7 +1204,7 @@
     }
     if ((t->write_state == GRPC_CHTTP2_WRITE_STATE_IDLE) ||
         !(closure->next_data.scratch & CLOSURE_BARRIER_MAY_COVER_WRITE)) {
-      GRPC_CLOSURE_RUN(exec_ctx, closure, closure->error_data.error);
+      GRPC_CLOSURE_RUN(closure, closure->error_data.error);
     } else {
       grpc_closure_list_append(&t->run_after_write, closure,
                                closure->error_data.error);
@@ -1283,28 +1220,24 @@
   return false;
 }
 
-static void maybe_become_writable_due_to_send_msg(grpc_exec_ctx* exec_ctx,
-                                                  grpc_chttp2_transport* t,
+static void maybe_become_writable_due_to_send_msg(grpc_chttp2_transport* t,
                                                   grpc_chttp2_stream* s) {
   if (s->id != 0 && (!s->write_buffering ||
                      s->flow_controlled_buffer.length > t->write_buffer_size)) {
-    grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
-    grpc_chttp2_initiate_write(exec_ctx, t,
-                               GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE);
+    grpc_chttp2_mark_stream_writable(t, s);
+    grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_SEND_MESSAGE);
   }
 }
 
-static void add_fetched_slice_locked(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t,
+static void add_fetched_slice_locked(grpc_chttp2_transport* t,
                                      grpc_chttp2_stream* s) {
   s->fetched_send_message_length +=
       (uint32_t)GRPC_SLICE_LENGTH(s->fetching_slice);
   grpc_slice_buffer_add(&s->flow_controlled_buffer, s->fetching_slice);
-  maybe_become_writable_due_to_send_msg(exec_ctx, t, s);
+  maybe_become_writable_due_to_send_msg(t, s);
 }
 
-static void continue_fetching_send_locked(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_transport* t,
+static void continue_fetching_send_locked(grpc_chttp2_transport* t,
                                           grpc_chttp2_stream* s) {
   for (;;) {
     if (s->fetching_send_message == nullptr) {
@@ -1313,11 +1246,11 @@
       return;  /* early out */
     }
     if (s->fetched_send_message_length == s->fetching_send_message->length) {
-      grpc_byte_stream_destroy(exec_ctx, s->fetching_send_message);
+      grpc_byte_stream_destroy(s->fetching_send_message);
       int64_t notify_offset = s->next_message_end_offset;
       if (notify_offset <= s->flow_controlled_bytes_written) {
         grpc_chttp2_complete_closure_step(
-            exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_NONE,
+            t, s, &s->fetching_send_message_finished, GRPC_ERROR_NONE,
             "fetching_send_message_finished");
       } else {
         grpc_chttp2_write_cb* cb = t->write_cb_pool;
@@ -1338,39 +1271,37 @@
       }
       s->fetching_send_message = nullptr;
       return; /* early out */
-    } else if (grpc_byte_stream_next(exec_ctx, s->fetching_send_message,
-                                     UINT32_MAX, &s->complete_fetch_locked)) {
-      grpc_error* error = grpc_byte_stream_pull(
-          exec_ctx, s->fetching_send_message, &s->fetching_slice);
+    } else if (grpc_byte_stream_next(s->fetching_send_message, UINT32_MAX,
+                                     &s->complete_fetch_locked)) {
+      grpc_error* error =
+          grpc_byte_stream_pull(s->fetching_send_message, &s->fetching_slice);
       if (error != GRPC_ERROR_NONE) {
-        grpc_byte_stream_destroy(exec_ctx, s->fetching_send_message);
-        grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
+        grpc_byte_stream_destroy(s->fetching_send_message);
+        grpc_chttp2_cancel_stream(t, s, error);
       } else {
-        add_fetched_slice_locked(exec_ctx, t, s);
+        add_fetched_slice_locked(t, s);
       }
     }
   }
 }
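
The loop above drives the post-refactor byte stream interface: grpc_byte_stream_next() reports whether bytes can be pulled immediately (otherwise the supplied closure is run once they arrive), and grpc_byte_stream_pull() hands back one slice at a time. A minimal stand-alone consumer, written as if it lived in this file (which already has the needed includes) and with every helper name hypothetical, might look like:

    // Sketch only: drain whatever is synchronously available from a byte
    // stream into a slice buffer. 'consumed' is the caller's running byte
    // count, checked against bs->length the same way the transport checks
    // fetched_send_message_length above. Error handling is simplified.
    static bool drain_available_bytes(grpc_byte_stream* bs, size_t* consumed,
                                      grpc_slice_buffer* out,
                                      grpc_closure* on_bytes_ready) {
      while (*consumed < bs->length) {
        if (!grpc_byte_stream_next(bs, UINT32_MAX, on_bytes_ready)) {
          return false;  // not ready yet; on_bytes_ready runs when it is
        }
        grpc_slice slice;
        grpc_error* error = grpc_byte_stream_pull(bs, &slice);
        if (error != GRPC_ERROR_NONE) {
          GRPC_ERROR_UNREF(error);  // a real caller would cancel the stream
          return false;
        }
        *consumed += GRPC_SLICE_LENGTH(slice);
        grpc_slice_buffer_add(out, slice);
      }
      return true;
    }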
 
-static void complete_fetch_locked(grpc_exec_ctx* exec_ctx, void* gs,
-                                  grpc_error* error) {
+static void complete_fetch_locked(void* gs, grpc_error* error) {
   grpc_chttp2_stream* s = (grpc_chttp2_stream*)gs;
   grpc_chttp2_transport* t = s->t;
   if (error == GRPC_ERROR_NONE) {
-    error = grpc_byte_stream_pull(exec_ctx, s->fetching_send_message,
-                                  &s->fetching_slice);
+    error = grpc_byte_stream_pull(s->fetching_send_message, &s->fetching_slice);
     if (error == GRPC_ERROR_NONE) {
-      add_fetched_slice_locked(exec_ctx, t, s);
-      continue_fetching_send_locked(exec_ctx, t, s);
+      add_fetched_slice_locked(t, s);
+      continue_fetching_send_locked(t, s);
     }
   }
   if (error != GRPC_ERROR_NONE) {
-    grpc_byte_stream_destroy(exec_ctx, s->fetching_send_message);
-    grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
+    grpc_byte_stream_destroy(s->fetching_send_message);
+    grpc_chttp2_cancel_stream(t, s, error);
   }
 }
 
-static void do_nothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+static void do_nothing(void* arg, grpc_error* error) {}
 
 static void log_metadata(const grpc_metadata_batch* md_batch, uint32_t id,
                          bool is_client, bool is_initial) {
@@ -1385,7 +1316,7 @@
   }
 }
 
-static void perform_stream_op_locked(grpc_exec_ctx* exec_ctx, void* stream_op,
+static void perform_stream_op_locked(void* stream_op,
                                      grpc_error* error_ignored) {
   GPR_TIMER_BEGIN("perform_stream_op_locked", 0);
 
@@ -1395,7 +1326,7 @@
   grpc_transport_stream_op_batch_payload* op_payload = op->payload;
   grpc_chttp2_transport* t = s->t;
 
-  GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx);
+  GRPC_STATS_INC_HTTP2_OP_BATCHES();
 
   if (grpc_http_trace.enabled()) {
     char* str = grpc_transport_stream_op_batch_string(op);
@@ -1430,13 +1361,12 @@
   }
 
   if (op->cancel_stream) {
-    GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx);
-    grpc_chttp2_cancel_stream(exec_ctx, t, s,
-                              op_payload->cancel_stream.cancel_error);
+    GRPC_STATS_INC_HTTP2_OP_CANCEL();
+    grpc_chttp2_cancel_stream(t, s, op_payload->cancel_stream.cancel_error);
   }
 
   if (op->send_initial_metadata) {
-    GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx);
+    GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA();
     GPR_ASSERT(s->send_initial_metadata_finished == nullptr);
     on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
 
@@ -1464,7 +1394,7 @@
     }
     if (metadata_size > metadata_peer_limit) {
       grpc_chttp2_cancel_stream(
-          exec_ctx, t, s,
+          t, s,
           grpc_error_set_int(
               grpc_error_set_int(
                   grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -1483,10 +1413,10 @@
           if (t->closed_with_error == GRPC_ERROR_NONE) {
             GPR_ASSERT(s->id == 0);
             grpc_chttp2_list_add_waiting_for_concurrency(t, s);
-            maybe_start_some_streams(exec_ctx, t);
+            maybe_start_some_streams(t);
           } else {
             grpc_chttp2_cancel_stream(
-                exec_ctx, t, s,
+                t, s,
                 grpc_error_set_int(
                     GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                         "Transport closed", &t->closed_with_error, 1),
@@ -1494,18 +1424,18 @@
           }
         } else {
           GPR_ASSERT(s->id != 0);
-          grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+          grpc_chttp2_mark_stream_writable(t, s);
           if (!(op->send_message &&
                 (op->payload->send_message.send_message->flags &
                  GRPC_WRITE_BUFFER_HINT))) {
             grpc_chttp2_initiate_write(
-                exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA);
+                t, GRPC_CHTTP2_INITIATE_WRITE_SEND_INITIAL_METADATA);
           }
         }
       } else {
         s->send_initial_metadata = nullptr;
         grpc_chttp2_complete_closure_step(
-            exec_ctx, t, s, &s->send_initial_metadata_finished,
+            t, s, &s->send_initial_metadata_finished,
             GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                 "Attempt to send initial metadata after stream was closed",
                 &s->write_closed_error, 1),
@@ -1519,9 +1449,9 @@
   }
 
   if (op->send_message) {
-    GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx);
+    GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE();
     GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(
-        exec_ctx, op->payload->send_message.send_message->length);
+        op->payload->send_message.send_message->length);
     on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
     s->fetching_send_message_finished = add_closure_barrier(op->on_complete);
     if (s->write_closed) {
@@ -1531,7 +1461,7 @@
       // recv_message failure, breaking out of its loop, and then
       // starting recv_trailing_metadata.
       grpc_chttp2_complete_closure_step(
-          exec_ctx, t, s, &s->fetching_send_message_finished,
+          t, s, &s->fetching_send_message_finished,
           t->is_client && s->received_trailing_metadata
               ? GRPC_ERROR_NONE
               : GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
@@ -1560,13 +1490,13 @@
       } else {
         s->write_buffering = false;
       }
-      continue_fetching_send_locked(exec_ctx, t, s);
-      maybe_become_writable_due_to_send_msg(exec_ctx, t, s);
+      continue_fetching_send_locked(t, s);
+      maybe_become_writable_due_to_send_msg(t, s);
     }
   }
 
   if (op->send_trailing_metadata) {
-    GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx);
+    GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA();
     GPR_ASSERT(s->send_trailing_metadata_finished == nullptr);
     on_complete->next_data.scratch |= CLOSURE_BARRIER_MAY_COVER_WRITE;
     s->send_trailing_metadata_finished = add_closure_barrier(on_complete);
@@ -1580,7 +1510,7 @@
                    [GRPC_CHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE];
     if (metadata_size > metadata_peer_limit) {
       grpc_chttp2_cancel_stream(
-          exec_ctx, t, s,
+          t, s,
           grpc_error_set_int(
               grpc_error_set_int(
                   grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
@@ -1597,7 +1527,7 @@
       if (s->write_closed) {
         s->send_trailing_metadata = nullptr;
         grpc_chttp2_complete_closure_step(
-            exec_ctx, t, s, &s->send_trailing_metadata_finished,
+            t, s, &s->send_trailing_metadata_finished,
             grpc_metadata_batch_is_empty(
                 op->payload->send_trailing_metadata.send_trailing_metadata)
                 ? GRPC_ERROR_NONE
@@ -1608,15 +1538,15 @@
       } else if (s->id != 0) {
         /* TODO(ctiller): check if there's flow control for any outstanding
            bytes before going writable */
-        grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+        grpc_chttp2_mark_stream_writable(t, s);
         grpc_chttp2_initiate_write(
-            exec_ctx, t, GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA);
+            t, GRPC_CHTTP2_INITIATE_WRITE_SEND_TRAILING_METADATA);
       }
     }
   }
 
   if (op->recv_initial_metadata) {
-    GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx);
+    GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA();
     GPR_ASSERT(s->recv_initial_metadata_ready == nullptr);
     s->recv_initial_metadata_ready =
         op_payload->recv_initial_metadata.recv_initial_metadata_ready;
@@ -1628,11 +1558,11 @@
       gpr_atm_rel_store(op_payload->recv_initial_metadata.peer_string,
                         (gpr_atm)gpr_strdup(t->peer_string));
     }
-    grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
+    grpc_chttp2_maybe_complete_recv_initial_metadata(t, s);
   }
 
   if (op->recv_message) {
-    GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx);
+    GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE();
     size_t already_received;
     GPR_ASSERT(s->recv_message_ready == nullptr);
     GPR_ASSERT(!s->pending_byte_stream);
@@ -1643,32 +1573,30 @@
         already_received = s->frame_storage.length;
         s->flow_control->IncomingByteStreamUpdate(GRPC_HEADER_SIZE_IN_BYTES,
                                                   already_received);
-        grpc_chttp2_act_on_flowctl_action(exec_ctx,
-                                          s->flow_control->MakeAction(), t, s);
+        grpc_chttp2_act_on_flowctl_action(s->flow_control->MakeAction(), t, s);
       }
     }
-    grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
+    grpc_chttp2_maybe_complete_recv_message(t, s);
   }
 
   if (op->recv_trailing_metadata) {
-    GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx);
+    GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA();
     GPR_ASSERT(s->recv_trailing_metadata_finished == nullptr);
     s->recv_trailing_metadata_finished = add_closure_barrier(on_complete);
     s->recv_trailing_metadata =
         op_payload->recv_trailing_metadata.recv_trailing_metadata;
     s->final_metadata_requested = true;
-    grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
+    grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s);
   }
 
-  grpc_chttp2_complete_closure_step(exec_ctx, t, s, &on_complete,
-                                    GRPC_ERROR_NONE, "op->on_complete");
+  grpc_chttp2_complete_closure_step(t, s, &on_complete, GRPC_ERROR_NONE,
+                                    "op->on_complete");
 
   GPR_TIMER_END("perform_stream_op_locked", 0);
-  GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "perform_stream_op");
+  GRPC_CHTTP2_STREAM_UNREF(s, "perform_stream_op");
 }
 
-static void perform_stream_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                              grpc_stream* gs,
+static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
                               grpc_transport_stream_op_batch* op) {
   GPR_TIMER_BEGIN("perform_stream_op", 0);
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
@@ -1696,32 +1624,29 @@
   op->handler_private.extra_arg = gs;
   GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       GRPC_CLOSURE_INIT(&op->handler_private.closure, perform_stream_op_locked,
                         op, grpc_combiner_scheduler(t->combiner)),
       GRPC_ERROR_NONE);
   GPR_TIMER_END("perform_stream_op", 0);
 }
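
perform_stream_op itself only re-posts the batch onto the transport combiner; every mutation of transport state happens in perform_stream_op_locked, serialized by t->combiner. A minimal sketch of that hop, using the same post-refactor closure calls shown above (the op struct and callback names here are hypothetical):

    // Sketch of the combiner hop used by perform_stream_op and
    // perform_transport_op: work is never done on the caller's thread;
    // it is re-scheduled under t->combiner so all transport state is
    // mutated from one serialized context.
    static void my_work_locked(void* arg, grpc_error* error_ignored) {
      // Runs under t->combiner: transport/stream state may be touched freely.
    }

    static void schedule_on_transport_combiner(grpc_chttp2_transport* t,
                                               grpc_closure* closure_storage,
                                               void* arg) {
      GRPC_CLOSURE_SCHED(
          GRPC_CLOSURE_INIT(closure_storage, my_work_locked, arg,
                            grpc_combiner_scheduler(t->combiner)),
          GRPC_ERROR_NONE);
    }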
 
-static void cancel_pings(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                         grpc_error* error) {
+static void cancel_pings(grpc_chttp2_transport* t, grpc_error* error) {
   /* call back the remaining pings: they're not allowed to call into the
      transport, and they may hold resources that need to be freed */
   grpc_chttp2_ping_queue* pq = &t->ping_queue;
   GPR_ASSERT(error != GRPC_ERROR_NONE);
   for (size_t j = 0; j < GRPC_CHTTP2_PCL_COUNT; j++) {
     grpc_closure_list_fail_all(&pq->lists[j], GRPC_ERROR_REF(error));
-    GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[j]);
+    GRPC_CLOSURE_LIST_SCHED(&pq->lists[j]);
   }
   GRPC_ERROR_UNREF(error);
 }
 
-static void send_ping_locked(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+static void send_ping_locked(grpc_chttp2_transport* t,
                              grpc_closure* on_initiate, grpc_closure* on_ack) {
   if (t->closed_with_error != GRPC_ERROR_NONE) {
-    GRPC_CLOSURE_SCHED(exec_ctx, on_initiate,
-                       GRPC_ERROR_REF(t->closed_with_error));
-    GRPC_CLOSURE_SCHED(exec_ctx, on_ack, GRPC_ERROR_REF(t->closed_with_error));
+    GRPC_CLOSURE_SCHED(on_initiate, GRPC_ERROR_REF(t->closed_with_error));
+    GRPC_CLOSURE_SCHED(on_ack, GRPC_ERROR_REF(t->closed_with_error));
     return;
   }
   grpc_chttp2_ping_queue* pq = &t->ping_queue;
@@ -1731,18 +1656,15 @@
                            GRPC_ERROR_NONE);
 }
 
-static void retry_initiate_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
-                                       grpc_error* error) {
+static void retry_initiate_ping_locked(void* tp, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
   t->ping_state.is_delayed_ping_timer_set = false;
   if (error == GRPC_ERROR_NONE) {
-    grpc_chttp2_initiate_write(exec_ctx, t,
-                               GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
+    grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RETRY_SEND_PING);
   }
 }
 
-void grpc_chttp2_ack_ping(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                          uint64_t id) {
+void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id) {
   grpc_chttp2_ping_queue* pq = &t->ping_queue;
   if (pq->inflight_id != id) {
     char* from = grpc_endpoint_get_peer(t->ep);
@@ -1750,54 +1672,48 @@
     gpr_free(from);
     return;
   }
-  GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
+  GRPC_CLOSURE_LIST_SCHED(&pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
   if (!grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
-    grpc_chttp2_initiate_write(exec_ctx, t,
-                               GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS);
+    grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_CONTINUE_PINGS);
   }
 }
 
-static void send_goaway(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                        grpc_error* error) {
+static void send_goaway(grpc_chttp2_transport* t, grpc_error* error) {
   t->sent_goaway_state = GRPC_CHTTP2_GOAWAY_SEND_SCHEDULED;
   grpc_http2_error_code http_error;
   grpc_slice slice;
-  grpc_error_get_status(exec_ctx, error, GRPC_MILLIS_INF_FUTURE, nullptr,
-                        &slice, &http_error, nullptr);
+  grpc_error_get_status(error, GRPC_MILLIS_INF_FUTURE, nullptr, &slice,
+                        &http_error, nullptr);
   grpc_chttp2_goaway_append(t->last_new_stream_id, (uint32_t)http_error,
                             grpc_slice_ref_internal(slice), &t->qbuf);
-  grpc_chttp2_initiate_write(exec_ctx, t,
-                             GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT);
+  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_GOAWAY_SENT);
   GRPC_ERROR_UNREF(error);
 }
 
-void grpc_chttp2_add_ping_strike(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_transport* t) {
+void grpc_chttp2_add_ping_strike(grpc_chttp2_transport* t) {
   t->ping_recv_state.ping_strikes++;
   if (t->ping_recv_state.ping_strikes > t->ping_policy.max_ping_strikes &&
       t->ping_policy.max_ping_strikes != 0) {
-    send_goaway(exec_ctx, t,
+    send_goaway(t,
                 grpc_error_set_int(
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("too_many_pings"),
                     GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
     /* The transport will be closed after the write is done */
     close_transport_locked(
-        exec_ctx, t,
-        grpc_error_set_int(
-            GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings"),
-            GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
+        t, grpc_error_set_int(
+               GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many pings"),
+               GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
   }
 }
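
The strike counter above lets a server shed peers that ping too aggressively: once max_ping_strikes is exceeded the transport sends GOAWAY with ENHANCE_YOUR_CALM and then closes. The limit is configurable through the GRPC_ARG_HTTP2_MAX_PING_STRIKES channel arg (its default is parsed further down in this file); a hedged C++ server-side sketch of setting it, with the address and limit chosen purely for illustration:

    #include <grpc++/grpc++.h>

    // Sketch: tighten the ping-strike limit on a server via channel args.
    std::unique_ptr<grpc::Server> BuildServer(grpc::Service* service) {
      grpc::ServerBuilder builder;
      builder.AddListeningPort("0.0.0.0:50051",
                               grpc::InsecureServerCredentials());
      builder.AddChannelArgument(GRPC_ARG_HTTP2_MAX_PING_STRIKES, 2);
      builder.RegisterService(service);
      return builder.BuildAndStart();
    }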
 
-static void perform_transport_op_locked(grpc_exec_ctx* exec_ctx,
-                                        void* stream_op,
+static void perform_transport_op_locked(void* stream_op,
                                         grpc_error* error_ignored) {
   grpc_transport_op* op = (grpc_transport_op*)stream_op;
   grpc_chttp2_transport* t =
       (grpc_chttp2_transport*)op->handler_private.extra_arg;
 
   if (op->goaway_error) {
-    send_goaway(exec_ctx, t, op->goaway_error);
+    send_goaway(t, op->goaway_error);
   }
 
   if (op->set_accept_stream) {
@@ -1807,43 +1723,40 @@
   }
 
   if (op->bind_pollset) {
-    grpc_endpoint_add_to_pollset(exec_ctx, t->ep, op->bind_pollset);
+    grpc_endpoint_add_to_pollset(t->ep, op->bind_pollset);
   }
 
   if (op->bind_pollset_set) {
-    grpc_endpoint_add_to_pollset_set(exec_ctx, t->ep, op->bind_pollset_set);
+    grpc_endpoint_add_to_pollset_set(t->ep, op->bind_pollset_set);
   }
 
-  if (op->send_ping) {
-    send_ping_locked(exec_ctx, t, nullptr, op->send_ping);
-    grpc_chttp2_initiate_write(exec_ctx, t,
-                               GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
+  if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
+    send_ping_locked(t, op->send_ping.on_initiate, op->send_ping.on_ack);
+    grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_APPLICATION_PING);
   }
 
   if (op->on_connectivity_state_change != nullptr) {
     grpc_connectivity_state_notify_on_state_change(
-        exec_ctx, &t->channel_callback.state_tracker, op->connectivity_state,
+        &t->channel_callback.state_tracker, op->connectivity_state,
         op->on_connectivity_state_change);
   }
 
   if (op->disconnect_with_error != GRPC_ERROR_NONE) {
-    close_transport_locked(exec_ctx, t, op->disconnect_with_error);
+    close_transport_locked(t, op->disconnect_with_error);
   }
 
-  GRPC_CLOSURE_RUN(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_RUN(op->on_consumed, GRPC_ERROR_NONE);
 
-  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "transport_op");
+  GRPC_CHTTP2_UNREF_TRANSPORT(t, "transport_op");
 }
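
Note the send_ping change in this function: the transport op no longer carries a single closure but an {on_initiate, on_ack} pair, so a caller can observe both when the ping is written and when the peer acknowledges it. A rough caller-side sketch against the generic transport API (the on_ping_ack closure and helper name are hypothetical):

    // Sketch: issue a transport-level ping via the new two-closure interface.
    static void request_transport_ping(grpc_transport* transport,
                                       grpc_closure* on_ping_ack) {
      grpc_transport_op* op = grpc_make_transport_op(/*on_consumed=*/nullptr);
      op->send_ping.on_initiate = nullptr;  // optional: runs when the ping is sent
      op->send_ping.on_ack = on_ping_ack;   // runs when the PING ack arrives
      grpc_transport_perform_op(transport, op);
    }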
 
-static void perform_transport_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                                 grpc_transport_op* op) {
+static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
   char* msg = grpc_transport_op_string(op);
   gpr_free(msg);
   op->handler_private.extra_arg = gt;
   GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
-  GRPC_CLOSURE_SCHED(exec_ctx,
-                     GRPC_CLOSURE_INIT(&op->handler_private.closure,
+  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_INIT(&op->handler_private.closure,
                                        perform_transport_op_locked, op,
                                        grpc_combiner_scheduler(t->combiner)),
                      GRPC_ERROR_NONE);
@@ -1853,36 +1766,33 @@
  * INPUT PROCESSING - GENERAL
  */
 
-void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx* exec_ctx,
-                                                      grpc_chttp2_transport* t,
+void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_chttp2_transport* t,
                                                       grpc_chttp2_stream* s) {
   if (s->recv_initial_metadata_ready != nullptr &&
       s->published_metadata[0] != GRPC_METADATA_NOT_PUBLISHED) {
     if (s->seen_error) {
-      grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage);
+      grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage);
       if (!s->pending_byte_stream) {
         grpc_slice_buffer_reset_and_unref_internal(
-            exec_ctx, &s->unprocessed_incoming_frames_buffer);
+            &s->unprocessed_incoming_frames_buffer);
       }
     }
-    grpc_chttp2_incoming_metadata_buffer_publish(
-        exec_ctx, &s->metadata_buffer[0], s->recv_initial_metadata);
-    null_then_run_closure(exec_ctx, &s->recv_initial_metadata_ready,
-                          GRPC_ERROR_NONE);
+    grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[0],
+                                                 s->recv_initial_metadata);
+    null_then_run_closure(&s->recv_initial_metadata_ready, GRPC_ERROR_NONE);
   }
 }
 
-void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx* exec_ctx,
-                                             grpc_chttp2_transport* t,
+void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
                                              grpc_chttp2_stream* s) {
   grpc_error* error = GRPC_ERROR_NONE;
   if (s->recv_message_ready != nullptr) {
     *s->recv_message = nullptr;
     if (s->final_metadata_requested && s->seen_error) {
-      grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage);
+      grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage);
       if (!s->pending_byte_stream) {
         grpc_slice_buffer_reset_and_unref_internal(
-            exec_ctx, &s->unprocessed_incoming_frames_buffer);
+            &s->unprocessed_incoming_frames_buffer);
       }
     }
     if (!s->pending_byte_stream) {
@@ -1909,10 +1819,9 @@
                   &s->decompressed_data_buffer, nullptr,
                   GRPC_HEADER_SIZE_IN_BYTES - s->decompressed_header_bytes,
                   &end_of_context)) {
-            grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
-                                                       &s->frame_storage);
+            grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage);
             grpc_slice_buffer_reset_and_unref_internal(
-                exec_ctx, &s->unprocessed_incoming_frames_buffer);
+                &s->unprocessed_incoming_frames_buffer);
             error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                 "Stream decompression error.");
           } else {
@@ -1921,8 +1830,8 @@
               s->decompressed_header_bytes = 0;
             }
             error = grpc_deframe_unprocessed_incoming_frames(
-                exec_ctx, &s->data_parser, s, &s->decompressed_data_buffer,
-                nullptr, s->recv_message);
+                &s->data_parser, s, &s->decompressed_data_buffer, nullptr,
+                s->recv_message);
             if (end_of_context) {
               grpc_stream_compression_context_destroy(
                   s->stream_decompression_ctx);
@@ -1931,15 +1840,14 @@
           }
         } else {
           error = grpc_deframe_unprocessed_incoming_frames(
-              exec_ctx, &s->data_parser, s,
-              &s->unprocessed_incoming_frames_buffer, nullptr, s->recv_message);
+              &s->data_parser, s, &s->unprocessed_incoming_frames_buffer,
+              nullptr, s->recv_message);
         }
         if (error != GRPC_ERROR_NONE) {
           s->seen_error = true;
-          grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
-                                                     &s->frame_storage);
+          grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage);
           grpc_slice_buffer_reset_and_unref_internal(
-              exec_ctx, &s->unprocessed_incoming_frames_buffer);
+              &s->unprocessed_incoming_frames_buffer);
           break;
         } else if (*s->recv_message != nullptr) {
           break;
@@ -1947,26 +1855,25 @@
       }
     }
     if (error == GRPC_ERROR_NONE && *s->recv_message != nullptr) {
-      null_then_run_closure(exec_ctx, &s->recv_message_ready, GRPC_ERROR_NONE);
+      null_then_run_closure(&s->recv_message_ready, GRPC_ERROR_NONE);
     } else if (s->published_metadata[1] != GRPC_METADATA_NOT_PUBLISHED) {
       *s->recv_message = nullptr;
-      null_then_run_closure(exec_ctx, &s->recv_message_ready, GRPC_ERROR_NONE);
+      null_then_run_closure(&s->recv_message_ready, GRPC_ERROR_NONE);
     }
     GRPC_ERROR_UNREF(error);
   }
 }
 
-void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx* exec_ctx,
-                                                       grpc_chttp2_transport* t,
+void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t,
                                                        grpc_chttp2_stream* s) {
-  grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
+  grpc_chttp2_maybe_complete_recv_message(t, s);
   if (s->recv_trailing_metadata_finished != nullptr && s->read_closed &&
       s->write_closed) {
     if (s->seen_error) {
-      grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage);
+      grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage);
       if (!s->pending_byte_stream) {
         grpc_slice_buffer_reset_and_unref_internal(
-            exec_ctx, &s->unprocessed_incoming_frames_buffer);
+            &s->unprocessed_incoming_frames_buffer);
       }
     }
     bool pending_data = s->pending_byte_stream ||
@@ -1984,9 +1891,9 @@
               s->stream_decompression_ctx, &s->frame_storage,
               &s->unprocessed_incoming_frames_buffer, nullptr,
               GRPC_HEADER_SIZE_IN_BYTES, &end_of_context)) {
-        grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &s->frame_storage);
+        grpc_slice_buffer_reset_and_unref_internal(&s->frame_storage);
         grpc_slice_buffer_reset_and_unref_internal(
-            exec_ctx, &s->unprocessed_incoming_frames_buffer);
+            &s->unprocessed_incoming_frames_buffer);
         s->seen_error = true;
       } else {
         if (s->unprocessed_incoming_frames_buffer.length > 0) {
@@ -2001,23 +1908,23 @@
     }
     if (s->read_closed && s->frame_storage.length == 0 && !pending_data &&
         s->recv_trailing_metadata_finished != nullptr) {
-      grpc_chttp2_incoming_metadata_buffer_publish(
-          exec_ctx, &s->metadata_buffer[1], s->recv_trailing_metadata);
+      grpc_chttp2_incoming_metadata_buffer_publish(&s->metadata_buffer[1],
+                                                   s->recv_trailing_metadata);
       grpc_chttp2_complete_closure_step(
-          exec_ctx, t, s, &s->recv_trailing_metadata_finished, GRPC_ERROR_NONE,
+          t, s, &s->recv_trailing_metadata_finished, GRPC_ERROR_NONE,
           "recv_trailing_metadata_finished");
     }
   }
 }
 
-static void remove_stream(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                          uint32_t id, grpc_error* error) {
+static void remove_stream(grpc_chttp2_transport* t, uint32_t id,
+                          grpc_error* error) {
   grpc_chttp2_stream* s =
       (grpc_chttp2_stream*)grpc_chttp2_stream_map_delete(&t->stream_map, id);
   GPR_ASSERT(s);
   if (t->incoming_stream == s) {
     t->incoming_stream = nullptr;
-    grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+    grpc_chttp2_parsing_become_skip_parser(t);
   }
   if (s->pending_byte_stream) {
     if (s->on_next != nullptr) {
@@ -2025,8 +1932,8 @@
       if (error == GRPC_ERROR_NONE) {
         error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
       }
-      incoming_byte_stream_publish_error(exec_ctx, bs, error);
-      incoming_byte_stream_unref(exec_ctx, bs);
+      incoming_byte_stream_publish_error(bs, error);
+      incoming_byte_stream_unref(bs);
       s->data_parser.parsing_frame = nullptr;
     } else {
       GRPC_ERROR_UNREF(s->byte_stream_error);
@@ -2035,56 +1942,52 @@
   }
 
   if (grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
-    post_benign_reclaimer(exec_ctx, t);
+    post_benign_reclaimer(t);
     if (t->sent_goaway_state == GRPC_CHTTP2_GOAWAY_SENT) {
       close_transport_locked(
-          exec_ctx, t,
-          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-              "Last stream closed after sending GOAWAY", &error, 1));
+          t, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                 "Last stream closed after sending GOAWAY", &error, 1));
     }
   }
   if (grpc_chttp2_list_remove_writable_stream(t, s)) {
-    GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:remove_stream");
+    GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:remove_stream");
   }
 
   GRPC_ERROR_UNREF(error);
 
-  maybe_start_some_streams(exec_ctx, t);
+  maybe_start_some_streams(t);
 }
 
-void grpc_chttp2_cancel_stream(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+void grpc_chttp2_cancel_stream(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
                                grpc_error* due_to_error) {
   if (!t->is_client && !s->sent_trailing_metadata &&
       grpc_error_has_clear_grpc_status(due_to_error)) {
-    close_from_api(exec_ctx, t, s, due_to_error);
+    close_from_api(t, s, due_to_error);
     return;
   }
 
   if (!s->read_closed || !s->write_closed) {
     if (s->id != 0) {
       grpc_http2_error_code http_error;
-      grpc_error_get_status(exec_ctx, due_to_error, s->deadline, nullptr,
-                            nullptr, &http_error, nullptr);
+      grpc_error_get_status(due_to_error, s->deadline, nullptr, nullptr,
+                            &http_error, nullptr);
       grpc_slice_buffer_add(
           &t->qbuf, grpc_chttp2_rst_stream_create(s->id, (uint32_t)http_error,
                                                   &s->stats.outgoing));
-      grpc_chttp2_initiate_write(exec_ctx, t,
-                                 GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
+      grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_RST_STREAM);
     }
   }
   if (due_to_error != GRPC_ERROR_NONE && !s->seen_error) {
     s->seen_error = true;
   }
-  grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, due_to_error);
+  grpc_chttp2_mark_stream_closed(t, s, 1, 1, due_to_error);
 }
 
-void grpc_chttp2_fake_status(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                             grpc_chttp2_stream* s, grpc_error* error) {
+void grpc_chttp2_fake_status(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+                             grpc_error* error) {
   grpc_status_code status;
   grpc_slice slice;
-  grpc_error_get_status(exec_ctx, error, s->deadline, &status, &slice, nullptr,
-                        nullptr);
+  grpc_error_get_status(error, s->deadline, &status, &slice, nullptr, nullptr);
   if (status != GRPC_STATUS_OK) {
     s->seen_error = true;
   }
@@ -2100,20 +2003,20 @@
     gpr_ltoa(status, status_string);
     GRPC_LOG_IF_ERROR("add_status",
                       grpc_chttp2_incoming_metadata_buffer_replace_or_add(
-                          exec_ctx, &s->metadata_buffer[1],
+                          &s->metadata_buffer[1],
                           grpc_mdelem_from_slices(
-                              exec_ctx, GRPC_MDSTR_GRPC_STATUS,
+                              GRPC_MDSTR_GRPC_STATUS,
                               grpc_slice_from_copied_string(status_string))));
     if (!GRPC_SLICE_IS_EMPTY(slice)) {
       GRPC_LOG_IF_ERROR(
           "add_status_message",
           grpc_chttp2_incoming_metadata_buffer_replace_or_add(
-              exec_ctx, &s->metadata_buffer[1],
-              grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
+              &s->metadata_buffer[1],
+              grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_MESSAGE,
                                       grpc_slice_ref_internal(slice))));
     }
     s->published_metadata[1] = GRPC_METADATA_SYNTHESIZED_FROM_FAKE;
-    grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
+    grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s);
   }
 
   GRPC_ERROR_UNREF(error);
@@ -2146,14 +2049,12 @@
   return error;
 }
 
-static void flush_write_list(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                             grpc_chttp2_stream* s, grpc_chttp2_write_cb** list,
-                             grpc_error* error) {
+static void flush_write_list(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+                             grpc_chttp2_write_cb** list, grpc_error* error) {
   while (*list) {
     grpc_chttp2_write_cb* cb = *list;
     *list = cb->next;
-    grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure,
-                                      GRPC_ERROR_REF(error),
+    grpc_chttp2_complete_closure_step(t, s, &cb->closure, GRPC_ERROR_REF(error),
                                       "on_write_finished_cb");
     cb->next = t->write_cb_pool;
     t->write_cb_pool = cb;
@@ -2161,37 +2062,34 @@
   GRPC_ERROR_UNREF(error);
 }
 
-void grpc_chttp2_fail_pending_writes(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t,
+void grpc_chttp2_fail_pending_writes(grpc_chttp2_transport* t,
                                      grpc_chttp2_stream* s, grpc_error* error) {
   error =
       removal_error(error, s, "Pending writes failed due to stream closure");
   s->send_initial_metadata = nullptr;
-  grpc_chttp2_complete_closure_step(
-      exec_ctx, t, s, &s->send_initial_metadata_finished, GRPC_ERROR_REF(error),
-      "send_initial_metadata_finished");
+  grpc_chttp2_complete_closure_step(t, s, &s->send_initial_metadata_finished,
+                                    GRPC_ERROR_REF(error),
+                                    "send_initial_metadata_finished");
 
   s->send_trailing_metadata = nullptr;
-  grpc_chttp2_complete_closure_step(
-      exec_ctx, t, s, &s->send_trailing_metadata_finished,
-      GRPC_ERROR_REF(error), "send_trailing_metadata_finished");
+  grpc_chttp2_complete_closure_step(t, s, &s->send_trailing_metadata_finished,
+                                    GRPC_ERROR_REF(error),
+                                    "send_trailing_metadata_finished");
 
   s->fetching_send_message = nullptr;
-  grpc_chttp2_complete_closure_step(
-      exec_ctx, t, s, &s->fetching_send_message_finished, GRPC_ERROR_REF(error),
-      "fetching_send_message_finished");
-  flush_write_list(exec_ctx, t, s, &s->on_write_finished_cbs,
-                   GRPC_ERROR_REF(error));
-  flush_write_list(exec_ctx, t, s, &s->on_flow_controlled_cbs, error);
+  grpc_chttp2_complete_closure_step(t, s, &s->fetching_send_message_finished,
+                                    GRPC_ERROR_REF(error),
+                                    "fetching_send_message_finished");
+  flush_write_list(t, s, &s->on_write_finished_cbs, GRPC_ERROR_REF(error));
+  flush_write_list(t, s, &s->on_flow_controlled_cbs, error);
 }
 
-void grpc_chttp2_mark_stream_closed(grpc_exec_ctx* exec_ctx,
-                                    grpc_chttp2_transport* t,
+void grpc_chttp2_mark_stream_closed(grpc_chttp2_transport* t,
                                     grpc_chttp2_stream* s, int close_reads,
                                     int close_writes, grpc_error* error) {
   if (s->read_closed && s->write_closed) {
     /* already closed */
-    grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
+    grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s);
     GRPC_ERROR_UNREF(error);
     return;
   }
@@ -2205,20 +2103,20 @@
   if (close_writes && !s->write_closed) {
     s->write_closed_error = GRPC_ERROR_REF(error);
     s->write_closed = true;
-    grpc_chttp2_fail_pending_writes(exec_ctx, t, s, GRPC_ERROR_REF(error));
+    grpc_chttp2_fail_pending_writes(t, s, GRPC_ERROR_REF(error));
   }
   if (s->read_closed && s->write_closed) {
     became_closed = true;
     grpc_error* overall_error =
         removal_error(GRPC_ERROR_REF(error), s, "Stream removed");
     if (s->id != 0) {
-      remove_stream(exec_ctx, t, s->id, GRPC_ERROR_REF(overall_error));
+      remove_stream(t, s->id, GRPC_ERROR_REF(overall_error));
     } else {
       /* Purge streams waiting on concurrency that never got an id assigned */
       grpc_chttp2_list_remove_waiting_for_concurrency(t, s);
     }
     if (overall_error != GRPC_ERROR_NONE) {
-      grpc_chttp2_fake_status(exec_ctx, t, s, overall_error);
+      grpc_chttp2_fake_status(t, s, overall_error);
     }
   }
   if (closed_read) {
@@ -2227,18 +2125,18 @@
         s->published_metadata[i] = GPRC_METADATA_PUBLISHED_AT_CLOSE;
       }
     }
-    grpc_chttp2_maybe_complete_recv_initial_metadata(exec_ctx, t, s);
-    grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
+    grpc_chttp2_maybe_complete_recv_initial_metadata(t, s);
+    grpc_chttp2_maybe_complete_recv_message(t, s);
   }
   if (became_closed) {
-    grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
-    GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2");
+    grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s);
+    GRPC_CHTTP2_STREAM_UNREF(s, "chttp2");
   }
   GRPC_ERROR_UNREF(error);
 }
 
-static void close_from_api(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                           grpc_chttp2_stream* s, grpc_error* error) {
+static void close_from_api(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+                           grpc_error* error) {
   grpc_slice hdr;
   grpc_slice status_hdr;
   grpc_slice http_status_hdr;
@@ -2248,8 +2146,8 @@
   uint32_t len = 0;
   grpc_status_code grpc_status;
   grpc_slice slice;
-  grpc_error_get_status(exec_ctx, error, s->deadline, &grpc_status, &slice,
-                        nullptr, nullptr);
+  grpc_error_get_status(error, s->deadline, &grpc_status, &slice, nullptr,
+                        nullptr);
 
   GPR_ASSERT(grpc_status >= 0 && (int)grpc_status < 100);
 
@@ -2391,13 +2289,11 @@
       &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
                                               &s->stats.outgoing));
 
-  grpc_chttp2_mark_stream_closed(exec_ctx, t, s, 1, 1, error);
-  grpc_chttp2_initiate_write(exec_ctx, t,
-                             GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API);
+  grpc_chttp2_mark_stream_closed(t, s, 1, 1, error);
+  grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_CLOSE_FROM_API);
 }
 
 typedef struct {
-  grpc_exec_ctx* exec_ctx;
   grpc_error* error;
   grpc_chttp2_transport* t;
 } cancel_stream_cb_args;
@@ -2405,13 +2301,11 @@
 static void cancel_stream_cb(void* user_data, uint32_t key, void* stream) {
   cancel_stream_cb_args* args = (cancel_stream_cb_args*)user_data;
   grpc_chttp2_stream* s = (grpc_chttp2_stream*)stream;
-  grpc_chttp2_cancel_stream(args->exec_ctx, args->t, s,
-                            GRPC_ERROR_REF(args->error));
+  grpc_chttp2_cancel_stream(args->t, s, GRPC_ERROR_REF(args->error));
 }
 
-static void end_all_the_calls(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                              grpc_error* error) {
-  cancel_stream_cb_args args = {exec_ctx, error, t};
+static void end_all_the_calls(grpc_chttp2_transport* t, grpc_error* error) {
+  cancel_stream_cb_args args = {error, t};
   grpc_chttp2_stream_map_for_each(&t->stream_map, cancel_stream_cb, &args);
   GRPC_ERROR_UNREF(error);
 }
@@ -2421,14 +2315,14 @@
  */
 
 template <class F>
-static void WithUrgency(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+static void WithUrgency(grpc_chttp2_transport* t,
                         grpc_core::chttp2::FlowControlAction::Urgency urgency,
                         grpc_chttp2_initiate_write_reason reason, F action) {
   switch (urgency) {
     case grpc_core::chttp2::FlowControlAction::Urgency::NO_ACTION_NEEDED:
       break;
     case grpc_core::chttp2::FlowControlAction::Urgency::UPDATE_IMMEDIATELY:
-      grpc_chttp2_initiate_write(exec_ctx, t, reason);
+      grpc_chttp2_initiate_write(t, reason);
     // fallthrough
     case grpc_core::chttp2::FlowControlAction::Urgency::QUEUE_UPDATE:
       action();
@@ -2437,31 +2331,27 @@
 }
 
 void grpc_chttp2_act_on_flowctl_action(
-    grpc_exec_ctx* exec_ctx, const grpc_core::chttp2::FlowControlAction& action,
+    const grpc_core::chttp2::FlowControlAction& action,
     grpc_chttp2_transport* t, grpc_chttp2_stream* s) {
-  WithUrgency(
-      exec_ctx, t, action.send_stream_update(),
-      GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
-      [exec_ctx, t, s]() { grpc_chttp2_mark_stream_writable(exec_ctx, t, s); });
-  WithUrgency(exec_ctx, t, action.send_transport_update(),
+  WithUrgency(t, action.send_stream_update(),
+              GRPC_CHTTP2_INITIATE_WRITE_STREAM_FLOW_CONTROL,
+              [t, s]() { grpc_chttp2_mark_stream_writable(t, s); });
+  WithUrgency(t, action.send_transport_update(),
               GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL, []() {});
-  WithUrgency(exec_ctx, t, action.send_initial_window_update(),
-              GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS,
-              [exec_ctx, t, &action]() {
-                queue_setting_update(exec_ctx, t,
+  WithUrgency(t, action.send_initial_window_update(),
+              GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, [t, &action]() {
+                queue_setting_update(t,
                                      GRPC_CHTTP2_SETTINGS_INITIAL_WINDOW_SIZE,
                                      action.initial_window_size());
               });
-  WithUrgency(
-      exec_ctx, t, action.send_max_frame_size_update(),
-      GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, [exec_ctx, t, &action]() {
-        queue_setting_update(exec_ctx, t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
-                             action.max_frame_size());
-      });
+  WithUrgency(t, action.send_max_frame_size_update(),
+              GRPC_CHTTP2_INITIATE_WRITE_SEND_SETTINGS, [t, &action]() {
+                queue_setting_update(t, GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE,
+                                     action.max_frame_size());
+              });
 }
 
-static grpc_error* try_http_parsing(grpc_exec_ctx* exec_ctx,
-                                    grpc_chttp2_transport* t) {
+static grpc_error* try_http_parsing(grpc_chttp2_transport* t) {
   grpc_http_parser parser;
   size_t i = 0;
   grpc_error* error = GRPC_ERROR_NONE;
@@ -2490,8 +2380,7 @@
   return error;
 }
 
-static void read_action_locked(grpc_exec_ctx* exec_ctx, void* tp,
-                               grpc_error* error) {
+static void read_action_locked(void* tp, grpc_error* error) {
   GPR_TIMER_BEGIN("reading_action_locked", 0);
 
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
@@ -2515,11 +2404,10 @@
     for (; i < t->read_buffer.count && errors[1] == GRPC_ERROR_NONE; i++) {
       t->flow_control->bdp_estimator()->AddIncomingBytes(
           (int64_t)GRPC_SLICE_LENGTH(t->read_buffer.slices[i]));
-      errors[1] =
-          grpc_chttp2_perform_read(exec_ctx, t, t->read_buffer.slices[i]);
+      errors[1] = grpc_chttp2_perform_read(t, t->read_buffer.slices[i]);
     }
     if (errors[1] != GRPC_ERROR_NONE) {
-      errors[2] = try_http_parsing(exec_ctx, t);
+      errors[2] = try_http_parsing(t);
       GRPC_ERROR_UNREF(error);
       error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
           "Failed parsing HTTP/2", errors, GPR_ARRAY_SIZE(errors));
@@ -2534,10 +2422,9 @@
       if (t->initial_window_update > 0) {
         grpc_chttp2_stream* s;
         while (grpc_chttp2_list_pop_stalled_by_stream(t, &s)) {
-          grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+          grpc_chttp2_mark_stream_writable(t, s);
           grpc_chttp2_initiate_write(
-              exec_ctx, t,
-              GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING);
+              t, GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_SETTING);
         }
       }
       t->initial_window_update = 0;
@@ -2558,22 +2445,21 @@
       error = grpc_error_add_child(error, GRPC_ERROR_REF(t->goaway_error));
     }
 
-    close_transport_locked(exec_ctx, t, GRPC_ERROR_REF(error));
+    close_transport_locked(t, GRPC_ERROR_REF(error));
     t->endpoint_reading = 0;
   } else if (t->closed_with_error == GRPC_ERROR_NONE) {
     keep_reading = true;
     GRPC_CHTTP2_REF_TRANSPORT(t, "keep_reading");
   }
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &t->read_buffer);
+  grpc_slice_buffer_reset_and_unref_internal(&t->read_buffer);
 
   if (keep_reading) {
-    grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer,
-                       &t->read_action_locked);
-    grpc_chttp2_act_on_flowctl_action(exec_ctx, t->flow_control->MakeAction(),
-                                      t, nullptr);
-    GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
+    grpc_endpoint_read(t->ep, &t->read_buffer, &t->read_action_locked);
+    grpc_chttp2_act_on_flowctl_action(t->flow_control->MakeAction(), t,
+                                      nullptr);
+    GRPC_CHTTP2_UNREF_TRANSPORT(t, "keep_reading");
   } else {
-    GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action");
+    GRPC_CHTTP2_UNREF_TRANSPORT(t, "reading_action");
   }
 
   GPR_TIMER_END("post_reading_action_locked", 0);
@@ -2585,15 +2471,12 @@
 
 // t is reffed prior to the first call; once the callback chain it kicks off
 // finishes, it is unreffed
-static void schedule_bdp_ping_locked(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t) {
+static void schedule_bdp_ping_locked(grpc_chttp2_transport* t) {
   t->flow_control->bdp_estimator()->SchedulePing();
-  send_ping_locked(exec_ctx, t, &t->start_bdp_ping_locked,
-                   &t->finish_bdp_ping_locked);
+  send_ping_locked(t, &t->start_bdp_ping_locked, &t->finish_bdp_ping_locked);
 }
 
-static void start_bdp_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
-                                  grpc_error* error) {
+static void start_bdp_ping_locked(void* tp, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
   if (grpc_http_trace.enabled()) {
     gpr_log(GPR_DEBUG, "%s: Start BDP ping err=%s", t->peer_string,
@@ -2601,42 +2484,39 @@
   }
   /* Reset the keepalive ping timer */
   if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING) {
-    grpc_timer_cancel(exec_ctx, &t->keepalive_ping_timer);
+    grpc_timer_cancel(&t->keepalive_ping_timer);
   }
   t->flow_control->bdp_estimator()->StartPing();
 }
 
-static void finish_bdp_ping_locked(grpc_exec_ctx* exec_ctx, void* tp,
-                                   grpc_error* error) {
+static void finish_bdp_ping_locked(void* tp, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
   if (grpc_http_trace.enabled()) {
     gpr_log(GPR_DEBUG, "%s: Complete BDP ping err=%s", t->peer_string,
             grpc_error_string(error));
   }
   if (error != GRPC_ERROR_NONE) {
-    GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
+    GRPC_CHTTP2_UNREF_TRANSPORT(t, "bdp_ping");
     return;
   }
-  grpc_millis next_ping =
-      t->flow_control->bdp_estimator()->CompletePing(exec_ctx);
-  grpc_chttp2_act_on_flowctl_action(
-      exec_ctx, t->flow_control->PeriodicUpdate(exec_ctx), t, nullptr);
+  grpc_millis next_ping = t->flow_control->bdp_estimator()->CompletePing();
+  grpc_chttp2_act_on_flowctl_action(t->flow_control->PeriodicUpdate(), t,
+                                    nullptr);
   GPR_ASSERT(!t->have_next_bdp_ping_timer);
   t->have_next_bdp_ping_timer = true;
-  grpc_timer_init(exec_ctx, &t->next_bdp_ping_timer, next_ping,
+  grpc_timer_init(&t->next_bdp_ping_timer, next_ping,
                   &t->next_bdp_ping_timer_expired_locked);
 }
 
-static void next_bdp_ping_timer_expired_locked(grpc_exec_ctx* exec_ctx,
-                                               void* tp, grpc_error* error) {
+static void next_bdp_ping_timer_expired_locked(void* tp, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
   GPR_ASSERT(t->have_next_bdp_ping_timer);
   t->have_next_bdp_ping_timer = false;
   if (error != GRPC_ERROR_NONE) {
-    GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "bdp_ping");
+    GRPC_CHTTP2_UNREF_TRANSPORT(t, "bdp_ping");
     return;
   }
-  schedule_bdp_ping_locked(exec_ctx, t);
+  schedule_bdp_ping_locked(t);
 }
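
For orientation, the four callbacks above form the BDP probing loop; the comment block below only restates the flow as shown, it proposes no new behavior:

    // schedule_bdp_ping_locked(t)
    //   -> bdp_estimator()->SchedulePing(); send_ping_locked(t, start, finish)
    // start_bdp_ping_locked(t)             // ping frame written
    //   -> cancel a pending keepalive timer; bdp_estimator()->StartPing()
    // finish_bdp_ping_locked(t)            // peer acked the ping
    //   -> CompletePing() yields the next probe time; PeriodicUpdate() may
    //      trigger flow-control updates; grpc_timer_init() arms
    //      next_bdp_ping_timer
    // next_bdp_ping_timer_expired_locked(t)
    //   -> schedule_bdp_ping_locked(t) again (unless the timer was cancelled)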
 
 void grpc_chttp2_config_default_keepalive_args(grpc_channel_args* args,
@@ -2646,7 +2526,9 @@
     for (i = 0; i < args->num_args; i++) {
       if (0 == strcmp(args->args[i].key, GRPC_ARG_KEEPALIVE_TIME_MS)) {
         const int value = grpc_channel_arg_get_integer(
-            &args->args[i], {g_default_client_keepalive_time_ms, 1, INT_MAX});
+            &args->args[i], {is_client ? g_default_client_keepalive_time_ms
+                                       : g_default_server_keepalive_time_ms,
+                             1, INT_MAX});
         if (is_client) {
           g_default_client_keepalive_time_ms = value;
         } else {
@@ -2655,8 +2537,9 @@
       } else if (0 ==
                  strcmp(args->args[i].key, GRPC_ARG_KEEPALIVE_TIMEOUT_MS)) {
         const int value = grpc_channel_arg_get_integer(
-            &args->args[i],
-            {g_default_client_keepalive_timeout_ms, 0, INT_MAX});
+            &args->args[i], {is_client ? g_default_client_keepalive_timeout_ms
+                                       : g_default_server_keepalive_timeout_ms,
+                             0, INT_MAX});
         if (is_client) {
           g_default_client_keepalive_timeout_ms = value;
         } else {
@@ -2664,10 +2547,16 @@
         }
       } else if (0 == strcmp(args->args[i].key,
                              GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS)) {
-        g_default_keepalive_permit_without_calls =
-            (uint32_t)grpc_channel_arg_get_integer(
-                &args->args[i],
-                {g_default_keepalive_permit_without_calls, 0, 1});
+        const bool value = (uint32_t)grpc_channel_arg_get_integer(
+            &args->args[i],
+            {is_client ? g_default_client_keepalive_permit_without_calls
+                       : g_default_server_keepalive_permit_without_calls,
+             0, 1});
+        if (is_client) {
+          g_default_client_keepalive_permit_without_calls = value;
+        } else {
+          g_default_server_keepalive_permit_without_calls = value;
+        }
       } else if (0 ==
                  strcmp(args->args[i].key, GRPC_ARG_HTTP2_MAX_PING_STRIKES)) {
         g_default_max_ping_strikes = grpc_channel_arg_get_integer(
@@ -2697,8 +2586,7 @@
   }
 }
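
The keys parsed above (GRPC_ARG_KEEPALIVE_TIME_MS, GRPC_ARG_KEEPALIVE_TIMEOUT_MS, GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS) are ordinary channel args. A hedged client-side C++ sketch of supplying them, with the target and values chosen only for illustration:

    #include <grpc++/grpc++.h>

    // Sketch: build a channel whose transport runs the keepalive machinery in
    // this file with explicit settings instead of the defaults.
    std::shared_ptr<grpc::Channel> MakeKeepaliveChannel(
        const std::string& target) {
      grpc::ChannelArguments args;
      args.SetInt(GRPC_ARG_KEEPALIVE_TIME_MS, 20000);     // ping every 20s
      args.SetInt(GRPC_ARG_KEEPALIVE_TIMEOUT_MS, 10000);  // wait 10s for the ack
      args.SetInt(GRPC_ARG_KEEPALIVE_PERMIT_WITHOUT_CALLS, 1);  // ping when idle
      return grpc::CreateCustomChannel(
          target, grpc::InsecureChannelCredentials(), args);
    }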
 
-static void init_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                       grpc_error* error) {
+static void init_keepalive_ping_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
   GPR_ASSERT(t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_WAITING);
   if (t->destroying || t->closed_with_error != GRPC_ERROR_NONE) {
@@ -2708,59 +2596,55 @@
         grpc_chttp2_stream_map_size(&t->stream_map) > 0) {
       t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_PINGING;
       GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive ping end");
-      send_ping_locked(exec_ctx, t, &t->start_keepalive_ping_locked,
+      send_ping_locked(t, &t->start_keepalive_ping_locked,
                        &t->finish_keepalive_ping_locked);
-      grpc_chttp2_initiate_write(exec_ctx, t,
-                                 GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
+      grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_KEEPALIVE_PING);
     } else {
       GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
-      grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
-                      grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+      grpc_timer_init(&t->keepalive_ping_timer,
+                      grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                       &t->init_keepalive_ping_locked);
     }
   } else if (error == GRPC_ERROR_CANCELLED) {
     /* The keepalive ping timer may be cancelled by bdp */
     GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
-    grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
-                    grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+    grpc_timer_init(&t->keepalive_ping_timer,
+                    grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                     &t->init_keepalive_ping_locked);
   }
-  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "init keepalive ping");
+  GRPC_CHTTP2_UNREF_TRANSPORT(t, "init keepalive ping");
 }
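
The deadline arithmetic above also shows the other half of the exec_ctx removal: instead of threading grpc_exec_ctx* through every call, code reads the thread-local context via grpc_core::ExecCtx::Get(). A minimal sketch of the idiom as used for timers here; in the transport itself the ExecCtx already exists (it is established at the surface-API / pollset boundary), so this standalone helper is purely hypothetical:

    // Sketch: establish a scoped ExecCtx, then read the cached clock from the
    // thread-local when arming a timer.
    static void arm_keepalive_timer_example(grpc_chttp2_transport* t) {
      grpc_core::ExecCtx exec_ctx;  // scope guard; flushed on destruction
      grpc_timer_init(&t->keepalive_ping_timer,
                      grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                      &t->init_keepalive_ping_locked);
    }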
 
-static void start_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void start_keepalive_ping_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
   GRPC_CHTTP2_REF_TRANSPORT(t, "keepalive watchdog");
-  grpc_timer_init(exec_ctx, &t->keepalive_watchdog_timer,
-                  grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+  grpc_timer_init(&t->keepalive_watchdog_timer,
+                  grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                   &t->keepalive_watchdog_fired_locked);
 }
 
-static void finish_keepalive_ping_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                         grpc_error* error) {
+static void finish_keepalive_ping_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
   if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
     if (error == GRPC_ERROR_NONE) {
       t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_WAITING;
-      grpc_timer_cancel(exec_ctx, &t->keepalive_watchdog_timer);
+      grpc_timer_cancel(&t->keepalive_watchdog_timer);
       GRPC_CHTTP2_REF_TRANSPORT(t, "init keepalive ping");
-      grpc_timer_init(exec_ctx, &t->keepalive_ping_timer,
-                      grpc_exec_ctx_now(exec_ctx) + t->keepalive_time,
+      grpc_timer_init(&t->keepalive_ping_timer,
+                      grpc_core::ExecCtx::Get()->Now() + t->keepalive_time,
                       &t->init_keepalive_ping_locked);
     }
   }
-  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive ping end");
+  GRPC_CHTTP2_UNREF_TRANSPORT(t, "keepalive ping end");
 }
 
-static void keepalive_watchdog_fired_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                            grpc_error* error) {
+static void keepalive_watchdog_fired_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
   if (t->keepalive_state == GRPC_CHTTP2_KEEPALIVE_STATE_PINGING) {
     if (error == GRPC_ERROR_NONE) {
       t->keepalive_state = GRPC_CHTTP2_KEEPALIVE_STATE_DYING;
       close_transport_locked(
-          exec_ctx, t,
+          t,
           grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                                  "keepalive watchdog timeout"),
                              GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_INTERNAL));
@@ -2773,71 +2657,67 @@
               t->keepalive_state, GRPC_CHTTP2_KEEPALIVE_STATE_PINGING);
     }
   }
-  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keepalive watchdog");
+  GRPC_CHTTP2_UNREF_TRANSPORT(t, "keepalive watchdog");
 }
 
 /*******************************************************************************
  * CALLBACK LOOP
  */
 
-static void connectivity_state_set(grpc_exec_ctx* exec_ctx,
-                                   grpc_chttp2_transport* t,
+static void connectivity_state_set(grpc_chttp2_transport* t,
                                    grpc_connectivity_state state,
                                    grpc_error* error, const char* reason) {
   GRPC_CHTTP2_IF_TRACING(
       gpr_log(GPR_DEBUG, "set connectivity_state=%d", state));
-  grpc_connectivity_state_set(exec_ctx, &t->channel_callback.state_tracker,
-                              state, error, reason);
+  grpc_connectivity_state_set(&t->channel_callback.state_tracker, state, error,
+                              reason);
 }
 
 /*******************************************************************************
  * POLLSET STUFF
  */
 
-static void set_pollset(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                        grpc_stream* gs, grpc_pollset* pollset) {
+static void set_pollset(grpc_transport* gt, grpc_stream* gs,
+                        grpc_pollset* pollset) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
-  grpc_endpoint_add_to_pollset(exec_ctx, t->ep, pollset);
+  grpc_endpoint_add_to_pollset(t->ep, pollset);
 }
 
-static void set_pollset_set(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                            grpc_stream* gs, grpc_pollset_set* pollset_set) {
+static void set_pollset_set(grpc_transport* gt, grpc_stream* gs,
+                            grpc_pollset_set* pollset_set) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)gt;
-  grpc_endpoint_add_to_pollset_set(exec_ctx, t->ep, pollset_set);
+  grpc_endpoint_add_to_pollset_set(t->ep, pollset_set);
 }
 
 /*******************************************************************************
  * BYTE STREAM
  */
 
-static void reset_byte_stream(grpc_exec_ctx* exec_ctx, void* arg,
-                              grpc_error* error) {
+static void reset_byte_stream(void* arg, grpc_error* error) {
   grpc_chttp2_stream* s = (grpc_chttp2_stream*)arg;
 
   s->pending_byte_stream = false;
   if (error == GRPC_ERROR_NONE) {
-    grpc_chttp2_maybe_complete_recv_message(exec_ctx, s->t, s);
-    grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, s->t, s);
+    grpc_chttp2_maybe_complete_recv_message(s->t, s);
+    grpc_chttp2_maybe_complete_recv_trailing_metadata(s->t, s);
   } else {
     GPR_ASSERT(error != GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(s->on_next, GRPC_ERROR_REF(error));
     s->on_next = nullptr;
     GRPC_ERROR_UNREF(s->byte_stream_error);
     s->byte_stream_error = GRPC_ERROR_NONE;
-    grpc_chttp2_cancel_stream(exec_ctx, s->t, s, GRPC_ERROR_REF(error));
+    grpc_chttp2_cancel_stream(s->t, s, GRPC_ERROR_REF(error));
     s->byte_stream_error = GRPC_ERROR_REF(error);
   }
 }
 
-static void incoming_byte_stream_unref(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_incoming_byte_stream* bs) {
+static void incoming_byte_stream_unref(grpc_chttp2_incoming_byte_stream* bs) {
   if (gpr_unref(&bs->refs)) {
     gpr_free(bs);
   }
 }
 
-static void incoming_byte_stream_next_locked(grpc_exec_ctx* exec_ctx,
-                                             void* argp,
+static void incoming_byte_stream_next_locked(void* argp,
                                              grpc_error* error_ignored) {
   grpc_chttp2_incoming_byte_stream* bs =
       (grpc_chttp2_incoming_byte_stream*)argp;
@@ -2848,30 +2728,29 @@
   if (!s->read_closed) {
     s->flow_control->IncomingByteStreamUpdate(bs->next_action.max_size_hint,
                                               cur_length);
-    grpc_chttp2_act_on_flowctl_action(exec_ctx, s->flow_control->MakeAction(),
-                                      t, s);
+    grpc_chttp2_act_on_flowctl_action(s->flow_control->MakeAction(), t, s);
   }
   GPR_ASSERT(s->unprocessed_incoming_frames_buffer.length == 0);
   if (s->frame_storage.length > 0) {
     grpc_slice_buffer_swap(&s->frame_storage,
                            &s->unprocessed_incoming_frames_buffer);
     s->unprocessed_incoming_frames_decompressed = false;
-    GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(bs->next_action.on_complete, GRPC_ERROR_NONE);
   } else if (s->byte_stream_error != GRPC_ERROR_NONE) {
-    GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete,
+    GRPC_CLOSURE_SCHED(bs->next_action.on_complete,
                        GRPC_ERROR_REF(s->byte_stream_error));
     if (s->data_parser.parsing_frame != nullptr) {
-      incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame);
+      incoming_byte_stream_unref(s->data_parser.parsing_frame);
       s->data_parser.parsing_frame = nullptr;
     }
   } else if (s->read_closed) {
     if (bs->remaining_bytes != 0) {
       s->byte_stream_error =
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
-      GRPC_CLOSURE_SCHED(exec_ctx, bs->next_action.on_complete,
+      GRPC_CLOSURE_SCHED(bs->next_action.on_complete,
                          GRPC_ERROR_REF(s->byte_stream_error));
       if (s->data_parser.parsing_frame != nullptr) {
-        incoming_byte_stream_unref(exec_ctx, s->data_parser.parsing_frame);
+        incoming_byte_stream_unref(s->data_parser.parsing_frame);
         s->data_parser.parsing_frame = nullptr;
       }
     } else {
@@ -2881,11 +2760,10 @@
   } else {
     s->on_next = bs->next_action.on_complete;
   }
-  incoming_byte_stream_unref(exec_ctx, bs);
+  incoming_byte_stream_unref(bs);
 }
 
-static bool incoming_byte_stream_next(grpc_exec_ctx* exec_ctx,
-                                      grpc_byte_stream* byte_stream,
+static bool incoming_byte_stream_next(grpc_byte_stream* byte_stream,
                                       size_t max_size_hint,
                                       grpc_closure* on_complete) {
   GPR_TIMER_BEGIN("incoming_byte_stream_next", 0);
@@ -2900,7 +2778,6 @@
     bs->next_action.max_size_hint = max_size_hint;
     bs->next_action.on_complete = on_complete;
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_INIT(&bs->next_action.closure,
                           incoming_byte_stream_next_locked, bs,
                           grpc_combiner_scheduler(bs->transport->combiner)),
@@ -2910,8 +2787,7 @@
   }
 }
 
-static grpc_error* incoming_byte_stream_pull(grpc_exec_ctx* exec_ctx,
-                                             grpc_byte_stream* byte_stream,
+static grpc_error* incoming_byte_stream_pull(grpc_byte_stream* byte_stream,
                                              grpc_slice* slice) {
   GPR_TIMER_BEGIN("incoming_byte_stream_pull", 0);
   grpc_chttp2_incoming_byte_stream* bs =
@@ -2947,31 +2823,28 @@
       }
     }
     error = grpc_deframe_unprocessed_incoming_frames(
-        exec_ctx, &s->data_parser, s, &s->unprocessed_incoming_frames_buffer,
-        slice, nullptr);
+        &s->data_parser, s, &s->unprocessed_incoming_frames_buffer, slice,
+        nullptr);
     if (error != GRPC_ERROR_NONE) {
       return error;
     }
   } else {
     error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Truncated message");
-    GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(&s->reset_byte_stream, GRPC_ERROR_REF(error));
     return error;
   }
   GPR_TIMER_END("incoming_byte_stream_pull", 0);
   return GRPC_ERROR_NONE;
 }
 
-static void incoming_byte_stream_destroy_locked(grpc_exec_ctx* exec_ctx,
-                                                void* byte_stream,
+static void incoming_byte_stream_destroy_locked(void* byte_stream,
                                                 grpc_error* error_ignored);
 
-static void incoming_byte_stream_destroy(grpc_exec_ctx* exec_ctx,
-                                         grpc_byte_stream* byte_stream) {
+static void incoming_byte_stream_destroy(grpc_byte_stream* byte_stream) {
   GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0);
   grpc_chttp2_incoming_byte_stream* bs =
       (grpc_chttp2_incoming_byte_stream*)byte_stream;
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       GRPC_CLOSURE_INIT(&bs->destroy_action,
                         incoming_byte_stream_destroy_locked, bs,
                         grpc_combiner_scheduler(bs->transport->combiner)),
@@ -2980,30 +2853,28 @@
 }
 
 static void incoming_byte_stream_publish_error(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
-    grpc_error* error) {
+    grpc_chttp2_incoming_byte_stream* bs, grpc_error* error) {
   grpc_chttp2_stream* s = bs->stream;
 
   GPR_ASSERT(error != GRPC_ERROR_NONE);
-  GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_SCHED(s->on_next, GRPC_ERROR_REF(error));
   s->on_next = nullptr;
   GRPC_ERROR_UNREF(s->byte_stream_error);
   s->byte_stream_error = GRPC_ERROR_REF(error);
-  grpc_chttp2_cancel_stream(exec_ctx, bs->transport, bs->stream,
-                            GRPC_ERROR_REF(error));
+  grpc_chttp2_cancel_stream(bs->transport, bs->stream, GRPC_ERROR_REF(error));
 }
 
 grpc_error* grpc_chttp2_incoming_byte_stream_push(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
-    grpc_slice slice, grpc_slice* slice_out) {
+    grpc_chttp2_incoming_byte_stream* bs, grpc_slice slice,
+    grpc_slice* slice_out) {
   grpc_chttp2_stream* s = bs->stream;
 
   if (bs->remaining_bytes < GRPC_SLICE_LENGTH(slice)) {
     grpc_error* error =
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Too many bytes in stream");
 
-    GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
-    grpc_slice_unref_internal(exec_ctx, slice);
+    GRPC_CLOSURE_SCHED(&s->reset_byte_stream, GRPC_ERROR_REF(error));
+    grpc_slice_unref_internal(slice);
     return error;
   } else {
     bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice);
@@ -3015,8 +2886,8 @@
 }
 
 grpc_error* grpc_chttp2_incoming_byte_stream_finished(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
-    grpc_error* error, bool reset_on_error) {
+    grpc_chttp2_incoming_byte_stream* bs, grpc_error* error,
+    bool reset_on_error) {
   grpc_chttp2_stream* s = bs->stream;
 
   if (error == GRPC_ERROR_NONE) {
@@ -3025,27 +2896,25 @@
     }
   }
   if (error != GRPC_ERROR_NONE && reset_on_error) {
-    GRPC_CLOSURE_SCHED(exec_ctx, &s->reset_byte_stream, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(&s->reset_byte_stream, GRPC_ERROR_REF(error));
   }
-  incoming_byte_stream_unref(exec_ctx, bs);
+  incoming_byte_stream_unref(bs);
   return error;
 }
 
-static void incoming_byte_stream_shutdown(grpc_exec_ctx* exec_ctx,
-                                          grpc_byte_stream* byte_stream,
+static void incoming_byte_stream_shutdown(grpc_byte_stream* byte_stream,
                                           grpc_error* error) {
   grpc_chttp2_incoming_byte_stream* bs =
       (grpc_chttp2_incoming_byte_stream*)byte_stream;
   GRPC_ERROR_UNREF(grpc_chttp2_incoming_byte_stream_finished(
-      exec_ctx, bs, error, true /* reset_on_error */));
+      bs, error, true /* reset_on_error */));
 }
 
 static const grpc_byte_stream_vtable grpc_chttp2_incoming_byte_stream_vtable = {
     incoming_byte_stream_next, incoming_byte_stream_pull,
     incoming_byte_stream_shutdown, incoming_byte_stream_destroy};
 
-static void incoming_byte_stream_destroy_locked(grpc_exec_ctx* exec_ctx,
-                                                void* byte_stream,
+static void incoming_byte_stream_destroy_locked(void* byte_stream,
                                                 grpc_error* error_ignored) {
   grpc_chttp2_incoming_byte_stream* bs =
       (grpc_chttp2_incoming_byte_stream*)byte_stream;
@@ -3053,15 +2922,15 @@
   grpc_chttp2_transport* t = s->t;
 
   GPR_ASSERT(bs->base.vtable == &grpc_chttp2_incoming_byte_stream_vtable);
-  incoming_byte_stream_unref(exec_ctx, bs);
+  incoming_byte_stream_unref(bs);
   s->pending_byte_stream = false;
-  grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
-  grpc_chttp2_maybe_complete_recv_trailing_metadata(exec_ctx, t, s);
+  grpc_chttp2_maybe_complete_recv_message(t, s);
+  grpc_chttp2_maybe_complete_recv_trailing_metadata(t, s);
 }
 
 grpc_chttp2_incoming_byte_stream* grpc_chttp2_incoming_byte_stream_create(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t, grpc_chttp2_stream* s,
-    uint32_t frame_size, uint32_t flags) {
+    grpc_chttp2_transport* t, grpc_chttp2_stream* s, uint32_t frame_size,
+    uint32_t flags) {
   grpc_chttp2_incoming_byte_stream* incoming_byte_stream =
       (grpc_chttp2_incoming_byte_stream*)gpr_malloc(
           sizeof(*incoming_byte_stream));
@@ -3081,30 +2950,25 @@
  * RESOURCE QUOTAS
  */
 
-static void post_benign_reclaimer(grpc_exec_ctx* exec_ctx,
-                                  grpc_chttp2_transport* t) {
+static void post_benign_reclaimer(grpc_chttp2_transport* t) {
   if (!t->benign_reclaimer_registered) {
     t->benign_reclaimer_registered = true;
     GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
-    grpc_resource_user_post_reclaimer(exec_ctx,
-                                      grpc_endpoint_get_resource_user(t->ep),
+    grpc_resource_user_post_reclaimer(grpc_endpoint_get_resource_user(t->ep),
                                       false, &t->benign_reclaimer_locked);
   }
 }
 
-static void post_destructive_reclaimer(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_transport* t) {
+static void post_destructive_reclaimer(grpc_chttp2_transport* t) {
   if (!t->destructive_reclaimer_registered) {
     t->destructive_reclaimer_registered = true;
     GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
-    grpc_resource_user_post_reclaimer(exec_ctx,
-                                      grpc_endpoint_get_resource_user(t->ep),
+    grpc_resource_user_post_reclaimer(grpc_endpoint_get_resource_user(t->ep),
                                       true, &t->destructive_reclaimer_locked);
   }
 }
 
-static void benign_reclaimer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                    grpc_error* error) {
+static void benign_reclaimer_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
   if (error == GRPC_ERROR_NONE &&
       grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
@@ -3114,7 +2978,7 @@
       gpr_log(GPR_DEBUG, "HTTP2: %s - send goaway to free memory",
               t->peer_string);
     }
-    send_goaway(exec_ctx, t,
+    send_goaway(t,
                 grpc_error_set_int(
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
                     GRPC_ERROR_INT_HTTP2_ERROR, GRPC_HTTP2_ENHANCE_YOUR_CALM));
@@ -3127,13 +2991,12 @@
   t->benign_reclaimer_registered = false;
   if (error != GRPC_ERROR_CANCELLED) {
     grpc_resource_user_finish_reclamation(
-        exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+        grpc_endpoint_get_resource_user(t->ep));
   }
-  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "benign_reclaimer");
+  GRPC_CHTTP2_UNREF_TRANSPORT(t, "benign_reclaimer");
 }
 
-static void destructive_reclaimer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                         grpc_error* error) {
+static void destructive_reclaimer_locked(void* arg, grpc_error* error) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)arg;
   size_t n = grpc_chttp2_stream_map_size(&t->stream_map);
   t->destructive_reclaimer_registered = false;
@@ -3145,7 +3008,7 @@
               s->id);
     }
     grpc_chttp2_cancel_stream(
-        exec_ctx, t, s,
+        t, s,
         grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING("Buffers full"),
                            GRPC_ERROR_INT_HTTP2_ERROR,
                            GRPC_HTTP2_ENHANCE_YOUR_CALM));
@@ -3154,14 +3017,14 @@
          there are more streams left, we can immediately post a new
          reclaimer in case the resource quota needs to free more
          memory */
-      post_destructive_reclaimer(exec_ctx, t);
+      post_destructive_reclaimer(t);
     }
   }
   if (error != GRPC_ERROR_CANCELLED) {
     grpc_resource_user_finish_reclamation(
-        exec_ctx, grpc_endpoint_get_resource_user(t->ep));
+        grpc_endpoint_get_resource_user(t->ep));
   }
-  GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "destructive_reclaimer");
+  GRPC_CHTTP2_UNREF_TRANSPORT(t, "destructive_reclaimer");
 }
 
 /*******************************************************************************
@@ -3215,8 +3078,7 @@
   GPR_UNREACHABLE_CODE(return "unknown");
 }
 
-static grpc_endpoint* chttp2_get_endpoint(grpc_exec_ctx* exec_ctx,
-                                          grpc_transport* t) {
+static grpc_endpoint* chttp2_get_endpoint(grpc_transport* t) {
   return ((grpc_chttp2_transport*)t)->ep;
 }
 
@@ -3234,17 +3096,16 @@
 static const grpc_transport_vtable* get_vtable(void) { return &vtable; }
 
 grpc_transport* grpc_create_chttp2_transport(
-    grpc_exec_ctx* exec_ctx, const grpc_channel_args* channel_args,
-    grpc_endpoint* ep, bool is_client) {
+    const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client) {
   grpc_chttp2_transport* t =
       (grpc_chttp2_transport*)gpr_zalloc(sizeof(grpc_chttp2_transport));
-  init_transport(exec_ctx, t, channel_args, ep, is_client);
+  init_transport(t, channel_args, ep, is_client);
   return &t->base;
 }
 
 void grpc_chttp2_transport_start_reading(
-    grpc_exec_ctx* exec_ctx, grpc_transport* transport,
-    grpc_slice_buffer* read_buffer, grpc_closure* notify_on_receive_settings) {
+    grpc_transport* transport, grpc_slice_buffer* read_buffer,
+    grpc_closure* notify_on_receive_settings) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)transport;
   GRPC_CHTTP2_REF_TRANSPORT(
       t, "reading_action"); /* matches unref inside reading_action */
@@ -3253,5 +3114,5 @@
     gpr_free(read_buffer);
   }
   t->notify_on_receive_settings = notify_on_receive_settings;
-  GRPC_CLOSURE_SCHED(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(&t->read_action_locked, GRPC_ERROR_NONE);
 }
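
The change that repeats throughout chttp2_transport.cc is mechanical: the explicit `grpc_exec_ctx*` parameter is dropped from every signature, and time/scheduling state is read through the thread-local `grpc_core::ExecCtx::Get()` instead (e.g. `grpc_exec_ctx_now(exec_ctx)` becomes `grpc_core::ExecCtx::Get()->Now()`). Below is a minimal standalone sketch of that pattern; the `ExecCtx` class here is a toy stand-in, not gRPC's real implementation.

    #include <cstdint>
    #include <cstdio>

    // Toy stand-in for grpc_core::ExecCtx: a per-thread context installed on
    // the stack at the top of a call chain and fetched via Get() lower down.
    class ExecCtx {
     public:
      ExecCtx() { current_ = this; }
      ~ExecCtx() { current_ = nullptr; }
      static ExecCtx* Get() { return current_; }
      int64_t Now() const { return now_; }  // cached clock reading (placeholder)
     private:
      int64_t now_ = 1000;
      static thread_local ExecCtx* current_;
    };
    thread_local ExecCtx* ExecCtx::current_ = nullptr;

    // Old style: the context is threaded through every call explicitly.
    int64_t keepalive_deadline_old(ExecCtx* exec_ctx, int64_t keepalive_time) {
      return exec_ctx->Now() + keepalive_time;
    }

    // New style: leaf code fetches the same context from thread-local storage.
    int64_t keepalive_deadline_new(int64_t keepalive_time) {
      return ExecCtx::Get()->Now() + keepalive_time;
    }

    int main() {
      ExecCtx exec_ctx;  // one instance per thread of execution
      std::printf("%lld %lld\n",
                  (long long)keepalive_deadline_old(&exec_ctx, 20000),
                  (long long)keepalive_deadline_new(20000));
    }
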
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.h b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
index 198523d..596abab 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.h
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.h
@@ -27,24 +27,15 @@
 extern grpc_core::TraceFlag grpc_trace_http2_stream_state;
 extern grpc_core::DebugOnlyTraceFlag grpc_trace_chttp2_refcount;
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 grpc_transport* grpc_create_chttp2_transport(
-    grpc_exec_ctx* exec_ctx, const grpc_channel_args* channel_args,
-    grpc_endpoint* ep, bool is_client);
+    const grpc_channel_args* channel_args, grpc_endpoint* ep, bool is_client);
 
 /// Takes ownership of \a read_buffer, which (if non-NULL) contains
 /// leftover bytes previously read from the endpoint (e.g., by handshakers).
 /// If non-null, \a notify_on_receive_settings will be scheduled when
 /// HTTP/2 settings are received from the peer.
 void grpc_chttp2_transport_start_reading(
-    grpc_exec_ctx* exec_ctx, grpc_transport* transport,
-    grpc_slice_buffer* read_buffer, grpc_closure* notify_on_receive_settings);
-
-#ifdef __cplusplus
-}
-#endif
+    grpc_transport* transport, grpc_slice_buffer* read_buffer,
+    grpc_closure* notify_on_receive_settings);
 
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_CHTTP2_TRANSPORT_H */
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.cc b/src/core/ext/transport/chttp2/transport/flow_control.cc
index 8a057bd..ca48cc7 100644
--- a/src/core/ext/transport/chttp2/transport/flow_control.cc
+++ b/src/core/ext/transport/chttp2/transport/flow_control.cc
@@ -149,8 +149,7 @@
   gpr_free(mf_str);
 }
 
-TransportFlowControl::TransportFlowControl(grpc_exec_ctx* exec_ctx,
-                                           const grpc_chttp2_transport* t,
+TransportFlowControl::TransportFlowControl(const grpc_chttp2_transport* t,
                                            bool enable_bdp_probe)
     : t_(t),
       enable_bdp_probe_(enable_bdp_probe),
@@ -163,7 +162,7 @@
                           .set_min_control_value(-1)
                           .set_max_control_value(25)
                           .set_integral_range(10)),
-      last_pid_update_(grpc_exec_ctx_now(exec_ctx)) {}
+      last_pid_update_(grpc_core::ExecCtx::Get()->Now()) {}
 
 uint32_t TransportFlowControl::MaybeSendUpdate(bool writing_anyway) {
   FlowControlTrace trace("t updt sent", this, nullptr);
@@ -308,9 +307,8 @@
       1 + log2(bdp_estimator_.EstimateBdp()));
 }
 
-double TransportFlowControl::SmoothLogBdp(grpc_exec_ctx* exec_ctx,
-                                          double value) {
-  grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+double TransportFlowControl::SmoothLogBdp(double value) {
+  grpc_millis now = grpc_core::ExecCtx::Get()->Now();
   double bdp_error = value - pid_controller_.last_control_value();
   const double dt = (double)(now - last_pid_update_) * 1e-3;
   last_pid_update_ = now;
@@ -331,15 +329,14 @@
   }
 }
 
-FlowControlAction TransportFlowControl::PeriodicUpdate(
-    grpc_exec_ctx* exec_ctx) {
+FlowControlAction TransportFlowControl::PeriodicUpdate() {
   FlowControlAction action;
   if (enable_bdp_probe_) {
     // get bdp estimate and update initial_window accordingly.
     // target might change based on how much memory pressure we are under
     // TODO(ncteisen): experiment with setting target to be huge under low
     // memory pressure.
-    const double target = pow(2, SmoothLogBdp(exec_ctx, TargetLogBdp()));
+    const double target = pow(2, SmoothLogBdp(TargetLogBdp()));
 
     // Though initial window 'could' drop to 0, we keep the floor at 128
     target_initial_window_size_ = (int32_t)GPR_CLAMP(target, 128, INT32_MAX);
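
PeriodicUpdate() above turns the smoothed log-BDP estimate into a window target with pow(2, ...) and clamps it to [128, INT32_MAX] before advertising it. A rough, self-contained illustration of that clamp step, using placeholder names rather than gRPC's own helpers:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper mirroring the clamp in PeriodicUpdate(): take a
    // (possibly huge or tiny) BDP-derived target and keep the advertised
    // initial window within [128, INT32_MAX].
    int32_t ClampWindowTarget(double smoothed_log_bdp) {
      const double target = std::pow(2.0, smoothed_log_bdp);
      const double clamped =
          std::min(static_cast<double>(INT32_MAX), std::max(128.0, target));
      return static_cast<int32_t>(clamped);
    }

    int main() {
      std::printf("%d\n", ClampWindowTarget(3.0));   // 2^3 = 8, floored to 128
      std::printf("%d\n", ClampWindowTarget(20.0));  // 2^20 = 1048576
    }
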
diff --git a/src/core/ext/transport/chttp2/transport/flow_control.h b/src/core/ext/transport/chttp2/transport/flow_control.h
index 2515c94..8306047 100644
--- a/src/core/ext/transport/chttp2/transport/flow_control.h
+++ b/src/core/ext/transport/chttp2/transport/flow_control.h
@@ -134,8 +134,7 @@
 
 class TransportFlowControl {
  public:
-  TransportFlowControl(grpc_exec_ctx* exec_ctx, const grpc_chttp2_transport* t,
-                       bool enable_bdp_probe);
+  TransportFlowControl(const grpc_chttp2_transport* t, bool enable_bdp_probe);
   ~TransportFlowControl() {}
 
   bool bdp_probe() const { return enable_bdp_probe_; }
@@ -153,7 +152,7 @@
   // Call periodically (at a low-ish rate, 100ms - 10s makes sense)
   // to perform more complex flow control calculations and return an action
   // to let chttp2 change its parameters
-  FlowControlAction PeriodicUpdate(grpc_exec_ctx* exec_ctx);
+  FlowControlAction PeriodicUpdate();
 
   void StreamSentData(int64_t size) { remote_window_ -= size; }
 
@@ -212,7 +211,7 @@
  private:
   friend class ::grpc::testing::TrickledCHTTP2;
   double TargetLogBdp();
-  double SmoothLogBdp(grpc_exec_ctx* exec_ctx, double value);
+  double SmoothLogBdp(double value);
   FlowControlAction::Urgency DeltaUrgency(int32_t value,
                                           grpc_chttp2_setting_id setting_id);
 
diff --git a/src/core/ext/transport/chttp2/transport/frame.h b/src/core/ext/transport/chttp2/transport/frame.h
index e7debda..dba4c00 100644
--- a/src/core/ext/transport/chttp2/transport/frame.h
+++ b/src/core/ext/transport/chttp2/transport/frame.h
@@ -24,10 +24,6 @@
 
 #include "src/core/lib/iomgr/error.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* defined in internal.h */
 typedef struct grpc_chttp2_stream grpc_chttp2_stream;
 typedef struct grpc_chttp2_transport grpc_chttp2_transport;
@@ -47,8 +43,4 @@
 #define GRPC_CHTTP2_DATA_FLAG_PADDED 8
 #define GRPC_CHTTP2_FLAG_HAS_PRIORITY 0x20
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_H */
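
This header, like the other frame_*.h headers below, loses its extern "C" guards because the surrounding sources are now built as C++ and no C translation unit links against these symbols any more. The toy example below shows what the guard buys in the mixed C/C++ case; the function names are hypothetical, not part of gRPC.

    #include <cstdio>

    // With C linkage the symbol is exported unmangled, so C callers can link
    // against it. Once every caller is compiled as C++, the guard (and the
    // unmangled name) is no longer needed and a plain declaration suffices.
    extern "C" void frame_api_c_linkage() {   // hypothetical function
      std::puts("exported unmangled; callable from C");
    }

    void frame_api_cpp_linkage() {            // hypothetical function
      std::puts("name is C++-mangled; C++ callers only");
    }

    int main() {
      frame_api_c_linkage();
      frame_api_cpp_linkage();
    }
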
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.cc b/src/core/ext/transport/chttp2/transport/frame_data.cc
index f0c3b55..9b3a6ac 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_data.cc
@@ -36,11 +36,10 @@
   return GRPC_ERROR_NONE;
 }
 
-void grpc_chttp2_data_parser_destroy(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_data_parser* parser) {
+void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser* parser) {
   if (parser->parsing_frame != nullptr) {
     GRPC_ERROR_UNREF(grpc_chttp2_incoming_byte_stream_finished(
-        exec_ctx, parser->parsing_frame,
+        parser->parsing_frame,
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Parser destroyed"), false));
   }
   GRPC_ERROR_UNREF(parser->error);
@@ -98,7 +97,7 @@
 }
 
 grpc_error* grpc_deframe_unprocessed_incoming_frames(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_data_parser* p, grpc_chttp2_stream* s,
+    grpc_chttp2_data_parser* p, grpc_chttp2_stream* s,
     grpc_slice_buffer* slices, grpc_slice* slice_out,
     grpc_byte_stream** stream_out) {
   grpc_error* error = GRPC_ERROR_NONE;
@@ -118,14 +117,14 @@
     char* msg;
 
     if (cur == end) {
-      grpc_slice_unref_internal(exec_ctx, slice);
+      grpc_slice_unref_internal(slice);
       continue;
     }
 
     switch (p->state) {
       case GRPC_CHTTP2_DATA_ERROR:
         p->state = GRPC_CHTTP2_DATA_ERROR;
-        grpc_slice_unref_internal(exec_ctx, slice);
+        grpc_slice_unref_internal(slice);
         return GRPC_ERROR_REF(p->error);
       case GRPC_CHTTP2_DATA_FH_0:
         s->stats.incoming.framing_bytes++;
@@ -150,12 +149,12 @@
             p->error =
                 grpc_error_set_int(p->error, GRPC_ERROR_INT_OFFSET, cur - beg);
             p->state = GRPC_CHTTP2_DATA_ERROR;
-            grpc_slice_unref_internal(exec_ctx, slice);
+            grpc_slice_unref_internal(slice);
             return GRPC_ERROR_REF(p->error);
         }
         if (++cur == end) {
           p->state = GRPC_CHTTP2_DATA_FH_1;
-          grpc_slice_unref_internal(exec_ctx, slice);
+          grpc_slice_unref_internal(slice);
           continue;
         }
       /* fallthrough */
@@ -164,7 +163,7 @@
         p->frame_size = ((uint32_t)*cur) << 24;
         if (++cur == end) {
           p->state = GRPC_CHTTP2_DATA_FH_2;
-          grpc_slice_unref_internal(exec_ctx, slice);
+          grpc_slice_unref_internal(slice);
           continue;
         }
       /* fallthrough */
@@ -173,7 +172,7 @@
         p->frame_size |= ((uint32_t)*cur) << 16;
         if (++cur == end) {
           p->state = GRPC_CHTTP2_DATA_FH_3;
-          grpc_slice_unref_internal(exec_ctx, slice);
+          grpc_slice_unref_internal(slice);
           continue;
         }
       /* fallthrough */
@@ -182,7 +181,7 @@
         p->frame_size |= ((uint32_t)*cur) << 8;
         if (++cur == end) {
           p->state = GRPC_CHTTP2_DATA_FH_4;
-          grpc_slice_unref_internal(exec_ctx, slice);
+          grpc_slice_unref_internal(slice);
           continue;
         }
       /* fallthrough */
@@ -198,11 +197,11 @@
           message_flags |= GRPC_WRITE_INTERNAL_COMPRESS;
         }
         p->parsing_frame = grpc_chttp2_incoming_byte_stream_create(
-            exec_ctx, t, s, p->frame_size, message_flags);
+            t, s, p->frame_size, message_flags);
         *stream_out = &p->parsing_frame->base;
         if (p->parsing_frame->remaining_bytes == 0) {
           GRPC_ERROR_UNREF(grpc_chttp2_incoming_byte_stream_finished(
-              exec_ctx, p->parsing_frame, GRPC_ERROR_NONE, true));
+              p->parsing_frame, GRPC_ERROR_NONE, true));
           p->parsing_frame = nullptr;
           p->state = GRPC_CHTTP2_DATA_FH_0;
         }
@@ -213,64 +212,64 @@
               slices,
               grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
         }
-        grpc_slice_unref_internal(exec_ctx, slice);
+        grpc_slice_unref_internal(slice);
         return GRPC_ERROR_NONE;
       case GRPC_CHTTP2_DATA_FRAME: {
         GPR_ASSERT(p->parsing_frame != nullptr);
         GPR_ASSERT(slice_out != nullptr);
         if (cur == end) {
-          grpc_slice_unref_internal(exec_ctx, slice);
+          grpc_slice_unref_internal(slice);
           continue;
         }
         uint32_t remaining = (uint32_t)(end - cur);
         if (remaining == p->frame_size) {
           s->stats.incoming.data_bytes += remaining;
           if (GRPC_ERROR_NONE != (error = grpc_chttp2_incoming_byte_stream_push(
-                                      exec_ctx, p->parsing_frame,
+                                      p->parsing_frame,
                                       grpc_slice_sub(slice, (size_t)(cur - beg),
                                                      (size_t)(end - beg)),
                                       slice_out))) {
-            grpc_slice_unref_internal(exec_ctx, slice);
+            grpc_slice_unref_internal(slice);
             return error;
           }
           if (GRPC_ERROR_NONE !=
               (error = grpc_chttp2_incoming_byte_stream_finished(
-                   exec_ctx, p->parsing_frame, GRPC_ERROR_NONE, true))) {
-            grpc_slice_unref_internal(exec_ctx, slice);
+                   p->parsing_frame, GRPC_ERROR_NONE, true))) {
+            grpc_slice_unref_internal(slice);
             return error;
           }
           p->parsing_frame = nullptr;
           p->state = GRPC_CHTTP2_DATA_FH_0;
-          grpc_slice_unref_internal(exec_ctx, slice);
+          grpc_slice_unref_internal(slice);
           return GRPC_ERROR_NONE;
         } else if (remaining < p->frame_size) {
           s->stats.incoming.data_bytes += remaining;
           if (GRPC_ERROR_NONE != (error = grpc_chttp2_incoming_byte_stream_push(
-                                      exec_ctx, p->parsing_frame,
+                                      p->parsing_frame,
                                       grpc_slice_sub(slice, (size_t)(cur - beg),
                                                      (size_t)(end - beg)),
                                       slice_out))) {
             return error;
           }
           p->frame_size -= remaining;
-          grpc_slice_unref_internal(exec_ctx, slice);
+          grpc_slice_unref_internal(slice);
           return GRPC_ERROR_NONE;
         } else {
           GPR_ASSERT(remaining > p->frame_size);
           s->stats.incoming.data_bytes += p->frame_size;
           if (GRPC_ERROR_NONE !=
               (grpc_chttp2_incoming_byte_stream_push(
-                  exec_ctx, p->parsing_frame,
+                  p->parsing_frame,
                   grpc_slice_sub(slice, (size_t)(cur - beg),
                                  (size_t)(cur + p->frame_size - beg)),
                   slice_out))) {
-            grpc_slice_unref_internal(exec_ctx, slice);
+            grpc_slice_unref_internal(slice);
             return error;
           }
           if (GRPC_ERROR_NONE !=
               (error = grpc_chttp2_incoming_byte_stream_finished(
-                   exec_ctx, p->parsing_frame, GRPC_ERROR_NONE, true))) {
-            grpc_slice_unref_internal(exec_ctx, slice);
+                   p->parsing_frame, GRPC_ERROR_NONE, true))) {
+            grpc_slice_unref_internal(slice);
             return error;
           }
           p->parsing_frame = nullptr;
@@ -279,7 +278,7 @@
           grpc_slice_buffer_undo_take_first(
               slices,
               grpc_slice_sub(slice, (size_t)(cur - beg), (size_t)(end - beg)));
-          grpc_slice_unref_internal(exec_ctx, slice);
+          grpc_slice_unref_internal(slice);
           return GRPC_ERROR_NONE;
         }
       }
@@ -289,19 +288,19 @@
   return GRPC_ERROR_NONE;
 }
 
-grpc_error* grpc_chttp2_data_parser_parse(grpc_exec_ctx* exec_ctx, void* parser,
+grpc_error* grpc_chttp2_data_parser_parse(void* parser,
                                           grpc_chttp2_transport* t,
                                           grpc_chttp2_stream* s,
                                           grpc_slice slice, int is_last) {
   if (!s->pending_byte_stream) {
     grpc_slice_ref_internal(slice);
     grpc_slice_buffer_add(&s->frame_storage, slice);
-    grpc_chttp2_maybe_complete_recv_message(exec_ctx, t, s);
+    grpc_chttp2_maybe_complete_recv_message(t, s);
   } else if (s->on_next) {
     GPR_ASSERT(s->frame_storage.length == 0);
     grpc_slice_ref_internal(slice);
     grpc_slice_buffer_add(&s->unprocessed_incoming_frames_buffer, slice);
-    GRPC_CLOSURE_SCHED(exec_ctx, s->on_next, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(s->on_next, GRPC_ERROR_NONE);
     s->on_next = nullptr;
     s->unprocessed_incoming_frames_decompressed = false;
   } else {
@@ -310,8 +309,7 @@
   }
 
   if (is_last && s->received_last_frame) {
-    grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
-                                   GRPC_ERROR_NONE);
+    grpc_chttp2_mark_stream_closed(t, s, true, false, GRPC_ERROR_NONE);
   }
 
   return GRPC_ERROR_NONE;
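
States GRPC_CHTTP2_DATA_FH_1 through GRPC_CHTTP2_DATA_FH_4 in the deframer above assemble the message length one byte at a time, because a slice boundary can fall anywhere inside the 5-byte gRPC message prefix. A standalone sketch of that big-endian accumulation, using hypothetical names rather than the parser's own state machine:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical: accumulate a big-endian uint32 length the way the data
    // parser does, one byte per call, so input may arrive in arbitrary chunks.
    struct LengthAccumulator {
      uint32_t value = 0;
      int bytes_seen = 0;
      bool Feed(uint8_t b) {  // returns true once all 4 bytes have arrived
        value |= static_cast<uint32_t>(b) << (8 * (3 - bytes_seen));
        return ++bytes_seen == 4;
      }
    };

    int main() {
      LengthAccumulator acc;
      const uint8_t header[4] = {0x00, 0x01, 0x00, 0x00};  // 65536
      for (uint8_t b : header) {
        if (acc.Feed(b)) std::printf("frame size = %u\n", acc.value);
      }
    }
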
diff --git a/src/core/ext/transport/chttp2/transport/frame_data.h b/src/core/ext/transport/chttp2/transport/frame_data.h
index 96f823a..964cc59 100644
--- a/src/core/ext/transport/chttp2/transport/frame_data.h
+++ b/src/core/ext/transport/chttp2/transport/frame_data.h
@@ -28,10 +28,6 @@
 #include "src/core/lib/transport/byte_stream.h"
 #include "src/core/lib/transport/transport.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum {
   GRPC_CHTTP2_DATA_FH_0,
   GRPC_CHTTP2_DATA_FH_1,
@@ -58,8 +54,7 @@
 /* initialize per-stream state for data frame parsing */
 grpc_error* grpc_chttp2_data_parser_init(grpc_chttp2_data_parser* parser);
 
-void grpc_chttp2_data_parser_destroy(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_data_parser* parser);
+void grpc_chttp2_data_parser_destroy(grpc_chttp2_data_parser* parser);
 
 /* start processing a new data frame */
 grpc_error* grpc_chttp2_data_parser_begin_frame(grpc_chttp2_data_parser* parser,
@@ -69,7 +64,7 @@
 
 /* handle a slice of a data frame - is_last indicates the last slice of a
    frame */
-grpc_error* grpc_chttp2_data_parser_parse(grpc_exec_ctx* exec_ctx, void* parser,
+grpc_error* grpc_chttp2_data_parser_parse(void* parser,
                                           grpc_chttp2_transport* t,
                                           grpc_chttp2_stream* s,
                                           grpc_slice slice, int is_last);
@@ -80,12 +75,8 @@
                              grpc_slice_buffer* outbuf);
 
 grpc_error* grpc_deframe_unprocessed_incoming_frames(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_data_parser* p, grpc_chttp2_stream* s,
+    grpc_chttp2_data_parser* p, grpc_chttp2_stream* s,
     grpc_slice_buffer* slices, grpc_slice* slice_out,
     grpc_byte_stream** stream_out);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_DATA_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.cc b/src/core/ext/transport/chttp2/transport/frame_goaway.cc
index a2ce709..b60b422 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.cc
@@ -52,8 +52,7 @@
   return GRPC_ERROR_NONE;
 }
 
-grpc_error* grpc_chttp2_goaway_parser_parse(grpc_exec_ctx* exec_ctx,
-                                            void* parser,
+grpc_error* grpc_chttp2_goaway_parser_parse(void* parser,
                                             grpc_chttp2_transport* t,
                                             grpc_chttp2_stream* s,
                                             grpc_slice slice, int is_last) {
@@ -135,7 +134,7 @@
       p->state = GRPC_CHTTP2_GOAWAY_DEBUG;
       if (is_last) {
         grpc_chttp2_add_incoming_goaway(
-            exec_ctx, t, (uint32_t)p->error_code,
+            t, (uint32_t)p->error_code,
             grpc_slice_new(p->debug_data, p->debug_length, gpr_free));
         p->debug_data = nullptr;
       }
diff --git a/src/core/ext/transport/chttp2/transport/frame_goaway.h b/src/core/ext/transport/chttp2/transport/frame_goaway.h
index 9790d0b..064d39a 100644
--- a/src/core/ext/transport/chttp2/transport/frame_goaway.h
+++ b/src/core/ext/transport/chttp2/transport/frame_goaway.h
@@ -25,10 +25,6 @@
 #include "src/core/ext/transport/chttp2/transport/frame.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum {
   GRPC_CHTTP2_GOAWAY_LSI0,
   GRPC_CHTTP2_GOAWAY_LSI1,
@@ -54,8 +50,7 @@
 void grpc_chttp2_goaway_parser_destroy(grpc_chttp2_goaway_parser* p);
 grpc_error* grpc_chttp2_goaway_parser_begin_frame(
     grpc_chttp2_goaway_parser* parser, uint32_t length, uint8_t flags);
-grpc_error* grpc_chttp2_goaway_parser_parse(grpc_exec_ctx* exec_ctx,
-                                            void* parser,
+grpc_error* grpc_chttp2_goaway_parser_parse(void* parser,
                                             grpc_chttp2_transport* t,
                                             grpc_chttp2_stream* s,
                                             grpc_slice slice, int is_last);
@@ -64,8 +59,4 @@
                                grpc_slice debug_data,
                                grpc_slice_buffer* slice_buffer);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_GOAWAY_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.cc b/src/core/ext/transport/chttp2/transport/frame_ping.cc
index d0feb51..298a567 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.cc
@@ -68,7 +68,7 @@
   return GRPC_ERROR_NONE;
 }
 
-grpc_error* grpc_chttp2_ping_parser_parse(grpc_exec_ctx* exec_ctx, void* parser,
+grpc_error* grpc_chttp2_ping_parser_parse(void* parser,
                                           grpc_chttp2_transport* t,
                                           grpc_chttp2_stream* s,
                                           grpc_slice slice, int is_last) {
@@ -86,10 +86,10 @@
   if (p->byte == 8) {
     GPR_ASSERT(is_last);
     if (p->is_ack) {
-      grpc_chttp2_ack_ping(exec_ctx, t, p->opaque_8bytes);
+      grpc_chttp2_ack_ping(t, p->opaque_8bytes);
     } else {
       if (!t->is_client) {
-        grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+        grpc_millis now = grpc_core::ExecCtx::Get()->Now();
         grpc_millis next_allowed_ping =
             t->ping_recv_state.last_ping_recv_time +
             t->ping_policy.min_recv_ping_interval_without_data;
@@ -104,7 +104,7 @@
         }
 
         if (next_allowed_ping > now) {
-          grpc_chttp2_add_ping_strike(exec_ctx, t);
+          grpc_chttp2_add_ping_strike(t);
         }
 
         t->ping_recv_state.last_ping_recv_time = now;
@@ -116,8 +116,7 @@
               t->ping_acks, t->ping_ack_capacity * sizeof(*t->ping_acks));
         }
         t->ping_acks[t->ping_ack_count++] = p->opaque_8bytes;
-        grpc_chttp2_initiate_write(exec_ctx, t,
-                                   GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE);
+        grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_PING_RESPONSE);
       }
     }
   }
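
The non-ack branch above implements ping rate limiting on servers: if a PING arrives before last_ping_recv_time plus min_recv_ping_interval_without_data, the transport records a ping strike via grpc_chttp2_add_ping_strike(t) and then updates the last-received timestamp. A simplified sketch of that bookkeeping, with made-up field names and an arbitrary interval:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical rate limiter mirroring the shape of the check in
    // grpc_chttp2_ping_parser_parse: a ping arriving before
    // last_ping + min_interval counts as a "strike".
    struct PingRateLimiter {
      int64_t last_ping_recv_time = 0;          // ms
      int64_t min_recv_ping_interval = 300000;  // ms, arbitrary for the demo
      int strikes = 0;

      void OnPing(int64_t now_ms) {
        const int64_t next_allowed = last_ping_recv_time + min_recv_ping_interval;
        if (next_allowed > now_ms) ++strikes;   // too soon: record a strike
        last_ping_recv_time = now_ms;
      }
    };

    int main() {
      PingRateLimiter limiter;
      limiter.last_ping_recv_time = -limiter.min_recv_ping_interval;  // allow 1st
      limiter.OnPing(1000);  // allowed
      limiter.OnPing(2000);  // too soon -> strike
      std::printf("strikes = %d\n", limiter.strikes);  // prints 1
    }
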
diff --git a/src/core/ext/transport/chttp2/transport/frame_ping.h b/src/core/ext/transport/chttp2/transport/frame_ping.h
index 034aad0..75bacfb 100644
--- a/src/core/ext/transport/chttp2/transport/frame_ping.h
+++ b/src/core/ext/transport/chttp2/transport/frame_ping.h
@@ -23,10 +23,6 @@
 #include "src/core/ext/transport/chttp2/transport/frame.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   uint8_t byte;
   uint8_t is_ack;
@@ -37,7 +33,7 @@
 
 grpc_error* grpc_chttp2_ping_parser_begin_frame(grpc_chttp2_ping_parser* parser,
                                                 uint32_t length, uint8_t flags);
-grpc_error* grpc_chttp2_ping_parser_parse(grpc_exec_ctx* exec_ctx, void* parser,
+grpc_error* grpc_chttp2_ping_parser_parse(void* parser,
                                           grpc_chttp2_transport* t,
                                           grpc_chttp2_stream* s,
                                           grpc_slice slice, int is_last);
@@ -45,8 +41,4 @@
 /* Test-only function for disabling ping ack */
 void grpc_set_disable_ping_ack(bool disable_ping_ack);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_PING_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
index 05a7f05..fee5766 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.cc
@@ -69,8 +69,7 @@
   return GRPC_ERROR_NONE;
 }
 
-grpc_error* grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx* exec_ctx,
-                                                void* parser,
+grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser,
                                                 grpc_chttp2_transport* t,
                                                 grpc_chttp2_stream* s,
                                                 grpc_slice slice, int is_last) {
@@ -103,7 +102,7 @@
           GRPC_ERROR_INT_HTTP2_ERROR, (intptr_t)reason);
       gpr_free(message);
     }
-    grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, error);
+    grpc_chttp2_mark_stream_closed(t, s, true, true, error);
   }
 
   return GRPC_ERROR_NONE;
diff --git a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
index 3f5417e..e76a3ca 100644
--- a/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
+++ b/src/core/ext/transport/chttp2/transport/frame_rst_stream.h
@@ -24,10 +24,6 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/transport/transport.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   uint8_t byte;
   uint8_t reason_bytes[4];
@@ -38,14 +34,9 @@
 
 grpc_error* grpc_chttp2_rst_stream_parser_begin_frame(
     grpc_chttp2_rst_stream_parser* parser, uint32_t length, uint8_t flags);
-grpc_error* grpc_chttp2_rst_stream_parser_parse(grpc_exec_ctx* exec_ctx,
-                                                void* parser,
+grpc_error* grpc_chttp2_rst_stream_parser_parse(void* parser,
                                                 grpc_chttp2_transport* t,
                                                 grpc_chttp2_stream* s,
                                                 grpc_slice slice, int is_last);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_RST_STREAM_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.cc b/src/core/ext/transport/chttp2/transport/frame_settings.cc
index de4340f..c6c2a6c 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.cc
@@ -108,8 +108,7 @@
   }
 }
 
-grpc_error* grpc_chttp2_settings_parser_parse(grpc_exec_ctx* exec_ctx, void* p,
-                                              grpc_chttp2_transport* t,
+grpc_error* grpc_chttp2_settings_parser_parse(void* p, grpc_chttp2_transport* t,
                                               grpc_chttp2_stream* s,
                                               grpc_slice slice, int is_last) {
   grpc_chttp2_settings_parser* parser = (grpc_chttp2_settings_parser*)p;
@@ -132,7 +131,7 @@
                    GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
             grpc_slice_buffer_add(&t->qbuf, grpc_chttp2_settings_ack_create());
             if (t->notify_on_receive_settings != nullptr) {
-              GRPC_CLOSURE_SCHED(exec_ctx, t->notify_on_receive_settings,
+              GRPC_CLOSURE_SCHED(t->notify_on_receive_settings,
                                  GRPC_ERROR_NONE);
               t->notify_on_receive_settings = nullptr;
             }
diff --git a/src/core/ext/transport/chttp2/transport/frame_settings.h b/src/core/ext/transport/chttp2/transport/frame_settings.h
index 18bde92..ce65402 100644
--- a/src/core/ext/transport/chttp2/transport/frame_settings.h
+++ b/src/core/ext/transport/chttp2/transport/frame_settings.h
@@ -25,10 +25,6 @@
 #include "src/core/ext/transport/chttp2/transport/http2_settings.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum {
   GRPC_CHTTP2_SPS_ID0,
   GRPC_CHTTP2_SPS_ID1,
@@ -56,14 +52,9 @@
 grpc_error* grpc_chttp2_settings_parser_begin_frame(
     grpc_chttp2_settings_parser* parser, uint32_t length, uint8_t flags,
     uint32_t* settings);
-grpc_error* grpc_chttp2_settings_parser_parse(grpc_exec_ctx* exec_ctx,
-                                              void* parser,
+grpc_error* grpc_chttp2_settings_parser_parse(void* parser,
                                               grpc_chttp2_transport* t,
                                               grpc_chttp2_stream* s,
                                               grpc_slice slice, int is_last);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_SETTINGS_H */
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.cc b/src/core/ext/transport/chttp2/transport/frame_window_update.cc
index 08407a8..418ca14 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.cc
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.cc
@@ -64,9 +64,11 @@
   return GRPC_ERROR_NONE;
 }
 
-grpc_error* grpc_chttp2_window_update_parser_parse(
-    grpc_exec_ctx* exec_ctx, void* parser, grpc_chttp2_transport* t,
-    grpc_chttp2_stream* s, grpc_slice slice, int is_last) {
+grpc_error* grpc_chttp2_window_update_parser_parse(void* parser,
+                                                   grpc_chttp2_transport* t,
+                                                   grpc_chttp2_stream* s,
+                                                   grpc_slice slice,
+                                                   int is_last) {
   uint8_t* const beg = GRPC_SLICE_START_PTR(slice);
   uint8_t* const end = GRPC_SLICE_END_PTR(slice);
   uint8_t* cur = beg;
@@ -98,10 +100,9 @@
       if (s != nullptr) {
         s->flow_control->RecvUpdate(received_update);
         if (grpc_chttp2_list_remove_stalled_by_stream(t, s)) {
-          grpc_chttp2_mark_stream_writable(exec_ctx, t, s);
+          grpc_chttp2_mark_stream_writable(t, s);
           grpc_chttp2_initiate_write(
-              exec_ctx, t,
-              GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE);
+              t, GRPC_CHTTP2_INITIATE_WRITE_FLOW_CONTROL_UNSTALLED_BY_UPDATE);
         }
       }
     } else {
@@ -110,8 +111,7 @@
       bool is_zero = t->flow_control->remote_window() <= 0;
       if (was_zero && !is_zero) {
         grpc_chttp2_initiate_write(
-            exec_ctx, t,
-            GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED);
+            t, GRPC_CHTTP2_INITIATE_WRITE_TRANSPORT_FLOW_CONTROL_UNSTALLED);
       }
     }
   }
diff --git a/src/core/ext/transport/chttp2/transport/frame_window_update.h b/src/core/ext/transport/chttp2/transport/frame_window_update.h
index daf7d2d..a32f1a9 100644
--- a/src/core/ext/transport/chttp2/transport/frame_window_update.h
+++ b/src/core/ext/transport/chttp2/transport/frame_window_update.h
@@ -24,10 +24,6 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/transport/transport.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   uint8_t byte;
   uint8_t is_connection_update;
@@ -39,12 +35,10 @@
 
 grpc_error* grpc_chttp2_window_update_parser_begin_frame(
     grpc_chttp2_window_update_parser* parser, uint32_t length, uint8_t flags);
-grpc_error* grpc_chttp2_window_update_parser_parse(
-    grpc_exec_ctx* exec_ctx, void* parser, grpc_chttp2_transport* t,
-    grpc_chttp2_stream* s, grpc_slice slice, int is_last);
-
-#ifdef __cplusplus
-}
-#endif
+grpc_error* grpc_chttp2_window_update_parser_parse(void* parser,
+                                                   grpc_chttp2_transport* t,
+                                                   grpc_chttp2_stream* s,
+                                                   grpc_slice slice,
+                                                   int is_last);
 
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_FRAME_WINDOW_UPDATE_H */
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
index e76d92e..3a5692a 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.cc
@@ -206,14 +206,12 @@
 }
 
 /* dummy function */
-static void add_nothing(grpc_exec_ctx* exec_ctx,
-                        grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
+static void add_nothing(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
                         size_t elem_size) {}
 
 // Add a key to the dynamic table. Both key and value will be added to table at
 // the decoder.
-static void add_key_with_index(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hpack_compressor* c,
+static void add_key_with_index(grpc_chttp2_hpack_compressor* c,
                                grpc_mdelem elem, uint32_t new_index) {
   if (new_index == 0) {
     return;
@@ -240,14 +238,12 @@
     c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
   } else if (c->indices_keys[HASH_FRAGMENT_2(key_hash)] <
              c->indices_keys[HASH_FRAGMENT_3(key_hash)]) {
-    grpc_slice_unref_internal(exec_ctx,
-                              c->entries_keys[HASH_FRAGMENT_2(key_hash)]);
+    grpc_slice_unref_internal(c->entries_keys[HASH_FRAGMENT_2(key_hash)]);
     c->entries_keys[HASH_FRAGMENT_2(key_hash)] =
         grpc_slice_ref_internal(GRPC_MDKEY(elem));
     c->indices_keys[HASH_FRAGMENT_2(key_hash)] = new_index;
   } else {
-    grpc_slice_unref_internal(exec_ctx,
-                              c->entries_keys[HASH_FRAGMENT_3(key_hash)]);
+    grpc_slice_unref_internal(c->entries_keys[HASH_FRAGMENT_3(key_hash)]);
     c->entries_keys[HASH_FRAGMENT_3(key_hash)] =
         grpc_slice_ref_internal(GRPC_MDKEY(elem));
     c->indices_keys[HASH_FRAGMENT_3(key_hash)] = new_index;
@@ -255,8 +251,7 @@
 }
 
 /* add an element to the decoder table */
-static void add_elem_with_index(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_compressor* c,
+static void add_elem_with_index(grpc_chttp2_hpack_compressor* c,
                                 grpc_mdelem elem, uint32_t new_index) {
   if (new_index == 0) {
     return;
@@ -286,35 +281,34 @@
   } else if (c->indices_elems[HASH_FRAGMENT_2(elem_hash)] <
              c->indices_elems[HASH_FRAGMENT_3(elem_hash)]) {
     /* not there: replace oldest */
-    GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[HASH_FRAGMENT_2(elem_hash)]);
+    GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_2(elem_hash)]);
     c->entries_elems[HASH_FRAGMENT_2(elem_hash)] = GRPC_MDELEM_REF(elem);
     c->indices_elems[HASH_FRAGMENT_2(elem_hash)] = new_index;
   } else {
     /* not there: replace oldest */
-    GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[HASH_FRAGMENT_3(elem_hash)]);
+    GRPC_MDELEM_UNREF(c->entries_elems[HASH_FRAGMENT_3(elem_hash)]);
     c->entries_elems[HASH_FRAGMENT_3(elem_hash)] = GRPC_MDELEM_REF(elem);
     c->indices_elems[HASH_FRAGMENT_3(elem_hash)] = new_index;
   }
 
-  add_key_with_index(exec_ctx, c, elem, new_index);
+  add_key_with_index(c, elem, new_index);
 }
 
-static void add_elem(grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_compressor* c,
-                     grpc_mdelem elem, size_t elem_size) {
+static void add_elem(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
+                     size_t elem_size) {
   uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
-  add_elem_with_index(exec_ctx, c, elem, new_index);
+  add_elem_with_index(c, elem, new_index);
 }
 
-static void add_key(grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_compressor* c,
-                    grpc_mdelem elem, size_t elem_size) {
+static void add_key(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
+                    size_t elem_size) {
   uint32_t new_index = prepare_space_for_new_elem(c, elem_size);
-  add_key_with_index(exec_ctx, c, elem, new_index);
+  add_key_with_index(c, elem, new_index);
 }
 
-static void emit_indexed(grpc_exec_ctx* exec_ctx,
-                         grpc_chttp2_hpack_compressor* c, uint32_t elem_index,
+static void emit_indexed(grpc_chttp2_hpack_compressor* c, uint32_t elem_index,
                          framer_state* st) {
-  GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx);
+  GRPC_STATS_INC_HPACK_SEND_INDEXED();
   uint32_t len = GRPC_CHTTP2_VARINT_LENGTH(elem_index, 1);
   GRPC_CHTTP2_WRITE_VARINT(elem_index, 1, 0x80, add_tiny_header_data(st, len),
                            len);
@@ -326,18 +320,17 @@
   bool insert_null_before_wire_value;
 } wire_value;
 
-static wire_value get_wire_value(grpc_exec_ctx* exec_ctx, grpc_mdelem elem,
-                                 bool true_binary_enabled) {
+static wire_value get_wire_value(grpc_mdelem elem, bool true_binary_enabled) {
   wire_value wire_val;
   if (grpc_is_binary_header(GRPC_MDKEY(elem))) {
     if (true_binary_enabled) {
-      GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx);
+      GRPC_STATS_INC_HPACK_SEND_BINARY();
       wire_val.huffman_prefix = 0x00;
       wire_val.insert_null_before_wire_value = true;
       wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
 
     } else {
-      GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx);
+      GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64();
       wire_val.huffman_prefix = 0x80;
       wire_val.insert_null_before_wire_value = false;
       wire_val.data =
@@ -345,7 +338,7 @@
     }
   } else {
     /* TODO(ctiller): opportunistically compress non-binary headers */
-    GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
+    GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
     wire_val.huffman_prefix = 0x00;
     wire_val.insert_null_before_wire_value = false;
     wire_val.data = grpc_slice_ref_internal(GRPC_MDVALUE(elem));
@@ -362,14 +355,12 @@
   add_header_data(st, v.data);
 }
 
-static void emit_lithdr_incidx(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hpack_compressor* c,
+static void emit_lithdr_incidx(grpc_chttp2_hpack_compressor* c,
                                uint32_t key_index, grpc_mdelem elem,
                                framer_state* st) {
-  GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx);
+  GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX();
   uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 2);
-  wire_value value =
-      get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
+  wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
   size_t len_val = wire_value_length(value);
   uint32_t len_val_len;
   GPR_ASSERT(len_val <= UINT32_MAX);
@@ -381,14 +372,12 @@
   add_wire_value(st, value);
 }
 
-static void emit_lithdr_noidx(grpc_exec_ctx* exec_ctx,
-                              grpc_chttp2_hpack_compressor* c,
+static void emit_lithdr_noidx(grpc_chttp2_hpack_compressor* c,
                               uint32_t key_index, grpc_mdelem elem,
                               framer_state* st) {
-  GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx);
+  GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX();
   uint32_t len_pfx = GRPC_CHTTP2_VARINT_LENGTH(key_index, 4);
-  wire_value value =
-      get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
+  wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
   size_t len_val = wire_value_length(value);
   uint32_t len_val_len;
   GPR_ASSERT(len_val <= UINT32_MAX);
@@ -400,16 +389,14 @@
   add_wire_value(st, value);
 }
 
-static void emit_lithdr_incidx_v(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_hpack_compressor* c,
+static void emit_lithdr_incidx_v(grpc_chttp2_hpack_compressor* c,
                                  uint32_t unused_index, grpc_mdelem elem,
                                  framer_state* st) {
   GPR_ASSERT(unused_index == 0);
-  GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx);
-  GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
+  GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V();
+  GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
   uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
-  wire_value value =
-      get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
+  wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
   uint32_t len_val = (uint32_t)wire_value_length(value);
   uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
   uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
@@ -424,16 +411,14 @@
   add_wire_value(st, value);
 }
 
-static void emit_lithdr_noidx_v(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_compressor* c,
+static void emit_lithdr_noidx_v(grpc_chttp2_hpack_compressor* c,
                                 uint32_t unused_index, grpc_mdelem elem,
                                 framer_state* st) {
   GPR_ASSERT(unused_index == 0);
-  GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx);
-  GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx);
+  GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V();
+  GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED();
   uint32_t len_key = (uint32_t)GRPC_SLICE_LENGTH(GRPC_MDKEY(elem));
-  wire_value value =
-      get_wire_value(exec_ctx, elem, st->use_true_binary_metadata);
+  wire_value value = get_wire_value(elem, st->use_true_binary_metadata);
   uint32_t len_val = (uint32_t)wire_value_length(value);
   uint32_t len_key_len = GRPC_CHTTP2_VARINT_LENGTH(len_key, 1);
   uint32_t len_val_len = GRPC_CHTTP2_VARINT_LENGTH(len_val, 1);
@@ -462,8 +447,8 @@
 }
 
 /* encode an mdelem */
-static void hpack_enc(grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_compressor* c,
-                      grpc_mdelem elem, framer_state* st) {
+static void hpack_enc(grpc_chttp2_hpack_compressor* c, grpc_mdelem elem,
+                      framer_state* st) {
   GPR_ASSERT(GRPC_SLICE_LENGTH(GRPC_MDKEY(elem)) > 0);
   if (GRPC_SLICE_START_PTR(GRPC_MDKEY(elem))[0] != ':') { /* regular header */
     st->seen_regular_header = 1;
@@ -496,7 +481,7 @@
 
   // Key is not interned, emit literals.
   if (!key_interned) {
-    emit_lithdr_noidx_v(exec_ctx, c, 0, elem, st);
+    emit_lithdr_noidx_v(c, 0, elem, st);
     return;
   }
 
@@ -515,16 +500,16 @@
     if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_2(elem_hash)], elem) &&
         c->indices_elems[HASH_FRAGMENT_2(elem_hash)] > c->tail_remote_index) {
       /* HIT: complete element (first cuckoo hash) */
-      emit_indexed(exec_ctx, c,
-                   dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]), st);
+      emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_2(elem_hash)]),
+                   st);
       return;
     }
 
     if (grpc_mdelem_eq(c->entries_elems[HASH_FRAGMENT_3(elem_hash)], elem) &&
         c->indices_elems[HASH_FRAGMENT_3(elem_hash)] > c->tail_remote_index) {
       /* HIT: complete element (second cuckoo hash) */
-      emit_indexed(exec_ctx, c,
-                   dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]), st);
+      emit_indexed(c, dynidx(c, c->indices_elems[HASH_FRAGMENT_3(elem_hash)]),
+                   st);
       return;
     }
   }
@@ -538,10 +523,10 @@
                          decoder_space_usage < MAX_DECODER_SPACE_USAGE &&
                          c->filter_elems[HASH_FRAGMENT_1(elem_hash)] >=
                              c->filter_elems_sum / ONE_ON_ADD_PROBABILITY;
-  void (*maybe_add)(grpc_exec_ctx*, grpc_chttp2_hpack_compressor*, grpc_mdelem,
-                    size_t) = should_add_elem ? add_elem : add_nothing;
-  void (*emit)(grpc_exec_ctx*, grpc_chttp2_hpack_compressor*, uint32_t,
-               grpc_mdelem, framer_state*) =
+  void (*maybe_add)(grpc_chttp2_hpack_compressor*, grpc_mdelem, size_t) =
+      should_add_elem ? add_elem : add_nothing;
+  void (*emit)(grpc_chttp2_hpack_compressor*, uint32_t, grpc_mdelem,
+               framer_state*) =
       should_add_elem ? emit_lithdr_incidx : emit_lithdr_noidx;
 
   /* no hits for the elem... maybe there's a key? */
@@ -550,8 +535,8 @@
                     GRPC_MDKEY(elem)) &&
       indices_key > c->tail_remote_index) {
     /* HIT: key (first cuckoo hash) */
-    emit(exec_ctx, c, dynidx(c, indices_key), elem, st);
-    maybe_add(exec_ctx, c, elem, decoder_space_usage);
+    emit(c, dynidx(c, indices_key), elem, st);
+    maybe_add(c, elem, decoder_space_usage);
     return;
   }
 
@@ -560,8 +545,8 @@
                     GRPC_MDKEY(elem)) &&
       indices_key > c->tail_remote_index) {
     /* HIT: key (second cuckoo hash) */
-    emit(exec_ctx, c, dynidx(c, indices_key), elem, st);
-    maybe_add(exec_ctx, c, elem, decoder_space_usage);
+    emit(c, dynidx(c, indices_key), elem, st);
+    maybe_add(c, elem, decoder_space_usage);
     return;
   }
 
@@ -572,24 +557,23 @@
                                              : emit_lithdr_noidx_v;
   maybe_add =
       should_add_elem ? add_elem : (should_add_key ? add_key : add_nothing);
-  emit(exec_ctx, c, 0, elem, st);
-  maybe_add(exec_ctx, c, elem, decoder_space_usage);
+  emit(c, 0, elem, st);
+  maybe_add(c, elem, decoder_space_usage);
 }
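
The lookup above probes two cuckoo-style slots for the complete element and, failing that, for the key alone, and only admits a new entry to the dynamic table when a per-slot popularity filter clears a threshold. A standalone sketch of that two-probe-plus-filter pattern, with hypothetical names and sizes (not the gRPC data structures):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define SLOTS 256  /* illustrative table size, power of two */

    typedef struct {
      const char* keys[SLOTS];
      uint32_t indices[SLOTS];
      uint32_t filter[SLOTS];  /* rough popularity counter per slot */
      uint64_t filter_sum;
    } toy_hpack_cache;

    /* two independent hash fragments, analogous to HASH_FRAGMENT_2/3 */
    static uint32_t frag_a(uint32_t h) { return h & (SLOTS - 1); }
    static uint32_t frag_b(uint32_t h) { return (h >> 8) & (SLOTS - 1); }

    /* Returns a table index on a hit in either probe, or 0 on a miss; on a
       miss, bumps the filter and reports whether adding looks worthwhile. */
    static uint32_t toy_lookup(toy_hpack_cache* c, const char* key, uint32_t h,
                               bool* should_add) {
      if (c->keys[frag_a(h)] && strcmp(c->keys[frag_a(h)], key) == 0)
        return c->indices[frag_a(h)];
      if (c->keys[frag_b(h)] && strcmp(c->keys[frag_b(h)], key) == 0)
        return c->indices[frag_b(h)];
      c->filter[frag_a(h)]++;
      c->filter_sum++;
      /* admit roughly once the slot has seen its fair share of misses */
      *should_add = c->filter[frag_a(h)] >= c->filter_sum / SLOTS;
      return 0;
    }
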
 
 #define STRLEN_LIT(x) (sizeof(x) - 1)
 #define TIMEOUT_KEY "grpc-timeout"
 
-static void deadline_enc(grpc_exec_ctx* exec_ctx,
-                         grpc_chttp2_hpack_compressor* c, grpc_millis deadline,
+static void deadline_enc(grpc_chttp2_hpack_compressor* c, grpc_millis deadline,
                          framer_state* st) {
   char timeout_str[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
   grpc_mdelem mdelem;
-  grpc_http2_encode_timeout(deadline - grpc_exec_ctx_now(exec_ctx),
+  grpc_http2_encode_timeout(deadline - grpc_core::ExecCtx::Get()->Now(),
                             timeout_str);
-  mdelem = grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_TIMEOUT,
+  mdelem = grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_TIMEOUT,
                                    grpc_slice_from_copied_string(timeout_str));
-  hpack_enc(exec_ctx, c, mdelem, st);
-  GRPC_MDELEM_UNREF(exec_ctx, mdelem);
+  hpack_enc(c, mdelem, st);
+  GRPC_MDELEM_UNREF(mdelem);
 }
 
 static uint32_t elems_for_bytes(uint32_t bytes) { return (bytes + 31) / 32; }
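
The grpc_exec_ctx parameter removal throughout this patch relies on the execution context being ambient: deadline_enc() now reads the clock from grpc_core::ExecCtx::Get()->Now() instead of taking it as an argument. A minimal sketch of the calling convention this assumes (example_encode_with_deadline is hypothetical; ExecCtx comes from src/core/lib/iomgr/exec_ctx.h):

    #include "src/core/lib/iomgr/exec_ctx.h"

    void example_encode_with_deadline(grpc_chttp2_hpack_compressor* c,
                                      grpc_millis deadline, framer_state* st) {
      // An ExecCtx is established once on the stack at the API boundary and is
      // reachable everywhere below via grpc_core::ExecCtx::Get(); pending work
      // is flushed when it goes out of scope.
      grpc_core::ExecCtx exec_ctx;
      deadline_enc(c, deadline, st);
    }
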
@@ -609,14 +593,13 @@
   }
 }
 
-void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_hpack_compressor* c) {
+void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor* c) {
   int i;
   for (i = 0; i < GRPC_CHTTP2_HPACKC_NUM_VALUES; i++) {
     if (c->entries_keys[i].refcount != &terminal_slice_refcount) {
-      grpc_slice_unref_internal(exec_ctx, c->entries_keys[i]);
+      grpc_slice_unref_internal(c->entries_keys[i]);
     }
-    GRPC_MDELEM_UNREF(exec_ctx, c->entries_elems[i]);
+    GRPC_MDELEM_UNREF(c->entries_elems[i]);
   }
   gpr_free(c->table_elem_size);
 }
@@ -672,8 +655,7 @@
   }
 }
 
-void grpc_chttp2_encode_header(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hpack_compressor* c,
+void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor* c,
                                grpc_mdelem** extra_headers,
                                size_t extra_headers_size,
                                grpc_metadata_batch* metadata,
@@ -699,15 +681,15 @@
     emit_advertise_table_size_change(c, &st);
   }
   for (size_t i = 0; i < extra_headers_size; ++i) {
-    hpack_enc(exec_ctx, c, *extra_headers[i], &st);
+    hpack_enc(c, *extra_headers[i], &st);
   }
   grpc_metadata_batch_assert_ok(metadata);
   for (grpc_linked_mdelem* l = metadata->list.head; l; l = l->next) {
-    hpack_enc(exec_ctx, c, l->md, &st);
+    hpack_enc(c, l->md, &st);
   }
   grpc_millis deadline = metadata->deadline;
   if (deadline != GRPC_MILLIS_INF_FUTURE) {
-    deadline_enc(exec_ctx, c, deadline, &st);
+    deadline_enc(c, deadline, &st);
   }
 
   finish_frame(&st, 1, options->is_eof);
diff --git a/src/core/ext/transport/chttp2/transport/hpack_encoder.h b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
index 96d8e99..a26514c 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_encoder.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_encoder.h
@@ -36,10 +36,6 @@
 
 extern grpc_core::TraceFlag grpc_http_trace;
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   uint32_t filter_elems_sum;
   uint32_t max_table_size;
@@ -74,8 +70,7 @@
 } grpc_chttp2_hpack_compressor;
 
 void grpc_chttp2_hpack_compressor_init(grpc_chttp2_hpack_compressor* c);
-void grpc_chttp2_hpack_compressor_destroy(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_hpack_compressor* c);
+void grpc_chttp2_hpack_compressor_destroy(grpc_chttp2_hpack_compressor* c);
 void grpc_chttp2_hpack_compressor_set_max_table_size(
     grpc_chttp2_hpack_compressor* c, uint32_t max_table_size);
 void grpc_chttp2_hpack_compressor_set_max_usable_size(
@@ -89,16 +84,11 @@
   grpc_transport_one_way_stats* stats;
 } grpc_encode_header_options;
 
-void grpc_chttp2_encode_header(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hpack_compressor* c,
+void grpc_chttp2_encode_header(grpc_chttp2_hpack_compressor* c,
                                grpc_mdelem** extra_headers,
                                size_t extra_headers_size,
                                grpc_metadata_batch* metadata,
                                const grpc_encode_header_options* options,
                                grpc_slice_buffer* outbuf);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_ENCODER_H */
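
With the extern "C" guards gone, the header is consumed as C++ and every entry point drops its grpc_exec_ctx* parameter. A hypothetical caller exercising the new signatures end to end (placeholder arguments, not a working transport):

    #include "src/core/ext/transport/chttp2/transport/hpack_encoder.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    void example_encode(grpc_metadata_batch* metadata,
                        const grpc_encode_header_options* options,
                        grpc_slice_buffer* outbuf) {
      grpc_core::ExecCtx exec_ctx;  // ambient context, no longer passed around
      grpc_chttp2_hpack_compressor c;
      grpc_chttp2_hpack_compressor_init(&c);
      grpc_chttp2_encode_header(&c, nullptr /* extra_headers */, 0, metadata,
                                options, outbuf);
      grpc_chttp2_hpack_compressor_destroy(&c);
    }
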
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.cc b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
index 18cb27f..a395ab2 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.cc
@@ -61,96 +61,69 @@
    a set of indirect jumps, and so not waste stack space. */
 
 /* forward declarations for parsing states */
-static grpc_error* parse_begin(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_begin(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                const uint8_t* end);
-static grpc_error* parse_error(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_error(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                const uint8_t* end, grpc_error* error);
-static grpc_error* still_parse_error(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_hpack_parser* p,
+static grpc_error* still_parse_error(grpc_chttp2_hpack_parser* p,
                                      const uint8_t* cur, const uint8_t* end);
-static grpc_error* parse_illegal_op(grpc_exec_ctx* exec_ctx,
-                                    grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_illegal_op(grpc_chttp2_hpack_parser* p,
                                     const uint8_t* cur, const uint8_t* end);
 
-static grpc_error* parse_string_prefix(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_string_prefix(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end);
-static grpc_error* parse_key_string(grpc_exec_ctx* exec_ctx,
-                                    grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_key_string(grpc_chttp2_hpack_parser* p,
                                     const uint8_t* cur, const uint8_t* end);
 static grpc_error* parse_value_string_with_indexed_key(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* cur,
-    const uint8_t* end);
+    grpc_chttp2_hpack_parser* p, const uint8_t* cur, const uint8_t* end);
 static grpc_error* parse_value_string_with_literal_key(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* cur,
-    const uint8_t* end);
+    grpc_chttp2_hpack_parser* p, const uint8_t* cur, const uint8_t* end);
 
-static grpc_error* parse_value0(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value0(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end);
-static grpc_error* parse_value1(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value1(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end);
-static grpc_error* parse_value2(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value2(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end);
-static grpc_error* parse_value3(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value3(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end);
-static grpc_error* parse_value4(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value4(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end);
-static grpc_error* parse_value5up(grpc_exec_ctx* exec_ctx,
-                                  grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_value5up(grpc_chttp2_hpack_parser* p,
                                   const uint8_t* cur, const uint8_t* end);
 
-static grpc_error* parse_indexed_field(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_indexed_field(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end);
-static grpc_error* parse_indexed_field_x(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_indexed_field_x(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end);
-static grpc_error* parse_lithdr_incidx(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_incidx(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end);
-static grpc_error* parse_lithdr_incidx_x(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_incidx_x(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end);
-static grpc_error* parse_lithdr_incidx_v(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_incidx_v(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end);
-static grpc_error* parse_lithdr_notidx(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_notidx(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end);
-static grpc_error* parse_lithdr_notidx_x(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_notidx_x(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end);
-static grpc_error* parse_lithdr_notidx_v(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_notidx_v(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end);
-static grpc_error* parse_lithdr_nvridx(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_nvridx(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end);
-static grpc_error* parse_lithdr_nvridx_x(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end);
-static grpc_error* parse_lithdr_nvridx_v(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end);
-static grpc_error* parse_max_tbl_size(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_max_tbl_size(grpc_chttp2_hpack_parser* p,
                                       const uint8_t* cur, const uint8_t* end);
-static grpc_error* parse_max_tbl_size_x(grpc_exec_ctx* exec_ctx,
-                                        grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_max_tbl_size_x(grpc_chttp2_hpack_parser* p,
                                         const uint8_t* cur, const uint8_t* end);
 
 /* we translate the first byte of a hpack field into one of these decoding
@@ -649,8 +622,8 @@
 };
 
 /* emission helpers */
-static grpc_error* on_hdr(grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p,
-                          grpc_mdelem md, int add_to_table) {
+static grpc_error* on_hdr(grpc_chttp2_hpack_parser* p, grpc_mdelem md,
+                          int add_to_table) {
   if (grpc_http_trace.enabled()) {
     char* k = grpc_slice_to_c_string(GRPC_MDKEY(md));
     char* v = nullptr;
@@ -671,26 +644,25 @@
   if (add_to_table) {
     GPR_ASSERT(GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_INTERNED ||
                GRPC_MDELEM_STORAGE(md) == GRPC_MDELEM_STORAGE_STATIC);
-    grpc_error* err = grpc_chttp2_hptbl_add(exec_ctx, &p->table, md);
+    grpc_error* err = grpc_chttp2_hptbl_add(&p->table, md);
     if (err != GRPC_ERROR_NONE) return err;
   }
   if (p->on_header == nullptr) {
-    GRPC_MDELEM_UNREF(exec_ctx, md);
+    GRPC_MDELEM_UNREF(md);
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING("on_header callback not set");
   }
-  p->on_header(exec_ctx, p->on_header_user_data, md);
+  p->on_header(p->on_header_user_data, md);
   return GRPC_ERROR_NONE;
 }
 
-static grpc_slice take_string(grpc_exec_ctx* exec_ctx,
-                              grpc_chttp2_hpack_parser* p,
+static grpc_slice take_string(grpc_chttp2_hpack_parser* p,
                               grpc_chttp2_hpack_parser_string* str,
                               bool intern) {
   grpc_slice s;
   if (!str->copied) {
     if (intern) {
       s = grpc_slice_intern(str->data.referenced);
-      grpc_slice_unref_internal(exec_ctx, str->data.referenced);
+      grpc_slice_unref_internal(str->data.referenced);
     } else {
       s = str->data.referenced;
     }
@@ -708,85 +680,77 @@
 }
 
 /* jump to the next state */
-static grpc_error* parse_next(grpc_exec_ctx* exec_ctx,
-                              grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_next(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                               const uint8_t* end) {
   p->state = *p->next_state++;
-  return p->state(exec_ctx, p, cur, end);
+  return p->state(p, cur, end);
 }
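
parse_next() is the hinge of the parser's design: every state is a function of (parser, cur, end) that either consumes bytes and tail-calls the next state or, when the buffer runs out, stores itself in p->state so parsing can resume with the next slice. A toy, self-contained version of that shape (not gRPC code):

    #include <stdint.h>

    typedef struct toy_parser toy_parser;
    typedef int (*toy_state)(toy_parser* p, const uint8_t* cur,
                             const uint8_t* end);
    struct toy_parser {
      toy_state state;   /* where to resume when more input arrives */
      uint8_t remaining; /* bytes of the current record still to skip */
    };

    static int toy_skip_body(toy_parser* p, const uint8_t* cur,
                             const uint8_t* end);

    /* read a one-byte length, then hand off to the body-skipping state */
    static int toy_read_len(toy_parser* p, const uint8_t* cur,
                            const uint8_t* end) {
      if (cur == end) { p->state = toy_read_len; return 0; }
      p->remaining = *cur;
      return toy_skip_body(p, cur + 1, end);
    }

    /* consume 'remaining' bytes, possibly spread across several buffers */
    static int toy_skip_body(toy_parser* p, const uint8_t* cur,
                             const uint8_t* end) {
      while (p->remaining > 0 && cur != end) { ++cur; --p->remaining; }
      if (p->remaining > 0) { p->state = toy_skip_body; return 0; }
      return toy_read_len(p, cur, end);  /* next length-prefixed record */
    }
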
 
 /* begin parsing a header: all functionality is encoded into lookup tables
    above */
-static grpc_error* parse_begin(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_begin(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                const uint8_t* end) {
   if (cur == end) {
     p->state = parse_begin;
     return GRPC_ERROR_NONE;
   }
 
-  return first_byte_action[first_byte_lut[*cur]](exec_ctx, p, cur, end);
+  return first_byte_action[first_byte_lut[*cur]](p, cur, end);
 }
 
 /* stream dependency and prioritization data: we just skip it */
-static grpc_error* parse_stream_weight(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_stream_weight(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end) {
   if (cur == end) {
     p->state = parse_stream_weight;
     return GRPC_ERROR_NONE;
   }
 
-  return p->after_prioritization(exec_ctx, p, cur + 1, end);
+  return p->after_prioritization(p, cur + 1, end);
 }
 
-static grpc_error* parse_stream_dep3(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_stream_dep3(grpc_chttp2_hpack_parser* p,
                                      const uint8_t* cur, const uint8_t* end) {
   if (cur == end) {
     p->state = parse_stream_dep3;
     return GRPC_ERROR_NONE;
   }
 
-  return parse_stream_weight(exec_ctx, p, cur + 1, end);
+  return parse_stream_weight(p, cur + 1, end);
 }
 
-static grpc_error* parse_stream_dep2(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_stream_dep2(grpc_chttp2_hpack_parser* p,
                                      const uint8_t* cur, const uint8_t* end) {
   if (cur == end) {
     p->state = parse_stream_dep2;
     return GRPC_ERROR_NONE;
   }
 
-  return parse_stream_dep3(exec_ctx, p, cur + 1, end);
+  return parse_stream_dep3(p, cur + 1, end);
 }
 
-static grpc_error* parse_stream_dep1(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_stream_dep1(grpc_chttp2_hpack_parser* p,
                                      const uint8_t* cur, const uint8_t* end) {
   if (cur == end) {
     p->state = parse_stream_dep1;
     return GRPC_ERROR_NONE;
   }
 
-  return parse_stream_dep2(exec_ctx, p, cur + 1, end);
+  return parse_stream_dep2(p, cur + 1, end);
 }
 
-static grpc_error* parse_stream_dep0(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_stream_dep0(grpc_chttp2_hpack_parser* p,
                                      const uint8_t* cur, const uint8_t* end) {
   if (cur == end) {
     p->state = parse_stream_dep0;
     return GRPC_ERROR_NONE;
   }
 
-  return parse_stream_dep1(exec_ctx, p, cur + 1, end);
+  return parse_stream_dep1(p, cur + 1, end);
 }
 
 /* emit an indexed field; jumps to begin the next field on completion */
-static grpc_error* finish_indexed_field(grpc_exec_ctx* exec_ctx,
-                                        grpc_chttp2_hpack_parser* p,
+static grpc_error* finish_indexed_field(grpc_chttp2_hpack_parser* p,
                                         const uint8_t* cur,
                                         const uint8_t* end) {
   grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
@@ -798,24 +762,22 @@
         GRPC_ERROR_INT_SIZE, (intptr_t)p->table.num_ents);
   }
   GRPC_MDELEM_REF(md);
-  GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx);
-  grpc_error* err = on_hdr(exec_ctx, p, md, 0);
+  GRPC_STATS_INC_HPACK_RECV_INDEXED();
+  grpc_error* err = on_hdr(p, md, 0);
   if (err != GRPC_ERROR_NONE) return err;
-  return parse_begin(exec_ctx, p, cur, end);
+  return parse_begin(p, cur, end);
 }
 
 /* parse an indexed field with index < 127 */
-static grpc_error* parse_indexed_field(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_indexed_field(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end) {
   p->dynamic_table_update_allowed = 0;
   p->index = (*cur) & 0x7f;
-  return finish_indexed_field(exec_ctx, p, cur + 1, end);
+  return finish_indexed_field(p, cur + 1, end);
 }
 
 /* parse an indexed field with index >= 127 */
-static grpc_error* parse_indexed_field_x(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_indexed_field_x(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -824,56 +786,52 @@
   p->next_state = and_then;
   p->index = 0x7f;
   p->parsing.value = &p->index;
-  return parse_value0(exec_ctx, p, cur + 1, end);
+  return parse_value0(p, cur + 1, end);
 }
 
 /* finish a literal header with incremental indexing */
-static grpc_error* finish_lithdr_incidx(grpc_exec_ctx* exec_ctx,
-                                        grpc_chttp2_hpack_parser* p,
+static grpc_error* finish_lithdr_incidx(grpc_chttp2_hpack_parser* p,
                                         const uint8_t* cur,
                                         const uint8_t* end) {
   grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
   GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
-  GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx);
-  grpc_error* err = on_hdr(
-      exec_ctx, p,
-      grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
-                              take_string(exec_ctx, p, &p->value, true)),
-      1);
-  if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-  return parse_begin(exec_ctx, p, cur, end);
+  GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX();
+  grpc_error* err =
+      on_hdr(p,
+             grpc_mdelem_from_slices(grpc_slice_ref_internal(GRPC_MDKEY(md)),
+                                     take_string(p, &p->value, true)),
+             1);
+  if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+  return parse_begin(p, cur, end);
 }
 
 /* finish a literal header with incremental indexing with no index */
-static grpc_error* finish_lithdr_incidx_v(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_hpack_parser* p,
+static grpc_error* finish_lithdr_incidx_v(grpc_chttp2_hpack_parser* p,
                                           const uint8_t* cur,
                                           const uint8_t* end) {
-  GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx);
-  grpc_error* err = on_hdr(
-      exec_ctx, p,
-      grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
-                              take_string(exec_ctx, p, &p->value, true)),
-      1);
-  if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-  return parse_begin(exec_ctx, p, cur, end);
+  GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V();
+  grpc_error* err =
+      on_hdr(p,
+             grpc_mdelem_from_slices(take_string(p, &p->key, true),
+                                     take_string(p, &p->value, true)),
+             1);
+  if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+  return parse_begin(p, cur, end);
 }
 
 /* parse a literal header with incremental indexing; index < 63 */
-static grpc_error* parse_lithdr_incidx(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_incidx(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
       parse_value_string_with_indexed_key, finish_lithdr_incidx};
   p->dynamic_table_update_allowed = 0;
   p->next_state = and_then;
   p->index = (*cur) & 0x3f;
-  return parse_string_prefix(exec_ctx, p, cur + 1, end);
+  return parse_string_prefix(p, cur + 1, end);
 }
 
 /* parse a literal header with incremental indexing; index >= 63 */
-static grpc_error* parse_lithdr_incidx_x(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_incidx_x(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -883,12 +841,11 @@
   p->next_state = and_then;
   p->index = 0x3f;
   p->parsing.value = &p->index;
-  return parse_value0(exec_ctx, p, cur + 1, end);
+  return parse_value0(p, cur + 1, end);
 }
 
 /* parse a literal header with incremental indexing; index = 0 */
-static grpc_error* parse_lithdr_incidx_v(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_incidx_v(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -896,56 +853,52 @@
       parse_value_string_with_literal_key, finish_lithdr_incidx_v};
   p->dynamic_table_update_allowed = 0;
   p->next_state = and_then;
-  return parse_string_prefix(exec_ctx, p, cur + 1, end);
+  return parse_string_prefix(p, cur + 1, end);
 }
 
 /* finish a literal header without incremental indexing */
-static grpc_error* finish_lithdr_notidx(grpc_exec_ctx* exec_ctx,
-                                        grpc_chttp2_hpack_parser* p,
+static grpc_error* finish_lithdr_notidx(grpc_chttp2_hpack_parser* p,
                                         const uint8_t* cur,
                                         const uint8_t* end) {
   grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
   GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
-  GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx);
-  grpc_error* err = on_hdr(
-      exec_ctx, p,
-      grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
-                              take_string(exec_ctx, p, &p->value, false)),
-      0);
-  if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-  return parse_begin(exec_ctx, p, cur, end);
+  GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX();
+  grpc_error* err =
+      on_hdr(p,
+             grpc_mdelem_from_slices(grpc_slice_ref_internal(GRPC_MDKEY(md)),
+                                     take_string(p, &p->value, false)),
+             0);
+  if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+  return parse_begin(p, cur, end);
 }
 
 /* finish a literal header without incremental indexing with index = 0 */
-static grpc_error* finish_lithdr_notidx_v(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_hpack_parser* p,
+static grpc_error* finish_lithdr_notidx_v(grpc_chttp2_hpack_parser* p,
                                           const uint8_t* cur,
                                           const uint8_t* end) {
-  GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx);
-  grpc_error* err = on_hdr(
-      exec_ctx, p,
-      grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
-                              take_string(exec_ctx, p, &p->value, false)),
-      0);
-  if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-  return parse_begin(exec_ctx, p, cur, end);
+  GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V();
+  grpc_error* err =
+      on_hdr(p,
+             grpc_mdelem_from_slices(take_string(p, &p->key, true),
+                                     take_string(p, &p->value, false)),
+             0);
+  if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+  return parse_begin(p, cur, end);
 }
 
 /* parse a literal header without incremental indexing; index < 15 */
-static grpc_error* parse_lithdr_notidx(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_notidx(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
       parse_value_string_with_indexed_key, finish_lithdr_notidx};
   p->dynamic_table_update_allowed = 0;
   p->next_state = and_then;
   p->index = (*cur) & 0xf;
-  return parse_string_prefix(exec_ctx, p, cur + 1, end);
+  return parse_string_prefix(p, cur + 1, end);
 }
 
 /* parse a literal header without incremental indexing; index >= 15 */
-static grpc_error* parse_lithdr_notidx_x(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_notidx_x(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -955,12 +908,11 @@
   p->next_state = and_then;
   p->index = 0xf;
   p->parsing.value = &p->index;
-  return parse_value0(exec_ctx, p, cur + 1, end);
+  return parse_value0(p, cur + 1, end);
 }
 
 /* parse a literal header without incremental indexing; index == 0 */
-static grpc_error* parse_lithdr_notidx_v(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_notidx_v(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -968,56 +920,52 @@
       parse_value_string_with_literal_key, finish_lithdr_notidx_v};
   p->dynamic_table_update_allowed = 0;
   p->next_state = and_then;
-  return parse_string_prefix(exec_ctx, p, cur + 1, end);
+  return parse_string_prefix(p, cur + 1, end);
 }
 
 /* finish a literal header that is never indexed */
-static grpc_error* finish_lithdr_nvridx(grpc_exec_ctx* exec_ctx,
-                                        grpc_chttp2_hpack_parser* p,
+static grpc_error* finish_lithdr_nvridx(grpc_chttp2_hpack_parser* p,
                                         const uint8_t* cur,
                                         const uint8_t* end) {
   grpc_mdelem md = grpc_chttp2_hptbl_lookup(&p->table, p->index);
   GPR_ASSERT(!GRPC_MDISNULL(md)); /* handled in string parsing */
-  GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx);
-  grpc_error* err = on_hdr(
-      exec_ctx, p,
-      grpc_mdelem_from_slices(exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(md)),
-                              take_string(exec_ctx, p, &p->value, false)),
-      0);
-  if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-  return parse_begin(exec_ctx, p, cur, end);
+  GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX();
+  grpc_error* err =
+      on_hdr(p,
+             grpc_mdelem_from_slices(grpc_slice_ref_internal(GRPC_MDKEY(md)),
+                                     take_string(p, &p->value, false)),
+             0);
+  if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+  return parse_begin(p, cur, end);
 }
 
 /* finish a literal header that is never indexed with an extra value */
-static grpc_error* finish_lithdr_nvridx_v(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_hpack_parser* p,
+static grpc_error* finish_lithdr_nvridx_v(grpc_chttp2_hpack_parser* p,
                                           const uint8_t* cur,
                                           const uint8_t* end) {
-  GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx);
-  grpc_error* err = on_hdr(
-      exec_ctx, p,
-      grpc_mdelem_from_slices(exec_ctx, take_string(exec_ctx, p, &p->key, true),
-                              take_string(exec_ctx, p, &p->value, false)),
-      0);
-  if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-  return parse_begin(exec_ctx, p, cur, end);
+  GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V();
+  grpc_error* err =
+      on_hdr(p,
+             grpc_mdelem_from_slices(take_string(p, &p->key, true),
+                                     take_string(p, &p->value, false)),
+             0);
+  if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+  return parse_begin(p, cur, end);
 }
 
 /* parse a literal header that is never indexed; index < 15 */
-static grpc_error* parse_lithdr_nvridx(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_nvridx(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
       parse_value_string_with_indexed_key, finish_lithdr_nvridx};
   p->dynamic_table_update_allowed = 0;
   p->next_state = and_then;
   p->index = (*cur) & 0xf;
-  return parse_string_prefix(exec_ctx, p, cur + 1, end);
+  return parse_string_prefix(p, cur + 1, end);
 }
 
 /* parse a literal header that is never indexed; index >= 15 */
-static grpc_error* parse_lithdr_nvridx_x(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_nvridx_x(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -1027,12 +975,11 @@
   p->next_state = and_then;
   p->index = 0xf;
   p->parsing.value = &p->index;
-  return parse_value0(exec_ctx, p, cur + 1, end);
+  return parse_value0(p, cur + 1, end);
 }
 
 /* parse a literal header that is never indexed; index == 0 */
-static grpc_error* parse_lithdr_nvridx_v(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_lithdr_nvridx_v(grpc_chttp2_hpack_parser* p,
                                          const uint8_t* cur,
                                          const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
@@ -1040,47 +987,44 @@
       parse_value_string_with_literal_key, finish_lithdr_nvridx_v};
   p->dynamic_table_update_allowed = 0;
   p->next_state = and_then;
-  return parse_string_prefix(exec_ctx, p, cur + 1, end);
+  return parse_string_prefix(p, cur + 1, end);
 }
 
 /* finish parsing a max table size change */
-static grpc_error* finish_max_tbl_size(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* finish_max_tbl_size(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end) {
   if (grpc_http_trace.enabled()) {
     gpr_log(GPR_INFO, "MAX TABLE SIZE: %d", p->index);
   }
   grpc_error* err =
-      grpc_chttp2_hptbl_set_current_table_size(exec_ctx, &p->table, p->index);
-  if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-  return parse_begin(exec_ctx, p, cur, end);
+      grpc_chttp2_hptbl_set_current_table_size(&p->table, p->index);
+  if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+  return parse_begin(p, cur, end);
 }
 
 /* parse a max table size change, max size < 31 */
-static grpc_error* parse_max_tbl_size(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_max_tbl_size(grpc_chttp2_hpack_parser* p,
                                       const uint8_t* cur, const uint8_t* end) {
   if (p->dynamic_table_update_allowed == 0) {
     return parse_error(
-        exec_ctx, p, cur, end,
+        p, cur, end,
         GRPC_ERROR_CREATE_FROM_STATIC_STRING(
             "More than two max table size changes in a single frame"));
   }
   p->dynamic_table_update_allowed--;
   p->index = (*cur) & 0x1f;
-  return finish_max_tbl_size(exec_ctx, p, cur + 1, end);
+  return finish_max_tbl_size(p, cur + 1, end);
 }
 
 /* parse a max table size change, max size >= 31 */
-static grpc_error* parse_max_tbl_size_x(grpc_exec_ctx* exec_ctx,
-                                        grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_max_tbl_size_x(grpc_chttp2_hpack_parser* p,
                                         const uint8_t* cur,
                                         const uint8_t* end) {
   static const grpc_chttp2_hpack_parser_state and_then[] = {
       finish_max_tbl_size};
   if (p->dynamic_table_update_allowed == 0) {
     return parse_error(
-        exec_ctx, p, cur, end,
+        p, cur, end,
         GRPC_ERROR_CREATE_FROM_STATIC_STRING(
             "More than two max table size changes in a single frame"));
   }
@@ -1088,12 +1032,11 @@
   p->next_state = and_then;
   p->index = 0x1f;
   p->parsing.value = &p->index;
-  return parse_value0(exec_ctx, p, cur + 1, end);
+  return parse_value0(p, cur + 1, end);
 }
 
 /* a parse error: jam the parse state into parse_error, and return error */
-static grpc_error* parse_error(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_error(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                const uint8_t* end, grpc_error* err) {
   GPR_ASSERT(err != GRPC_ERROR_NONE);
   if (p->last_error == GRPC_ERROR_NONE) {
@@ -1103,27 +1046,24 @@
   return err;
 }
 
-static grpc_error* still_parse_error(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_hpack_parser* p,
+static grpc_error* still_parse_error(grpc_chttp2_hpack_parser* p,
                                      const uint8_t* cur, const uint8_t* end) {
   return GRPC_ERROR_REF(p->last_error);
 }
 
-static grpc_error* parse_illegal_op(grpc_exec_ctx* exec_ctx,
-                                    grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_illegal_op(grpc_chttp2_hpack_parser* p,
                                     const uint8_t* cur, const uint8_t* end) {
   GPR_ASSERT(cur != end);
   char* msg;
   gpr_asprintf(&msg, "Illegal hpack op code %d", *cur);
   grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
   gpr_free(msg);
-  return parse_error(exec_ctx, p, cur, end, err);
+  return parse_error(p, cur, end, err);
 }
 
 /* parse the 1st byte of a varint into p->parsing.value
    no overflow is possible */
-static grpc_error* parse_value0(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value0(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end) {
   if (cur == end) {
     p->state = parse_value0;
@@ -1133,16 +1073,15 @@
   *p->parsing.value += (*cur) & 0x7f;
 
   if ((*cur) & 0x80) {
-    return parse_value1(exec_ctx, p, cur + 1, end);
+    return parse_value1(p, cur + 1, end);
   } else {
-    return parse_next(exec_ctx, p, cur + 1, end);
+    return parse_next(p, cur + 1, end);
   }
 }
 
 /* parse the 2nd byte of a varint into p->parsing.value
    no overflow is possible */
-static grpc_error* parse_value1(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value1(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end) {
   if (cur == end) {
     p->state = parse_value1;
@@ -1152,16 +1091,15 @@
   *p->parsing.value += (((uint32_t)*cur) & 0x7f) << 7;
 
   if ((*cur) & 0x80) {
-    return parse_value2(exec_ctx, p, cur + 1, end);
+    return parse_value2(p, cur + 1, end);
   } else {
-    return parse_next(exec_ctx, p, cur + 1, end);
+    return parse_next(p, cur + 1, end);
   }
 }
 
 /* parse the 3rd byte of a varint into p->parsing.value
    no overflow is possible */
-static grpc_error* parse_value2(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value2(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end) {
   if (cur == end) {
     p->state = parse_value2;
@@ -1171,16 +1109,15 @@
   *p->parsing.value += (((uint32_t)*cur) & 0x7f) << 14;
 
   if ((*cur) & 0x80) {
-    return parse_value3(exec_ctx, p, cur + 1, end);
+    return parse_value3(p, cur + 1, end);
   } else {
-    return parse_next(exec_ctx, p, cur + 1, end);
+    return parse_next(p, cur + 1, end);
   }
 }
 
 /* parse the 4th byte of a varint into p->parsing.value
    no overflow is possible */
-static grpc_error* parse_value3(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value3(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end) {
   if (cur == end) {
     p->state = parse_value3;
@@ -1190,16 +1127,15 @@
   *p->parsing.value += (((uint32_t)*cur) & 0x7f) << 21;
 
   if ((*cur) & 0x80) {
-    return parse_value4(exec_ctx, p, cur + 1, end);
+    return parse_value4(p, cur + 1, end);
   } else {
-    return parse_next(exec_ctx, p, cur + 1, end);
+    return parse_next(p, cur + 1, end);
   }
 }
 
 /* parse the 5th byte of a varint into p->parsing.value
    depending on the byte, we may overflow, and care must be taken */
-static grpc_error* parse_value4(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_value4(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end) {
   uint8_t c;
   uint32_t cur_value;
@@ -1225,9 +1161,9 @@
   *p->parsing.value = cur_value + add_value;
 
   if ((*cur) & 0x80) {
-    return parse_value5up(exec_ctx, p, cur + 1, end);
+    return parse_value5up(p, cur + 1, end);
   } else {
-    return parse_next(exec_ctx, p, cur + 1, end);
+    return parse_next(p, cur + 1, end);
   }
 
 error:
@@ -1237,14 +1173,13 @@
                *p->parsing.value, *cur);
   grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
   gpr_free(msg);
-  return parse_error(exec_ctx, p, cur, end, err);
+  return parse_error(p, cur, end, err);
 }
 
 /* parse any trailing bytes in a varint: it's possible to append an arbitrary
    number of 0x80's and not affect the value - a zero will terminate - and
    anything else will overflow */
-static grpc_error* parse_value5up(grpc_exec_ctx* exec_ctx,
-                                  grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_value5up(grpc_chttp2_hpack_parser* p,
                                   const uint8_t* cur, const uint8_t* end) {
   while (cur != end && *cur == 0x80) {
     ++cur;
@@ -1256,7 +1191,7 @@
   }
 
   if (*cur == 0) {
-    return parse_next(exec_ctx, p, cur + 1, end);
+    return parse_next(p, cur + 1, end);
   }
 
   char* msg;
@@ -1266,12 +1201,11 @@
                *p->parsing.value, *cur);
   grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
   gpr_free(msg);
-  return parse_error(exec_ctx, p, cur, end, err);
+  return parse_error(p, cur, end, err);
 }
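
parse_value0() through parse_value5up() spread HPACK prefixed-integer decoding (RFC 7541 §5.1) across one state per byte so the parser can pause at any buffer boundary. The same arithmetic in one piece, as a standalone sketch (the overflow checks that parse_value4/parse_value5up perform are omitted here):

    #include <stddef.h>
    #include <stdint.h>

    /* Returns the number of bytes consumed, or -1 if more input is needed. */
    static int decode_hpack_int(const uint8_t* buf, size_t len, int prefix_bits,
                                uint32_t* out) {
      if (len == 0) return -1;
      const uint32_t mask = (1u << prefix_bits) - 1;
      uint32_t value = buf[0] & mask;
      if (value < mask) { *out = value; return 1; }  /* fits in the prefix */
      uint32_t shift = 0;
      for (size_t i = 1; i < len; i++) {
        value += (uint32_t)(buf[i] & 0x7f) << shift;  /* 7 payload bits/byte */
        if ((buf[i] & 0x80) == 0) { *out = value; return (int)(i + 1); }
        shift += 7;
      }
      return -1;  /* continuation bit still set at end of buffer */
    }
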
 
 /* parse a string prefix */
-static grpc_error* parse_string_prefix(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_string_prefix(grpc_chttp2_hpack_parser* p,
                                        const uint8_t* cur, const uint8_t* end) {
   if (cur == end) {
     p->state = parse_string_prefix;
@@ -1282,9 +1216,9 @@
   p->huff = (*cur) >> 7;
   if (p->strlen == 0x7f) {
     p->parsing.value = &p->strlen;
-    return parse_value0(exec_ctx, p, cur + 1, end);
+    return parse_value0(p, cur + 1, end);
   } else {
-    return parse_next(exec_ctx, p, cur + 1, end);
+    return parse_next(p, cur + 1, end);
   }
 }
 
@@ -1303,8 +1237,7 @@
   str->data.copied.length += (uint32_t)length;
 }
 
-static grpc_error* append_string(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_hpack_parser* p,
+static grpc_error* append_string(grpc_chttp2_hpack_parser* p,
                                  const uint8_t* cur, const uint8_t* end) {
   grpc_chttp2_hpack_parser_string* str = p->parsing.str;
   uint32_t bits;
@@ -1322,11 +1255,11 @@
         /* 'true-binary' case */
         ++cur;
         p->binary = NOT_BINARY;
-        GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx);
+        GRPC_STATS_INC_HPACK_RECV_BINARY();
         append_bytes(str, cur, (size_t)(end - cur));
         return GRPC_ERROR_NONE;
       }
-      GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx);
+      GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64();
     /* fallthrough */
     b64_byte0:
     case B64_BYTE0:
@@ -1338,7 +1271,7 @@
       ++cur;
       if (bits == 255)
         return parse_error(
-            exec_ctx, p, cur, end,
+            p, cur, end,
             GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
       else if (bits == 64)
         goto b64_byte0;
@@ -1354,7 +1287,7 @@
       ++cur;
       if (bits == 255)
         return parse_error(
-            exec_ctx, p, cur, end,
+            p, cur, end,
             GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
       else if (bits == 64)
         goto b64_byte1;
@@ -1370,7 +1303,7 @@
       ++cur;
       if (bits == 255)
         return parse_error(
-            exec_ctx, p, cur, end,
+            p, cur, end,
             GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
       else if (bits == 64)
         goto b64_byte2;
@@ -1386,7 +1319,7 @@
       ++cur;
       if (bits == 255)
         return parse_error(
-            exec_ctx, p, cur, end,
+            p, cur, end,
             GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal base64 character"));
       else if (bits == 64)
         goto b64_byte3;
@@ -1399,12 +1332,11 @@
       goto b64_byte0;
   }
   GPR_UNREACHABLE_CODE(return parse_error(
-      exec_ctx, p, cur, end,
+      p, cur, end,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Should never reach here")));
 }
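
The B64_BYTE0..B64_BYTE3 states above accumulate base64 symbols six bits at a time and flush three decoded bytes per group of four; finish_str() below handles a truncated final group. The packing itself, as a toy helper that assumes whole groups and a caller-supplied 256-entry reverse table (the real parser keeps such a table and treats entries of 255 as illegal base64 characters):

    #include <stddef.h>
    #include <stdint.h>

    /* Decode one complete 4-symbol base64 group into 3 output bytes. */
    static size_t toy_b64_group(const uint8_t sym[4], const uint8_t rev[256],
                                uint8_t out[3]) {
      const uint32_t bits = ((uint32_t)rev[sym[0]] << 18) |
                            ((uint32_t)rev[sym[1]] << 12) |
                            ((uint32_t)rev[sym[2]] << 6) |
                            (uint32_t)rev[sym[3]];
      out[0] = (uint8_t)(bits >> 16);
      out[1] = (uint8_t)(bits >> 8);
      out[2] = (uint8_t)bits;
      return 3;
    }
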
 
-static grpc_error* finish_str(grpc_exec_ctx* exec_ctx,
-                              grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* finish_str(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                               const uint8_t* end) {
   uint8_t decoded[2];
   uint32_t bits;
@@ -1417,7 +1349,7 @@
     case B64_BYTE0:
       break;
     case B64_BYTE1:
-      return parse_error(exec_ctx, p, cur, end,
+      return parse_error(p, cur, end,
                          GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                              "illegal base64 encoding")); /* illegal encoding */
     case B64_BYTE2:
@@ -1428,7 +1360,7 @@
                      bits & 0xffff);
         grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
         gpr_free(msg);
-        return parse_error(exec_ctx, p, cur, end, err);
+        return parse_error(p, cur, end, err);
       }
       decoded[0] = (uint8_t)(bits >> 16);
       append_bytes(str, decoded, 1);
@@ -1441,7 +1373,7 @@
                      bits & 0xff);
         grpc_error* err = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
         gpr_free(msg);
-        return parse_error(exec_ctx, p, cur, end, err);
+        return parse_error(p, cur, end, err);
       }
       decoded[0] = (uint8_t)(bits >> 16);
       decoded[1] = (uint8_t)(bits >> 8);
@@ -1452,14 +1384,13 @@
 }
 
 /* decode a nibble from a huffman encoded stream */
-static grpc_error* huff_nibble(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hpack_parser* p, uint8_t nibble) {
+static grpc_error* huff_nibble(grpc_chttp2_hpack_parser* p, uint8_t nibble) {
   int16_t emit = emit_sub_tbl[16 * emit_tbl[p->huff_state] + nibble];
   int16_t next = next_sub_tbl[16 * next_tbl[p->huff_state] + nibble];
   if (emit != -1) {
     if (emit >= 0 && emit < 256) {
       uint8_t c = (uint8_t)emit;
-      grpc_error* err = append_string(exec_ctx, p, &c, (&c) + 1);
+      grpc_error* err = append_string(p, &c, (&c) + 1);
       if (err != GRPC_ERROR_NONE) return err;
     } else {
       assert(emit == 256);
@@ -1470,45 +1401,42 @@
 }
 
 /* decode full bytes from a huffman encoded stream */
-static grpc_error* add_huff_bytes(grpc_exec_ctx* exec_ctx,
-                                  grpc_chttp2_hpack_parser* p,
+static grpc_error* add_huff_bytes(grpc_chttp2_hpack_parser* p,
                                   const uint8_t* cur, const uint8_t* end) {
   for (; cur != end; ++cur) {
-    grpc_error* err = huff_nibble(exec_ctx, p, *cur >> 4);
-    if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-    err = huff_nibble(exec_ctx, p, *cur & 0xf);
-    if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+    grpc_error* err = huff_nibble(p, *cur >> 4);
+    if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+    err = huff_nibble(p, *cur & 0xf);
+    if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
   }
   return GRPC_ERROR_NONE;
 }
 
 /* decode some string bytes based on the current decoding mode
    (huffman or not) */
-static grpc_error* add_str_bytes(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_hpack_parser* p,
+static grpc_error* add_str_bytes(grpc_chttp2_hpack_parser* p,
                                  const uint8_t* cur, const uint8_t* end) {
   if (p->huff) {
-    return add_huff_bytes(exec_ctx, p, cur, end);
+    return add_huff_bytes(p, cur, end);
   } else {
-    return append_string(exec_ctx, p, cur, end);
+    return append_string(p, cur, end);
   }
 }
 
 /* parse a string - tries to do large chunks at a time */
-static grpc_error* parse_string(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_hpack_parser* p, const uint8_t* cur,
+static grpc_error* parse_string(grpc_chttp2_hpack_parser* p, const uint8_t* cur,
                                 const uint8_t* end) {
   size_t remaining = p->strlen - p->strgot;
   size_t given = (size_t)(end - cur);
   if (remaining <= given) {
-    grpc_error* err = add_str_bytes(exec_ctx, p, cur, cur + remaining);
-    if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-    err = finish_str(exec_ctx, p, cur + remaining, end);
-    if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-    return parse_next(exec_ctx, p, cur + remaining, end);
+    grpc_error* err = add_str_bytes(p, cur, cur + remaining);
+    if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+    err = finish_str(p, cur + remaining, end);
+    if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+    return parse_next(p, cur + remaining, end);
   } else {
-    grpc_error* err = add_str_bytes(exec_ctx, p, cur, cur + given);
-    if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
+    grpc_error* err = add_str_bytes(p, cur, cur + given);
+    if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
     GPR_ASSERT(given <= UINT32_MAX - p->strgot);
     p->strgot += (uint32_t)given;
     p->state = parse_string;
@@ -1517,20 +1445,19 @@
 }
 
 /* begin parsing a string - performs setup, calls parse_string */
-static grpc_error* begin_parse_string(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_hpack_parser* p,
+static grpc_error* begin_parse_string(grpc_chttp2_hpack_parser* p,
                                       const uint8_t* cur, const uint8_t* end,
                                       uint8_t binary,
                                       grpc_chttp2_hpack_parser_string* str) {
   if (!p->huff && binary == NOT_BINARY && (end - cur) >= (intptr_t)p->strlen &&
       p->current_slice_refcount != nullptr) {
-    GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx);
+    GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED();
     str->copied = false;
     str->data.referenced.refcount = p->current_slice_refcount;
     str->data.referenced.data.refcounted.bytes = (uint8_t*)cur;
     str->data.referenced.data.refcounted.length = p->strlen;
     grpc_slice_ref_internal(str->data.referenced);
-    return parse_next(exec_ctx, p, cur + p->strlen, end);
+    return parse_next(p, cur + p->strlen, end);
   }
   p->strgot = 0;
   str->copied = true;
@@ -1541,9 +1468,9 @@
   switch (p->binary) {
     case NOT_BINARY:
       if (p->huff) {
-        GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx);
+        GRPC_STATS_INC_HPACK_RECV_HUFFMAN();
       } else {
-        GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx);
+        GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED();
       }
       break;
     case BINARY_BEGIN:
@@ -1552,14 +1479,13 @@
     default:
       abort();
   }
-  return parse_string(exec_ctx, p, cur, end);
+  return parse_string(p, cur, end);
 }
 
 /* parse the key string */
-static grpc_error* parse_key_string(grpc_exec_ctx* exec_ctx,
-                                    grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_key_string(grpc_chttp2_hpack_parser* p,
                                     const uint8_t* cur, const uint8_t* end) {
-  return begin_parse_string(exec_ctx, p, cur, end, NOT_BINARY, &p->key);
+  return begin_parse_string(p, cur, end, NOT_BINARY, &p->key);
 }
 
 /* check if a key represents a binary header or not */
@@ -1586,33 +1512,29 @@
 }
 
 /* parse the value string */
-static grpc_error* parse_value_string(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_hpack_parser* p,
+static grpc_error* parse_value_string(grpc_chttp2_hpack_parser* p,
                                       const uint8_t* cur, const uint8_t* end,
                                       bool is_binary) {
-  return begin_parse_string(exec_ctx, p, cur, end,
-                            is_binary ? BINARY_BEGIN : NOT_BINARY, &p->value);
+  return begin_parse_string(p, cur, end, is_binary ? BINARY_BEGIN : NOT_BINARY,
+                            &p->value);
 }
 
 static grpc_error* parse_value_string_with_indexed_key(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* cur,
-    const uint8_t* end) {
+    grpc_chttp2_hpack_parser* p, const uint8_t* cur, const uint8_t* end) {
   bool is_binary = false;
   grpc_error* err = is_binary_indexed_header(p, &is_binary);
-  if (err != GRPC_ERROR_NONE) return parse_error(exec_ctx, p, cur, end, err);
-  return parse_value_string(exec_ctx, p, cur, end, is_binary);
+  if (err != GRPC_ERROR_NONE) return parse_error(p, cur, end, err);
+  return parse_value_string(p, cur, end, is_binary);
 }
 
 static grpc_error* parse_value_string_with_literal_key(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* cur,
-    const uint8_t* end) {
-  return parse_value_string(exec_ctx, p, cur, end, is_binary_literal_header(p));
+    grpc_chttp2_hpack_parser* p, const uint8_t* cur, const uint8_t* end) {
+  return parse_value_string(p, cur, end, is_binary_literal_header(p));
 }
 
 /* PUBLIC INTERFACE */
 
-void grpc_chttp2_hpack_parser_init(grpc_exec_ctx* exec_ctx,
-                                   grpc_chttp2_hpack_parser* p) {
+void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser* p) {
   p->on_header = nullptr;
   p->on_header_user_data = nullptr;
   p->state = parse_begin;
@@ -1626,7 +1548,7 @@
   p->value.data.copied.length = 0;
   p->dynamic_table_update_allowed = 2;
   p->last_error = GRPC_ERROR_NONE;
-  grpc_chttp2_hptbl_init(exec_ctx, &p->table);
+  grpc_chttp2_hptbl_init(&p->table);
 }
 
 void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p) {
@@ -1634,18 +1556,16 @@
   p->state = parse_stream_dep0;
 }
 
-void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_hpack_parser* p) {
-  grpc_chttp2_hptbl_destroy(exec_ctx, &p->table);
+void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p) {
+  grpc_chttp2_hptbl_destroy(&p->table);
   GRPC_ERROR_UNREF(p->last_error);
-  grpc_slice_unref_internal(exec_ctx, p->key.data.referenced);
-  grpc_slice_unref_internal(exec_ctx, p->value.data.referenced);
+  grpc_slice_unref_internal(p->key.data.referenced);
+  grpc_slice_unref_internal(p->value.data.referenced);
   gpr_free(p->key.data.copied.str);
   gpr_free(p->value.data.copied.str);
 }
 
-grpc_error* grpc_chttp2_hpack_parser_parse(grpc_exec_ctx* exec_ctx,
-                                           grpc_chttp2_hpack_parser* p,
+grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p,
                                            grpc_slice slice) {
 /* max number of bytes to parse at a time... limits call stack depth on
  * compilers without TCO */
@@ -1656,37 +1576,33 @@
   grpc_error* error = GRPC_ERROR_NONE;
   while (start != end && error == GRPC_ERROR_NONE) {
     uint8_t* target = start + GPR_MIN(MAX_PARSE_LENGTH, end - start);
-    error = p->state(exec_ctx, p, start, target);
+    error = p->state(p, start, target);
     start = target;
   }
   p->current_slice_refcount = nullptr;
   return error;
 }
 
-typedef void (*maybe_complete_func_type)(grpc_exec_ctx* exec_ctx,
-                                         grpc_chttp2_transport* t,
+typedef void (*maybe_complete_func_type)(grpc_chttp2_transport* t,
                                          grpc_chttp2_stream* s);
 static const maybe_complete_func_type maybe_complete_funcs[] = {
     grpc_chttp2_maybe_complete_recv_initial_metadata,
     grpc_chttp2_maybe_complete_recv_trailing_metadata};
 
-static void force_client_rst_stream(grpc_exec_ctx* exec_ctx, void* sp,
-                                    grpc_error* error) {
+static void force_client_rst_stream(void* sp, grpc_error* error) {
   grpc_chttp2_stream* s = (grpc_chttp2_stream*)sp;
   grpc_chttp2_transport* t = s->t;
   if (!s->write_closed) {
     grpc_slice_buffer_add(
         &t->qbuf, grpc_chttp2_rst_stream_create(s->id, GRPC_HTTP2_NO_ERROR,
                                                 &s->stats.outgoing));
-    grpc_chttp2_initiate_write(exec_ctx, t,
-                               GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM);
-    grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, true, GRPC_ERROR_NONE);
+    grpc_chttp2_initiate_write(t, GRPC_CHTTP2_INITIATE_WRITE_FORCE_RST_STREAM);
+    grpc_chttp2_mark_stream_closed(t, s, true, true, GRPC_ERROR_NONE);
   }
-  GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "final_rst");
+  GRPC_CHTTP2_STREAM_UNREF(s, "final_rst");
 }
 
-static void parse_stream_compression_md(grpc_exec_ctx* exec_ctx,
-                                        grpc_chttp2_transport* t,
+static void parse_stream_compression_md(grpc_chttp2_transport* t,
                                         grpc_chttp2_stream* s,
                                         grpc_metadata_batch* initial_metadata) {
   if (initial_metadata->idx.named.content_encoding == nullptr ||
@@ -1698,8 +1614,7 @@
   }
 }
 
-grpc_error* grpc_chttp2_header_parser_parse(grpc_exec_ctx* exec_ctx,
-                                            void* hpack_parser,
+grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser,
                                             grpc_chttp2_transport* t,
                                             grpc_chttp2_stream* s,
                                             grpc_slice slice, int is_last) {
@@ -1708,7 +1623,7 @@
   if (s != nullptr) {
     s->stats.incoming.header_bytes += GRPC_SLICE_LENGTH(slice);
   }
-  grpc_error* error = grpc_chttp2_hpack_parser_parse(exec_ctx, parser, slice);
+  grpc_error* error = grpc_chttp2_hpack_parser_parse(parser, slice);
   if (error != GRPC_ERROR_NONE) {
     GPR_TIMER_END("grpc_chttp2_hpack_parser_parse", 0);
     return error;
@@ -1731,12 +1646,11 @@
         /* Process stream compression md element if it exists */
         if (s->header_frames_received ==
             0) { /* Only acts on initial metadata */
-          parse_stream_compression_md(exec_ctx, t, s,
-                                      &s->metadata_buffer[0].batch);
+          parse_stream_compression_md(t, s, &s->metadata_buffer[0].batch);
         }
         s->published_metadata[s->header_frames_received] =
             GRPC_METADATA_PUBLISHED_FROM_WIRE;
-        maybe_complete_funcs[s->header_frames_received](exec_ctx, t, s);
+        maybe_complete_funcs[s->header_frames_received](t, s);
         s->header_frames_received++;
       }
       if (parser->is_eof) {
@@ -1747,13 +1661,11 @@
              and can avoid the extra write */
           GRPC_CHTTP2_STREAM_REF(s, "final_rst");
           GRPC_CLOSURE_SCHED(
-              exec_ctx,
               GRPC_CLOSURE_CREATE(force_client_rst_stream, s,
                                   grpc_combiner_finally_scheduler(t->combiner)),
               GRPC_ERROR_NONE);
         }
-        grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
-                                       GRPC_ERROR_NONE);
+        grpc_chttp2_mark_stream_closed(t, s, true, false, GRPC_ERROR_NONE);
       }
     }
     parser->on_header = nullptr;
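
The change in this file is representative of the whole series: every parser entry point and callback loses its explicit grpc_exec_ctx* parameter, and the call sites that still need the execution context (clock reads, stats, closure scheduling) reach it through the thread-local accessor grpc_core::ExecCtx::Get(), as seen later in parsing.cc and writing.cc. A minimal sketch of what this implies for an outer caller, assuming the post-change convention that the top-most frame instantiates the context on its own stack (the helper name read_headers is illustrative, not part of this patch):

    // Sketch only: callees no longer take an exec_ctx argument; they rely on
    // a grpc_core::ExecCtx existing somewhere up the call stack.
    void read_headers(grpc_chttp2_hpack_parser* p, grpc_slice slice) {
      grpc_core::ExecCtx exec_ctx;  // scoped, thread-local execution context
      grpc_error* err = grpc_chttp2_hpack_parser_parse(p, slice);
      if (err != GRPC_ERROR_NONE) {
        gpr_log(GPR_ERROR, "hpack parse failed: %s", grpc_error_string(err));
        GRPC_ERROR_UNREF(err);
      }
      // work deferred onto the context is flushed when exec_ctx leaves scope
    }
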
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.h b/src/core/ext/transport/chttp2/transport/hpack_parser.h
index 838c482..060bc5c 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.h
@@ -27,15 +27,10 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/transport/metadata.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_chttp2_hpack_parser grpc_chttp2_hpack_parser;
 
 typedef grpc_error* (*grpc_chttp2_hpack_parser_state)(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_hpack_parser* p, const uint8_t* beg,
-    const uint8_t* end);
+    grpc_chttp2_hpack_parser* p, const uint8_t* beg, const uint8_t* end);
 
 typedef struct {
   bool copied;
@@ -51,7 +46,7 @@
 
 struct grpc_chttp2_hpack_parser {
   /* user specified callback for each header output */
-  void (*on_header)(grpc_exec_ctx* exec_ctx, void* user_data, grpc_mdelem md);
+  void (*on_header)(void* user_data, grpc_mdelem md);
   void* on_header_user_data;
 
   grpc_error* last_error;
@@ -96,27 +91,19 @@
   grpc_chttp2_hptbl table;
 };
 
-void grpc_chttp2_hpack_parser_init(grpc_exec_ctx* exec_ctx,
-                                   grpc_chttp2_hpack_parser* p);
-void grpc_chttp2_hpack_parser_destroy(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_hpack_parser* p);
+void grpc_chttp2_hpack_parser_init(grpc_chttp2_hpack_parser* p);
+void grpc_chttp2_hpack_parser_destroy(grpc_chttp2_hpack_parser* p);
 
 void grpc_chttp2_hpack_parser_set_has_priority(grpc_chttp2_hpack_parser* p);
 
-grpc_error* grpc_chttp2_hpack_parser_parse(grpc_exec_ctx* exec_ctx,
-                                           grpc_chttp2_hpack_parser* p,
+grpc_error* grpc_chttp2_hpack_parser_parse(grpc_chttp2_hpack_parser* p,
                                            grpc_slice slice);
 
 /* wraps grpc_chttp2_hpack_parser_parse to provide a frame level parser for
    the transport */
-grpc_error* grpc_chttp2_header_parser_parse(grpc_exec_ctx* exec_ctx,
-                                            void* hpack_parser,
+grpc_error* grpc_chttp2_header_parser_parse(void* hpack_parser,
                                             grpc_chttp2_transport* t,
                                             grpc_chttp2_stream* s,
                                             grpc_slice slice, int is_last);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_PARSER_H */
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.cc b/src/core/ext/transport/chttp2/transport/hpack_table.cc
index 75b83b8..c325465 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.cc
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.cc
@@ -165,7 +165,7 @@
          GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD;
 }
 
-void grpc_chttp2_hptbl_init(grpc_exec_ctx* exec_ctx, grpc_chttp2_hptbl* tbl) {
+void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl* tbl) {
   size_t i;
 
   memset(tbl, 0, sizeof(*tbl));
@@ -177,22 +177,19 @@
   memset(tbl->ents, 0, sizeof(*tbl->ents) * tbl->cap_entries);
   for (i = 1; i <= GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
     tbl->static_ents[i - 1] = grpc_mdelem_from_slices(
-        exec_ctx,
         grpc_slice_intern(grpc_slice_from_static_string(static_table[i].key)),
         grpc_slice_intern(
             grpc_slice_from_static_string(static_table[i].value)));
   }
 }
 
-void grpc_chttp2_hptbl_destroy(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_hptbl* tbl) {
+void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl* tbl) {
   size_t i;
   for (i = 0; i < GRPC_CHTTP2_LAST_STATIC_ENTRY; i++) {
-    GRPC_MDELEM_UNREF(exec_ctx, tbl->static_ents[i]);
+    GRPC_MDELEM_UNREF(tbl->static_ents[i]);
   }
   for (i = 0; i < tbl->num_ents; i++) {
-    GRPC_MDELEM_UNREF(exec_ctx,
-                      tbl->ents[(tbl->first_ent + i) % tbl->cap_entries]);
+    GRPC_MDELEM_UNREF(tbl->ents[(tbl->first_ent + i) % tbl->cap_entries]);
   }
   gpr_free(tbl->ents);
 }
@@ -215,7 +212,7 @@
 }
 
 /* Evict one element from the table */
-static void evict1(grpc_exec_ctx* exec_ctx, grpc_chttp2_hptbl* tbl) {
+static void evict1(grpc_chttp2_hptbl* tbl) {
   grpc_mdelem first_ent = tbl->ents[tbl->first_ent];
   size_t elem_bytes = GRPC_SLICE_LENGTH(GRPC_MDKEY(first_ent)) +
                       GRPC_SLICE_LENGTH(GRPC_MDVALUE(first_ent)) +
@@ -224,7 +221,7 @@
   tbl->mem_used -= (uint32_t)elem_bytes;
   tbl->first_ent = ((tbl->first_ent + 1) % tbl->cap_entries);
   tbl->num_ents--;
-  GRPC_MDELEM_UNREF(exec_ctx, first_ent);
+  GRPC_MDELEM_UNREF(first_ent);
 }
 
 static void rebuild_ents(grpc_chttp2_hptbl* tbl, uint32_t new_cap) {
@@ -240,8 +237,7 @@
   tbl->first_ent = 0;
 }
 
-void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_hptbl* tbl,
+void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl* tbl,
                                      uint32_t max_bytes) {
   if (tbl->max_bytes == max_bytes) {
     return;
@@ -250,13 +246,12 @@
     gpr_log(GPR_DEBUG, "Update hpack parser max size to %d", max_bytes);
   }
   while (tbl->mem_used > max_bytes) {
-    evict1(exec_ctx, tbl);
+    evict1(tbl);
   }
   tbl->max_bytes = max_bytes;
 }
 
-grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx* exec_ctx,
-                                                     grpc_chttp2_hptbl* tbl,
+grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl* tbl,
                                                      uint32_t bytes) {
   if (tbl->current_table_bytes == bytes) {
     return GRPC_ERROR_NONE;
@@ -274,7 +269,7 @@
     gpr_log(GPR_DEBUG, "Update hpack parser table size to %d", bytes);
   }
   while (tbl->mem_used > bytes) {
-    evict1(exec_ctx, tbl);
+    evict1(tbl);
   }
   tbl->current_table_bytes = bytes;
   tbl->max_entries = entries_for_bytes(bytes);
@@ -289,8 +284,7 @@
   return GRPC_ERROR_NONE;
 }
 
-grpc_error* grpc_chttp2_hptbl_add(grpc_exec_ctx* exec_ctx,
-                                  grpc_chttp2_hptbl* tbl, grpc_mdelem md) {
+grpc_error* grpc_chttp2_hptbl_add(grpc_chttp2_hptbl* tbl, grpc_mdelem md) {
   /* determine how many bytes of buffer this entry represents */
   size_t elem_bytes = GRPC_SLICE_LENGTH(GRPC_MDKEY(md)) +
                       GRPC_SLICE_LENGTH(GRPC_MDVALUE(md)) +
@@ -320,14 +314,14 @@
      * empty table.
      */
     while (tbl->num_ents) {
-      evict1(exec_ctx, tbl);
+      evict1(tbl);
     }
     return GRPC_ERROR_NONE;
   }
 
   /* evict entries to ensure no overflow */
   while (elem_bytes > (size_t)tbl->current_table_bytes - tbl->mem_used) {
-    evict1(exec_ctx, tbl);
+    evict1(tbl);
   }
 
   /* copy the finalized entry in */
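
For reference, the accounting that drives evict1() and grpc_chttp2_hptbl_add() follows the HPACK rule of RFC 7541 section 4.1: an entry is charged its key length plus its value length plus a fixed 32-byte overhead, which is what GRPC_CHTTP2_HPACK_ENTRY_OVERHEAD accounts for, and entries are evicted from first_ent until mem_used fits under the current table size. A small sketch of that arithmetic, with illustrative values:

    // Sketch of the per-entry size rule used above (values illustrative).
    #include <stddef.h>
    static size_t hpack_entry_bytes(size_t key_len, size_t value_len) {
      const size_t kEntryOverhead = 32;  // RFC 7541 fixed per-entry overhead
      return key_len + value_len + kEntryOverhead;
    }
    // "content-type" (12) + "application/grpc" (16) -> 12 + 16 + 32 = 60 bytes
    // charged against current_table_bytes; when an insert would overflow,
    // older entries are dropped one at a time from the front of the ring.
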
diff --git a/src/core/ext/transport/chttp2/transport/hpack_table.h b/src/core/ext/transport/chttp2/transport/hpack_table.h
index ddc8888..189ad1c 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_table.h
+++ b/src/core/ext/transport/chttp2/transport/hpack_table.h
@@ -24,10 +24,6 @@
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/transport/metadata.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* HPACK header table */
 
 /* last index in the static table */
@@ -73,21 +69,18 @@
 } grpc_chttp2_hptbl;
 
 /* initialize a hpack table */
-void grpc_chttp2_hptbl_init(grpc_exec_ctx* exec_ctx, grpc_chttp2_hptbl* tbl);
-void grpc_chttp2_hptbl_destroy(grpc_exec_ctx* exec_ctx, grpc_chttp2_hptbl* tbl);
-void grpc_chttp2_hptbl_set_max_bytes(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_hptbl* tbl,
+void grpc_chttp2_hptbl_init(grpc_chttp2_hptbl* tbl);
+void grpc_chttp2_hptbl_destroy(grpc_chttp2_hptbl* tbl);
+void grpc_chttp2_hptbl_set_max_bytes(grpc_chttp2_hptbl* tbl,
                                      uint32_t max_bytes);
-grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_exec_ctx* exec_ctx,
-                                                     grpc_chttp2_hptbl* tbl,
+grpc_error* grpc_chttp2_hptbl_set_current_table_size(grpc_chttp2_hptbl* tbl,
                                                      uint32_t bytes);
 
 /* lookup a table entry based on its hpack index */
 grpc_mdelem grpc_chttp2_hptbl_lookup(const grpc_chttp2_hptbl* tbl,
                                      uint32_t index);
 /* add a table entry to the index */
-grpc_error* grpc_chttp2_hptbl_add(grpc_exec_ctx* exec_ctx,
-                                  grpc_chttp2_hptbl* tbl,
+grpc_error* grpc_chttp2_hptbl_add(grpc_chttp2_hptbl* tbl,
                                   grpc_mdelem md) GRPC_MUST_USE_RESULT;
 /* Find a key/value pair in the table... returns the index in the table of the
    most similar entry, or 0 if the value was not found */
@@ -98,8 +91,4 @@
 grpc_chttp2_hptbl_find_result grpc_chttp2_hptbl_find(
     const grpc_chttp2_hptbl* tbl, grpc_mdelem md);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HPACK_TABLE_H */
diff --git a/src/core/ext/transport/chttp2/transport/http2_settings.h b/src/core/ext/transport/chttp2/transport/http2_settings.h
index 86069b4..fd15b69 100644
--- a/src/core/ext/transport/chttp2/transport/http2_settings.h
+++ b/src/core/ext/transport/chttp2/transport/http2_settings.h
@@ -36,9 +36,6 @@
 
 #define GRPC_CHTTP2_NUM_SETTINGS 7
 
-#ifdef __cplusplus
-extern "C" {
-#endif
 extern const uint16_t grpc_setting_id_to_wire_id[];
 
 bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id* out);
@@ -60,8 +57,4 @@
 extern const grpc_chttp2_setting_parameters
     grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS];
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */
diff --git a/src/core/ext/transport/chttp2/transport/huffsyms.h b/src/core/ext/transport/chttp2/transport/huffsyms.h
index 4002706..2e2a5da 100644
--- a/src/core/ext/transport/chttp2/transport/huffsyms.h
+++ b/src/core/ext/transport/chttp2/transport/huffsyms.h
@@ -19,10 +19,6 @@
 #ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HUFFSYMS_H
 #define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HUFFSYMS_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* HPACK static huffman table */
 
 #define GRPC_CHTTP2_NUM_HUFFSYMS 257
@@ -34,8 +30,4 @@
 
 extern const grpc_chttp2_huffsym grpc_chttp2_huffsyms[GRPC_CHTTP2_NUM_HUFFSYMS];
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HUFFSYMS_H */
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
index 4461f8c..ef0c9ed 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.cc
@@ -33,33 +33,31 @@
 }
 
 void grpc_chttp2_incoming_metadata_buffer_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer) {
-  grpc_metadata_batch_destroy(exec_ctx, &buffer->batch);
+    grpc_chttp2_incoming_metadata_buffer* buffer) {
+  grpc_metadata_batch_destroy(&buffer->batch);
 }
 
 grpc_error* grpc_chttp2_incoming_metadata_buffer_add(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
-    grpc_mdelem elem) {
+    grpc_chttp2_incoming_metadata_buffer* buffer, grpc_mdelem elem) {
   buffer->size += GRPC_MDELEM_LENGTH(elem);
   return grpc_metadata_batch_add_tail(
-      exec_ctx, &buffer->batch,
+      &buffer->batch,
       (grpc_linked_mdelem*)gpr_arena_alloc(buffer->arena,
                                            sizeof(grpc_linked_mdelem)),
       elem);
 }
 
 grpc_error* grpc_chttp2_incoming_metadata_buffer_replace_or_add(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
-    grpc_mdelem elem) {
+    grpc_chttp2_incoming_metadata_buffer* buffer, grpc_mdelem elem) {
   for (grpc_linked_mdelem* l = buffer->batch.list.head; l != nullptr;
        l = l->next) {
     if (grpc_slice_eq(GRPC_MDKEY(l->md), GRPC_MDKEY(elem))) {
-      GRPC_MDELEM_UNREF(exec_ctx, l->md);
+      GRPC_MDELEM_UNREF(l->md);
       l->md = elem;
       return GRPC_ERROR_NONE;
     }
   }
-  return grpc_chttp2_incoming_metadata_buffer_add(exec_ctx, buffer, elem);
+  return grpc_chttp2_incoming_metadata_buffer_add(buffer, elem);
 }
 
 void grpc_chttp2_incoming_metadata_buffer_set_deadline(
@@ -68,8 +66,7 @@
 }
 
 void grpc_chttp2_incoming_metadata_buffer_publish(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
-    grpc_metadata_batch* batch) {
+    grpc_chttp2_incoming_metadata_buffer* buffer, grpc_metadata_batch* batch) {
   *batch = buffer->batch;
   grpc_metadata_batch_init(&buffer->batch);
 }
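
Note the ownership rule in replace_or_add above: on a key match the old element is unreffed and the caller's element is stored in place; only when no key matches does it fall through to the arena-backed add path. A generic sketch of the same replace-or-append shape over a plain linked list (types here are illustrative, not gRPC's):

    // Illustrative only: replace the value for an existing key, else append.
    #include <memory>
    #include <string>
    struct md_node {
      std::string key;
      std::string value;
      std::unique_ptr<md_node> next;
    };
    void replace_or_add(std::unique_ptr<md_node>& head, std::string key,
                        std::string value) {
      std::unique_ptr<md_node>* slot = &head;
      while (*slot != nullptr) {
        if ((*slot)->key == key) {    // existing key: swap the value in place
          (*slot)->value = std::move(value);
          return;
        }
        slot = &(*slot)->next;
      }
      *slot = std::make_unique<md_node>();  // no match: append at the tail
      (*slot)->key = std::move(key);
      (*slot)->value = std::move(value);
    }
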
diff --git a/src/core/ext/transport/chttp2/transport/incoming_metadata.h b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
index 7ccb4a0..b84cd48 100644
--- a/src/core/ext/transport/chttp2/transport/incoming_metadata.h
+++ b/src/core/ext/transport/chttp2/transport/incoming_metadata.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/transport/transport.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   gpr_arena* arena;
   grpc_metadata_batch batch;
@@ -35,22 +31,17 @@
 void grpc_chttp2_incoming_metadata_buffer_init(
     grpc_chttp2_incoming_metadata_buffer* buffer, gpr_arena* arena);
 void grpc_chttp2_incoming_metadata_buffer_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer);
+    grpc_chttp2_incoming_metadata_buffer* buffer);
 void grpc_chttp2_incoming_metadata_buffer_publish(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
-    grpc_metadata_batch* batch);
+    grpc_chttp2_incoming_metadata_buffer* buffer, grpc_metadata_batch* batch);
 
 grpc_error* grpc_chttp2_incoming_metadata_buffer_add(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
+    grpc_chttp2_incoming_metadata_buffer* buffer,
     grpc_mdelem elem) GRPC_MUST_USE_RESULT;
 grpc_error* grpc_chttp2_incoming_metadata_buffer_replace_or_add(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_metadata_buffer* buffer,
+    grpc_chttp2_incoming_metadata_buffer* buffer,
     grpc_mdelem elem) GRPC_MUST_USE_RESULT;
 void grpc_chttp2_incoming_metadata_buffer_set_deadline(
     grpc_chttp2_incoming_metadata_buffer* buffer, grpc_millis deadline);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INCOMING_METADATA_H */
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index e939445..932f5ba 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -42,10 +42,6 @@
 #include "src/core/lib/transport/connectivity_state.h"
 #include "src/core/lib/transport/transport_impl.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* streams are kept in various linked lists depending on what things need to
    happen to them... this enum labels each list */
 typedef enum {
@@ -286,8 +282,8 @@
 
   struct {
     /* accept stream callback */
-    void (*accept_stream)(grpc_exec_ctx* exec_ctx, void* user_data,
-                          grpc_transport* transport, const void* server_data);
+    void (*accept_stream)(void* user_data, grpc_transport* transport,
+                          const void* server_data);
     void* accept_stream_user_data;
 
     /** connectivity tracking */
@@ -375,9 +371,8 @@
   /* active parser */
   void* parser_data;
   grpc_chttp2_stream* incoming_stream;
-  grpc_error* (*parser)(grpc_exec_ctx* exec_ctx, void* parser_user_data,
-                        grpc_chttp2_transport* t, grpc_chttp2_stream* s,
-                        grpc_slice slice, int is_last);
+  grpc_error* (*parser)(void* parser_user_data, grpc_chttp2_transport* t,
+                        grpc_chttp2_stream* s, grpc_slice slice, int is_last);
 
   grpc_chttp2_write_cb* write_cb_pool;
 
@@ -575,8 +570,7 @@
 
     The actual call chain is documented in the implementation of this function.
     */
-void grpc_chttp2_initiate_write(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_transport* t,
+void grpc_chttp2_initiate_write(grpc_chttp2_transport* t,
                                 grpc_chttp2_initiate_write_reason reason);
 
 typedef struct {
@@ -589,14 +583,12 @@
 } grpc_chttp2_begin_write_result;
 
 grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t);
-void grpc_chttp2_end_write(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                           grpc_error* error);
+    grpc_chttp2_transport* t);
+void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error* error);
 
 /** Process one slice of incoming data; returns GRPC_ERROR_NONE if the
     connection is still viable after reading, or an error if the connection
     should be torn down */
-grpc_error* grpc_chttp2_perform_read(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t,
+grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t,
                                      grpc_slice slice);
 
 bool grpc_chttp2_list_add_writable_stream(grpc_chttp2_transport* t,
@@ -644,27 +636,23 @@
 
 // Takes in a flow control action and performs all the needed operations.
 void grpc_chttp2_act_on_flowctl_action(
-    grpc_exec_ctx* exec_ctx, const grpc_core::chttp2::FlowControlAction& action,
+    const grpc_core::chttp2::FlowControlAction& action,
     grpc_chttp2_transport* t, grpc_chttp2_stream* s);
 
 /********* End of Flow Control ***************/
 
 grpc_chttp2_stream* grpc_chttp2_parsing_lookup_stream(grpc_chttp2_transport* t,
                                                       uint32_t id);
-grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_exec_ctx* exec_ctx,
-                                                      grpc_chttp2_transport* t,
+grpc_chttp2_stream* grpc_chttp2_parsing_accept_stream(grpc_chttp2_transport* t,
                                                       uint32_t id);
 
-void grpc_chttp2_add_incoming_goaway(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t,
+void grpc_chttp2_add_incoming_goaway(grpc_chttp2_transport* t,
                                      uint32_t goaway_error,
                                      grpc_slice goaway_text);
 
-void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx* exec_ctx,
-                                            grpc_chttp2_transport* t);
+void grpc_chttp2_parsing_become_skip_parser(grpc_chttp2_transport* t);
 
-void grpc_chttp2_complete_closure_step(grpc_exec_ctx* exec_ctx,
-                                       grpc_chttp2_transport* t,
+void grpc_chttp2_complete_closure_step(grpc_chttp2_transport* t,
                                        grpc_chttp2_stream* s,
                                        grpc_closure** pclosure,
                                        grpc_error* error, const char* desc);
@@ -685,94 +673,80 @@
   else                               \
     stmt
 
-void grpc_chttp2_fake_status(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
+void grpc_chttp2_fake_status(grpc_chttp2_transport* t,
                              grpc_chttp2_stream* stream, grpc_error* error);
-void grpc_chttp2_mark_stream_closed(grpc_exec_ctx* exec_ctx,
-                                    grpc_chttp2_transport* t,
+void grpc_chttp2_mark_stream_closed(grpc_chttp2_transport* t,
                                     grpc_chttp2_stream* s, int close_reads,
                                     int close_writes, grpc_error* error);
-void grpc_chttp2_start_writing(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_transport* t);
+void grpc_chttp2_start_writing(grpc_chttp2_transport* t);
 
 #ifndef NDEBUG
 #define GRPC_CHTTP2_STREAM_REF(stream, reason) \
   grpc_chttp2_stream_ref(stream, reason)
-#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
-  grpc_chttp2_stream_unref(exec_ctx, stream, reason)
+#define GRPC_CHTTP2_STREAM_UNREF(stream, reason) \
+  grpc_chttp2_stream_unref(stream, reason)
 void grpc_chttp2_stream_ref(grpc_chttp2_stream* s, const char* reason);
-void grpc_chttp2_stream_unref(grpc_exec_ctx* exec_ctx, grpc_chttp2_stream* s,
-                              const char* reason);
+void grpc_chttp2_stream_unref(grpc_chttp2_stream* s, const char* reason);
 #else
 #define GRPC_CHTTP2_STREAM_REF(stream, reason) grpc_chttp2_stream_ref(stream)
-#define GRPC_CHTTP2_STREAM_UNREF(exec_ctx, stream, reason) \
-  grpc_chttp2_stream_unref(exec_ctx, stream)
+#define GRPC_CHTTP2_STREAM_UNREF(stream, reason) \
+  grpc_chttp2_stream_unref(stream)
 void grpc_chttp2_stream_ref(grpc_chttp2_stream* s);
-void grpc_chttp2_stream_unref(grpc_exec_ctx* exec_ctx, grpc_chttp2_stream* s);
+void grpc_chttp2_stream_unref(grpc_chttp2_stream* s);
 #endif
 
 #ifndef NDEBUG
 #define GRPC_CHTTP2_REF_TRANSPORT(t, r) \
   grpc_chttp2_ref_transport(t, r, __FILE__, __LINE__)
-#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) \
-  grpc_chttp2_unref_transport(cl, t, r, __FILE__, __LINE__)
-void grpc_chttp2_unref_transport(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_transport* t, const char* reason,
+#define GRPC_CHTTP2_UNREF_TRANSPORT(t, r) \
+  grpc_chttp2_unref_transport(t, r, __FILE__, __LINE__)
+void grpc_chttp2_unref_transport(grpc_chttp2_transport* t, const char* reason,
                                  const char* file, int line);
 void grpc_chttp2_ref_transport(grpc_chttp2_transport* t, const char* reason,
                                const char* file, int line);
 #else
 #define GRPC_CHTTP2_REF_TRANSPORT(t, r) grpc_chttp2_ref_transport(t)
-#define GRPC_CHTTP2_UNREF_TRANSPORT(cl, t, r) grpc_chttp2_unref_transport(cl, t)
-void grpc_chttp2_unref_transport(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_transport* t);
+#define GRPC_CHTTP2_UNREF_TRANSPORT(t, r) grpc_chttp2_unref_transport(t)
+void grpc_chttp2_unref_transport(grpc_chttp2_transport* t);
 void grpc_chttp2_ref_transport(grpc_chttp2_transport* t);
 #endif
 
 grpc_chttp2_incoming_byte_stream* grpc_chttp2_incoming_byte_stream_create(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t, grpc_chttp2_stream* s,
-    uint32_t frame_size, uint32_t flags);
+    grpc_chttp2_transport* t, grpc_chttp2_stream* s, uint32_t frame_size,
+    uint32_t flags);
 grpc_error* grpc_chttp2_incoming_byte_stream_push(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
-    grpc_slice slice, grpc_slice* slice_out);
+    grpc_chttp2_incoming_byte_stream* bs, grpc_slice slice,
+    grpc_slice* slice_out);
 grpc_error* grpc_chttp2_incoming_byte_stream_finished(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
-    grpc_error* error, bool reset_on_error);
+    grpc_chttp2_incoming_byte_stream* bs, grpc_error* error,
+    bool reset_on_error);
 void grpc_chttp2_incoming_byte_stream_notify(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_incoming_byte_stream* bs,
-    grpc_error* error);
+    grpc_chttp2_incoming_byte_stream* bs, grpc_error* error);
 
-void grpc_chttp2_ack_ping(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                          uint64_t id);
+void grpc_chttp2_ack_ping(grpc_chttp2_transport* t, uint64_t id);
 
 /** Add a new ping strike to ping_recv_state.ping_strikes. If
     ping_recv_state.ping_strikes > ping_policy.max_ping_strikes, it sends GOAWAY
     with error code ENHANCE_YOUR_CALM and additional debug data resembling
     "too_many_pings" followed by immediately closing the connection. */
-void grpc_chttp2_add_ping_strike(grpc_exec_ctx* exec_ctx,
-                                 grpc_chttp2_transport* t);
+void grpc_chttp2_add_ping_strike(grpc_chttp2_transport* t);
 
 /** add a ref to the stream and add it to the writable list;
     ref will be dropped in writing.c */
-void grpc_chttp2_mark_stream_writable(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_transport* t,
+void grpc_chttp2_mark_stream_writable(grpc_chttp2_transport* t,
                                       grpc_chttp2_stream* s);
 
-void grpc_chttp2_cancel_stream(grpc_exec_ctx* exec_ctx,
-                               grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+void grpc_chttp2_cancel_stream(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
                                grpc_error* due_to_error);
 
-void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_exec_ctx* exec_ctx,
-                                                      grpc_chttp2_transport* t,
+void grpc_chttp2_maybe_complete_recv_initial_metadata(grpc_chttp2_transport* t,
                                                       grpc_chttp2_stream* s);
-void grpc_chttp2_maybe_complete_recv_message(grpc_exec_ctx* exec_ctx,
-                                             grpc_chttp2_transport* t,
+void grpc_chttp2_maybe_complete_recv_message(grpc_chttp2_transport* t,
                                              grpc_chttp2_stream* s);
-void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_exec_ctx* exec_ctx,
-                                                       grpc_chttp2_transport* t,
+void grpc_chttp2_maybe_complete_recv_trailing_metadata(grpc_chttp2_transport* t,
                                                        grpc_chttp2_stream* s);
 
-void grpc_chttp2_fail_pending_writes(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t,
+void grpc_chttp2_fail_pending_writes(grpc_chttp2_transport* t,
                                      grpc_chttp2_stream* s, grpc_error* error);
 
 /** Set the default keepalive configurations, must only be called at
@@ -780,8 +754,4 @@
 void grpc_chttp2_config_default_keepalive_args(grpc_channel_args* args,
                                                bool is_client);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_INTERNAL_H */
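
The macro changes at the bottom of this header follow the usual debug-refcount pattern: the reason string (and, for the transport, __FILE__/__LINE__) is only forwarded in !NDEBUG builds, while both macros now drop the exec_ctx/closure-list argument entirely. A self-contained sketch of that pattern in general (generic code, not the gRPC API):

    // Generic illustration of a reason-carrying unref whose debug arguments
    // compile away in release builds.
    #include <atomic>
    #include <cstdio>
    struct refcounted {
      std::atomic<int> refs{1};
    };
    #ifndef NDEBUG
    #define OBJ_UNREF(obj, reason) obj_unref((obj), (reason), __FILE__, __LINE__)
    inline void obj_unref(refcounted* o, const char* reason, const char* file,
                          int line) {
      std::fprintf(stderr, "UNREF %p %s %s:%d\n", (void*)o, reason, file, line);
      if (o->refs.fetch_sub(1) == 1) delete o;  // last ref released
    }
    #else
    #define OBJ_UNREF(obj, reason) obj_unref(obj)
    inline void obj_unref(refcounted* o) {
      if (o->refs.fetch_sub(1) == 1) delete o;
    }
    #endif
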
diff --git a/src/core/ext/transport/chttp2/transport/parsing.cc b/src/core/ext/transport/chttp2/transport/parsing.cc
index 46ec3fb..a56f89c 100644
--- a/src/core/ext/transport/chttp2/transport/parsing.cc
+++ b/src/core/ext/transport/chttp2/transport/parsing.cc
@@ -31,33 +31,22 @@
 #include "src/core/lib/transport/status_conversion.h"
 #include "src/core/lib/transport/timeout_encoding.h"
 
-static grpc_error* init_frame_parser(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t);
-static grpc_error* init_header_frame_parser(grpc_exec_ctx* exec_ctx,
-                                            grpc_chttp2_transport* t,
+static grpc_error* init_frame_parser(grpc_chttp2_transport* t);
+static grpc_error* init_header_frame_parser(grpc_chttp2_transport* t,
                                             int is_continuation);
-static grpc_error* init_data_frame_parser(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_transport* t);
-static grpc_error* init_rst_stream_parser(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_transport* t);
-static grpc_error* init_settings_frame_parser(grpc_exec_ctx* exec_ctx,
-                                              grpc_chttp2_transport* t);
-static grpc_error* init_window_update_frame_parser(grpc_exec_ctx* exec_ctx,
-                                                   grpc_chttp2_transport* t);
-static grpc_error* init_ping_parser(grpc_exec_ctx* exec_ctx,
-                                    grpc_chttp2_transport* t);
-static grpc_error* init_goaway_parser(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_transport* t);
-static grpc_error* init_skip_frame_parser(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_transport* t,
+static grpc_error* init_data_frame_parser(grpc_chttp2_transport* t);
+static grpc_error* init_rst_stream_parser(grpc_chttp2_transport* t);
+static grpc_error* init_settings_frame_parser(grpc_chttp2_transport* t);
+static grpc_error* init_window_update_frame_parser(grpc_chttp2_transport* t);
+static grpc_error* init_ping_parser(grpc_chttp2_transport* t);
+static grpc_error* init_goaway_parser(grpc_chttp2_transport* t);
+static grpc_error* init_skip_frame_parser(grpc_chttp2_transport* t,
                                           int is_header);
 
-static grpc_error* parse_frame_slice(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t, grpc_slice slice,
+static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice,
                                      int is_last);
 
-grpc_error* grpc_chttp2_perform_read(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t,
+grpc_error* grpc_chttp2_perform_read(grpc_chttp2_transport* t,
                                      grpc_slice slice) {
   uint8_t* beg = GRPC_SLICE_START_PTR(slice);
   uint8_t* end = GRPC_SLICE_END_PTR(slice);
@@ -182,12 +171,12 @@
       GPR_ASSERT(cur < end);
       t->incoming_stream_id |= ((uint32_t)*cur);
       t->deframe_state = GRPC_DTS_FRAME;
-      err = init_frame_parser(exec_ctx, t);
+      err = init_frame_parser(t);
       if (err != GRPC_ERROR_NONE) {
         return err;
       }
       if (t->incoming_frame_size == 0) {
-        err = parse_frame_slice(exec_ctx, t, grpc_empty_slice(), 1);
+        err = parse_frame_slice(t, grpc_empty_slice(), 1);
         if (err != GRPC_ERROR_NONE) {
           return err;
         }
@@ -217,7 +206,7 @@
       GPR_ASSERT(cur < end);
       if ((uint32_t)(end - cur) == t->incoming_frame_size) {
         err =
-            parse_frame_slice(exec_ctx, t,
+            parse_frame_slice(t,
                               grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
                                                     (size_t)(end - beg)),
                               1);
@@ -230,7 +219,7 @@
       } else if ((uint32_t)(end - cur) > t->incoming_frame_size) {
         size_t cur_offset = (size_t)(cur - beg);
         err = parse_frame_slice(
-            exec_ctx, t,
+            t,
             grpc_slice_sub_no_ref(slice, cur_offset,
                                   cur_offset + t->incoming_frame_size),
             1);
@@ -242,7 +231,7 @@
         goto dts_fh_0; /* loop */
       } else {
         err =
-            parse_frame_slice(exec_ctx, t,
+            parse_frame_slice(t,
                               grpc_slice_sub_no_ref(slice, (size_t)(cur - beg),
                                                     (size_t)(end - beg)),
                               0);
@@ -258,8 +247,7 @@
   GPR_UNREACHABLE_CODE(return nullptr);
 }
 
-static grpc_error* init_frame_parser(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t) {
+static grpc_error* init_frame_parser(grpc_chttp2_transport* t) {
   if (t->is_first_frame &&
       t->incoming_frame_type != GRPC_CHTTP2_FRAME_SETTINGS) {
     char* msg;
@@ -291,46 +279,43 @@
       gpr_free(msg);
       return err;
     }
-    return init_header_frame_parser(exec_ctx, t, 1);
+    return init_header_frame_parser(t, 1);
   }
   switch (t->incoming_frame_type) {
     case GRPC_CHTTP2_FRAME_DATA:
-      return init_data_frame_parser(exec_ctx, t);
+      return init_data_frame_parser(t);
     case GRPC_CHTTP2_FRAME_HEADER:
-      return init_header_frame_parser(exec_ctx, t, 0);
+      return init_header_frame_parser(t, 0);
     case GRPC_CHTTP2_FRAME_CONTINUATION:
       return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
           "Unexpected CONTINUATION frame");
     case GRPC_CHTTP2_FRAME_RST_STREAM:
-      return init_rst_stream_parser(exec_ctx, t);
+      return init_rst_stream_parser(t);
     case GRPC_CHTTP2_FRAME_SETTINGS:
-      return init_settings_frame_parser(exec_ctx, t);
+      return init_settings_frame_parser(t);
     case GRPC_CHTTP2_FRAME_WINDOW_UPDATE:
-      return init_window_update_frame_parser(exec_ctx, t);
+      return init_window_update_frame_parser(t);
     case GRPC_CHTTP2_FRAME_PING:
-      return init_ping_parser(exec_ctx, t);
+      return init_ping_parser(t);
     case GRPC_CHTTP2_FRAME_GOAWAY:
-      return init_goaway_parser(exec_ctx, t);
+      return init_goaway_parser(t);
     default:
       if (grpc_http_trace.enabled()) {
         gpr_log(GPR_ERROR, "Unknown frame type %02x", t->incoming_frame_type);
       }
-      return init_skip_frame_parser(exec_ctx, t, 0);
+      return init_skip_frame_parser(t, 0);
   }
 }
 
-static grpc_error* skip_parser(grpc_exec_ctx* exec_ctx, void* parser,
-                               grpc_chttp2_transport* t, grpc_chttp2_stream* s,
-                               grpc_slice slice, int is_last) {
+static grpc_error* skip_parser(void* parser, grpc_chttp2_transport* t,
+                               grpc_chttp2_stream* s, grpc_slice slice,
+                               int is_last) {
   return GRPC_ERROR_NONE;
 }
 
-static void skip_header(grpc_exec_ctx* exec_ctx, void* tp, grpc_mdelem md) {
-  GRPC_MDELEM_UNREF(exec_ctx, md);
-}
+static void skip_header(void* tp, grpc_mdelem md) { GRPC_MDELEM_UNREF(md); }
 
-static grpc_error* init_skip_frame_parser(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_transport* t,
+static grpc_error* init_skip_frame_parser(grpc_chttp2_transport* t,
                                           int is_header) {
   if (is_header) {
     uint8_t is_eoh = t->expect_continuation_stream_id != 0;
@@ -346,14 +331,11 @@
   return GRPC_ERROR_NONE;
 }
 
-void grpc_chttp2_parsing_become_skip_parser(grpc_exec_ctx* exec_ctx,
-                                            grpc_chttp2_transport* t) {
-  init_skip_frame_parser(exec_ctx, t,
-                         t->parser == grpc_chttp2_header_parser_parse);
+void grpc_chttp2_parsing_become_skip_parser(grpc_chttp2_transport* t) {
+  init_skip_frame_parser(t, t->parser == grpc_chttp2_header_parser_parse);
 }
 
-static grpc_error* init_data_frame_parser(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_transport* t) {
+static grpc_error* init_data_frame_parser(grpc_chttp2_transport* t) {
   grpc_chttp2_stream* s =
       grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
   grpc_error* err = GRPC_ERROR_NONE;
@@ -365,17 +347,17 @@
     err = s->flow_control->RecvData(t->incoming_frame_size);
     action = s->flow_control->MakeAction();
   }
-  grpc_chttp2_act_on_flowctl_action(exec_ctx, action, t, s);
+  grpc_chttp2_act_on_flowctl_action(action, t, s);
   if (err != GRPC_ERROR_NONE) {
     goto error_handler;
   }
   if (s == nullptr) {
-    return init_skip_frame_parser(exec_ctx, t, 0);
+    return init_skip_frame_parser(t, 0);
   }
   s->received_bytes += t->incoming_frame_size;
   s->stats.incoming.framing_bytes += 9;
   if (err == GRPC_ERROR_NONE && s->read_closed) {
-    return init_skip_frame_parser(exec_ctx, t, 0);
+    return init_skip_frame_parser(t, 0);
   }
   if (err == GRPC_ERROR_NONE) {
     err = grpc_chttp2_data_parser_begin_frame(
@@ -394,13 +376,13 @@
   } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, nullptr)) {
     /* handle stream errors by closing the stream */
     if (s != nullptr) {
-      grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false, err);
+      grpc_chttp2_mark_stream_closed(t, s, true, false, err);
     }
     grpc_slice_buffer_add(
         &t->qbuf, grpc_chttp2_rst_stream_create(t->incoming_stream_id,
                                                 GRPC_HTTP2_PROTOCOL_ERROR,
                                                 &s->stats.outgoing));
-    return init_skip_frame_parser(exec_ctx, t, 0);
+    return init_skip_frame_parser(t, 0);
   } else {
     return err;
   }
@@ -408,8 +390,7 @@
 
 static void free_timeout(void* p) { gpr_free(p); }
 
-static void on_initial_header(grpc_exec_ctx* exec_ctx, void* tp,
-                              grpc_mdelem md) {
+static void on_initial_header(void* tp, grpc_mdelem md) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
   grpc_chttp2_stream* s = t->incoming_stream;
 
@@ -455,9 +436,9 @@
     }
     if (timeout != GRPC_MILLIS_INF_FUTURE) {
       grpc_chttp2_incoming_metadata_buffer_set_deadline(
-          &s->metadata_buffer[0], grpc_exec_ctx_now(exec_ctx) + timeout);
+          &s->metadata_buffer[0], grpc_core::ExecCtx::Get()->Now() + timeout);
     }
-    GRPC_MDELEM_UNREF(exec_ctx, md);
+    GRPC_MDELEM_UNREF(md);
   } else {
     const size_t new_size = s->metadata_buffer[0].size + GRPC_MDELEM_LENGTH(md);
     const size_t metadata_size_limit =
@@ -469,22 +450,22 @@
               " vs. %" PRIuPTR ")",
               new_size, metadata_size_limit);
       grpc_chttp2_cancel_stream(
-          exec_ctx, t, s,
+          t, s,
           grpc_error_set_int(
               GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                   "received initial metadata size exceeds limit"),
               GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_RESOURCE_EXHAUSTED));
-      grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+      grpc_chttp2_parsing_become_skip_parser(t);
       s->seen_error = true;
-      GRPC_MDELEM_UNREF(exec_ctx, md);
+      GRPC_MDELEM_UNREF(md);
     } else {
-      grpc_error* error = grpc_chttp2_incoming_metadata_buffer_add(
-          exec_ctx, &s->metadata_buffer[0], md);
+      grpc_error* error =
+          grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[0], md);
       if (error != GRPC_ERROR_NONE) {
-        grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
-        grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+        grpc_chttp2_cancel_stream(t, s, error);
+        grpc_chttp2_parsing_become_skip_parser(t);
         s->seen_error = true;
-        GRPC_MDELEM_UNREF(exec_ctx, md);
+        GRPC_MDELEM_UNREF(md);
       }
     }
   }
@@ -492,8 +473,7 @@
   GPR_TIMER_END("on_initial_header", 0);
 }
 
-static void on_trailing_header(grpc_exec_ctx* exec_ctx, void* tp,
-                               grpc_mdelem md) {
+static void on_trailing_header(void* tp, grpc_mdelem md) {
   grpc_chttp2_transport* t = (grpc_chttp2_transport*)tp;
   grpc_chttp2_stream* s = t->incoming_stream;
 
@@ -527,30 +507,29 @@
             " vs. %" PRIuPTR ")",
             new_size, metadata_size_limit);
     grpc_chttp2_cancel_stream(
-        exec_ctx, t, s,
+        t, s,
         grpc_error_set_int(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                                "received trailing metadata size exceeds limit"),
                            GRPC_ERROR_INT_GRPC_STATUS,
                            GRPC_STATUS_RESOURCE_EXHAUSTED));
-    grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+    grpc_chttp2_parsing_become_skip_parser(t);
     s->seen_error = true;
-    GRPC_MDELEM_UNREF(exec_ctx, md);
+    GRPC_MDELEM_UNREF(md);
   } else {
-    grpc_error* error = grpc_chttp2_incoming_metadata_buffer_add(
-        exec_ctx, &s->metadata_buffer[1], md);
+    grpc_error* error =
+        grpc_chttp2_incoming_metadata_buffer_add(&s->metadata_buffer[1], md);
     if (error != GRPC_ERROR_NONE) {
-      grpc_chttp2_cancel_stream(exec_ctx, t, s, error);
-      grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+      grpc_chttp2_cancel_stream(t, s, error);
+      grpc_chttp2_parsing_become_skip_parser(t);
       s->seen_error = true;
-      GRPC_MDELEM_UNREF(exec_ctx, md);
+      GRPC_MDELEM_UNREF(md);
     }
   }
 
   GPR_TIMER_END("on_trailing_header", 0);
 }
 
-static grpc_error* init_header_frame_parser(grpc_exec_ctx* exec_ctx,
-                                            grpc_chttp2_transport* t,
+static grpc_error* init_header_frame_parser(grpc_chttp2_transport* t,
                                             int is_continuation) {
   uint8_t is_eoh =
       (t->incoming_frame_flags & GRPC_CHTTP2_DATA_FLAG_END_HEADERS) != 0;
@@ -580,7 +559,7 @@
       GRPC_CHTTP2_IF_TRACING(
           gpr_log(GPR_ERROR,
                   "grpc_chttp2_stream disbanded before CONTINUATION received"));
-      return init_skip_frame_parser(exec_ctx, t, 1);
+      return init_skip_frame_parser(t, 1);
     }
     if (t->is_client) {
       if ((t->incoming_stream_id & 1) &&
@@ -590,7 +569,7 @@
         GRPC_CHTTP2_IF_TRACING(gpr_log(
             GPR_ERROR, "ignoring new grpc_chttp2_stream creation on client"));
       }
-      grpc_error* err = init_skip_frame_parser(exec_ctx, t, 1);
+      grpc_error* err = init_skip_frame_parser(t, 1);
       if (t->incoming_frame_flags & GRPC_CHTTP2_FLAG_HAS_PRIORITY) {
         grpc_chttp2_hpack_parser_set_has_priority(&t->hpack_parser);
       }
@@ -602,13 +581,13 @@
           "last grpc_chttp2_stream "
           "id=%d, new grpc_chttp2_stream id=%d",
           t->last_new_stream_id, t->incoming_stream_id));
-      return init_skip_frame_parser(exec_ctx, t, 1);
+      return init_skip_frame_parser(t, 1);
     } else if ((t->incoming_stream_id & 1) == 0) {
       GRPC_CHTTP2_IF_TRACING(gpr_log(
           GPR_ERROR,
           "ignoring grpc_chttp2_stream with non-client generated index %d",
           t->incoming_stream_id));
-      return init_skip_frame_parser(exec_ctx, t, 1);
+      return init_skip_frame_parser(t, 1);
     } else if (grpc_chttp2_stream_map_size(&t->stream_map) >=
                t->settings[GRPC_ACKED_SETTINGS]
                           [GRPC_CHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS]) {
@@ -616,11 +595,11 @@
     }
     t->last_new_stream_id = t->incoming_stream_id;
     s = t->incoming_stream =
-        grpc_chttp2_parsing_accept_stream(exec_ctx, t, t->incoming_stream_id);
+        grpc_chttp2_parsing_accept_stream(t, t->incoming_stream_id);
     if (s == nullptr) {
       GRPC_CHTTP2_IF_TRACING(
           gpr_log(GPR_ERROR, "grpc_chttp2_stream not accepted"));
-      return init_skip_frame_parser(exec_ctx, t, 1);
+      return init_skip_frame_parser(t, 1);
     }
   } else {
     t->incoming_stream = s;
@@ -631,7 +610,7 @@
     GRPC_CHTTP2_IF_TRACING(gpr_log(
         GPR_ERROR, "skipping already closed grpc_chttp2_stream header"));
     t->incoming_stream = nullptr;
-    return init_skip_frame_parser(exec_ctx, t, 1);
+    return init_skip_frame_parser(t, 1);
   }
   t->parser = grpc_chttp2_header_parser_parse;
   t->parser_data = &t->hpack_parser;
@@ -656,7 +635,7 @@
       break;
     case 2:
       gpr_log(GPR_ERROR, "too many header frames received");
-      return init_skip_frame_parser(exec_ctx, t, 1);
+      return init_skip_frame_parser(t, 1);
   }
   t->hpack_parser.on_header_user_data = t;
   t->hpack_parser.is_boundary = is_eoh;
@@ -668,8 +647,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* init_window_update_frame_parser(grpc_exec_ctx* exec_ctx,
-                                                   grpc_chttp2_transport* t) {
+static grpc_error* init_window_update_frame_parser(grpc_chttp2_transport* t) {
   grpc_error* err = grpc_chttp2_window_update_parser_begin_frame(
       &t->simple.window_update, t->incoming_frame_size,
       t->incoming_frame_flags);
@@ -678,7 +656,7 @@
     grpc_chttp2_stream* s = t->incoming_stream =
         grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
     if (s == nullptr) {
-      return init_skip_frame_parser(exec_ctx, t, 0);
+      return init_skip_frame_parser(t, 0);
     }
     s->stats.incoming.framing_bytes += 9;
   }
@@ -687,8 +665,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* init_ping_parser(grpc_exec_ctx* exec_ctx,
-                                    grpc_chttp2_transport* t) {
+static grpc_error* init_ping_parser(grpc_chttp2_transport* t) {
   grpc_error* err = grpc_chttp2_ping_parser_begin_frame(
       &t->simple.ping, t->incoming_frame_size, t->incoming_frame_flags);
   if (err != GRPC_ERROR_NONE) return err;
@@ -697,15 +674,14 @@
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* init_rst_stream_parser(grpc_exec_ctx* exec_ctx,
-                                          grpc_chttp2_transport* t) {
+static grpc_error* init_rst_stream_parser(grpc_chttp2_transport* t) {
   grpc_error* err = grpc_chttp2_rst_stream_parser_begin_frame(
       &t->simple.rst_stream, t->incoming_frame_size, t->incoming_frame_flags);
   if (err != GRPC_ERROR_NONE) return err;
   grpc_chttp2_stream* s = t->incoming_stream =
       grpc_chttp2_parsing_lookup_stream(t, t->incoming_stream_id);
   if (!t->incoming_stream) {
-    return init_skip_frame_parser(exec_ctx, t, 0);
+    return init_skip_frame_parser(t, 0);
   }
   s->stats.incoming.framing_bytes += 9;
   t->parser = grpc_chttp2_rst_stream_parser_parse;
@@ -713,8 +689,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* init_goaway_parser(grpc_exec_ctx* exec_ctx,
-                                      grpc_chttp2_transport* t) {
+static grpc_error* init_goaway_parser(grpc_chttp2_transport* t) {
   grpc_error* err = grpc_chttp2_goaway_parser_begin_frame(
       &t->goaway_parser, t->incoming_frame_size, t->incoming_frame_flags);
   if (err != GRPC_ERROR_NONE) return err;
@@ -723,8 +698,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* init_settings_frame_parser(grpc_exec_ctx* exec_ctx,
-                                              grpc_chttp2_transport* t) {
+static grpc_error* init_settings_frame_parser(grpc_chttp2_transport* t) {
   if (t->incoming_stream_id != 0) {
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "Settings frame received for grpc_chttp2_stream");
@@ -740,7 +714,7 @@
     memcpy(t->settings[GRPC_ACKED_SETTINGS], t->settings[GRPC_SENT_SETTINGS],
            GRPC_CHTTP2_NUM_SETTINGS * sizeof(uint32_t));
     grpc_chttp2_hptbl_set_max_bytes(
-        exec_ctx, &t->hpack_parser.table,
+        &t->hpack_parser.table,
         t->settings[GRPC_ACKED_SETTINGS]
                    [GRPC_CHTTP2_SETTINGS_HEADER_TABLE_SIZE]);
     t->sent_local_settings = 0;
@@ -750,11 +724,10 @@
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* parse_frame_slice(grpc_exec_ctx* exec_ctx,
-                                     grpc_chttp2_transport* t, grpc_slice slice,
+static grpc_error* parse_frame_slice(grpc_chttp2_transport* t, grpc_slice slice,
                                      int is_last) {
   grpc_chttp2_stream* s = t->incoming_stream;
-  grpc_error* err = t->parser(exec_ctx, t->parser_data, t, s, slice, is_last);
+  grpc_error* err = t->parser(t->parser_data, t, s, slice, is_last);
   if (err == GRPC_ERROR_NONE) {
     return err;
   } else if (grpc_error_get_int(err, GRPC_ERROR_INT_STREAM_ID, nullptr)) {
@@ -762,7 +735,7 @@
       const char* msg = grpc_error_string(err);
       gpr_log(GPR_ERROR, "%s", msg);
     }
-    grpc_chttp2_parsing_become_skip_parser(exec_ctx, t);
+    grpc_chttp2_parsing_become_skip_parser(t);
     if (s) {
       s->forced_close_error = err;
       grpc_slice_buffer_add(
diff --git a/src/core/ext/transport/chttp2/transport/stream_map.h b/src/core/ext/transport/chttp2/transport/stream_map.h
index c89d200..9fb8826 100644
--- a/src/core/ext/transport/chttp2/transport/stream_map.h
+++ b/src/core/ext/transport/chttp2/transport/stream_map.h
@@ -23,10 +23,6 @@
 
 #include <stddef.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Data structure to map a uint32_t to a data object (represented by a void*)
 
    Represented as a sorted array of keys, and a corresponding array of values.
@@ -69,8 +65,4 @@
                                                void* value),
                                      void* user_data);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_STREAM_MAP_H */
diff --git a/src/core/ext/transport/chttp2/transport/varint.h b/src/core/ext/transport/chttp2/transport/varint.h
index d3a9d90..5a2b670 100644
--- a/src/core/ext/transport/chttp2/transport/varint.h
+++ b/src/core/ext/transport/chttp2/transport/varint.h
@@ -21,10 +21,6 @@
 
 #include <grpc/support/port_platform.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Helpers for hpack varint encoding */
 
 /* length of a value that needs varint tail encoding (it's bigger than can be
@@ -61,8 +57,4 @@
     }                                                                         \
   } while (0)
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_VARINT_H */
diff --git a/src/core/ext/transport/chttp2/transport/writing.cc b/src/core/ext/transport/chttp2/transport/writing.cc
index 15869b8..043ca9b 100644
--- a/src/core/ext/transport/chttp2/transport/writing.cc
+++ b/src/core/ext/transport/chttp2/transport/writing.cc
@@ -33,17 +33,15 @@
   *list = cb;
 }
 
-static void finish_write_cb(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                            grpc_chttp2_stream* s, grpc_chttp2_write_cb* cb,
-                            grpc_error* error) {
-  grpc_chttp2_complete_closure_step(exec_ctx, t, s, &cb->closure, error,
+static void finish_write_cb(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+                            grpc_chttp2_write_cb* cb, grpc_error* error) {
+  grpc_chttp2_complete_closure_step(t, s, &cb->closure, error,
                                     "finish_write_cb");
   cb->next = t->write_cb_pool;
   t->write_cb_pool = cb;
 }
 
-static void maybe_initiate_ping(grpc_exec_ctx* exec_ctx,
-                                grpc_chttp2_transport* t) {
+static void maybe_initiate_ping(grpc_chttp2_transport* t) {
   grpc_chttp2_ping_queue* pq = &t->ping_queue;
   if (grpc_closure_list_empty(pq->lists[GRPC_CHTTP2_PCL_NEXT])) {
     /* no ping needed: wait */
@@ -68,37 +66,42 @@
     }
     return;
   }
-  grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+  grpc_millis now = grpc_core::ExecCtx::Get()->Now();
+
+  grpc_millis next_allowed_ping_interval =
+      (t->keepalive_permit_without_calls == 0 &&
+       grpc_chttp2_stream_map_size(&t->stream_map) == 0)
+          ? 7200 * GPR_MS_PER_SEC
+          : t->ping_policy.min_sent_ping_interval_without_data;
   grpc_millis next_allowed_ping =
-      t->ping_state.last_ping_sent_time +
-      t->ping_policy.min_sent_ping_interval_without_data;
-  if (t->keepalive_permit_without_calls == 0 &&
-      grpc_chttp2_stream_map_size(&t->stream_map) == 0) {
-    next_allowed_ping =
-        t->ping_recv_state.last_ping_recv_time + 7200 * GPR_MS_PER_SEC;
-  }
+      t->ping_state.last_ping_sent_time + next_allowed_ping_interval;
+
   if (next_allowed_ping > now) {
     /* not enough elapsed time between successive pings */
     if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
       gpr_log(GPR_DEBUG,
-              "%s: Ping delayed [%p]: not enough time elapsed since last ping",
-              t->is_client ? "CLIENT" : "SERVER", t->peer_string);
+              "%s: Ping delayed [%p]: not enough time elapsed since last ping. "
+              " Last ping %f: Next ping %f: Now %f",
+              t->is_client ? "CLIENT" : "SERVER", t->peer_string,
+              (double)t->ping_state.last_ping_sent_time,
+              (double)next_allowed_ping, (double)now);
     }
     if (!t->ping_state.is_delayed_ping_timer_set) {
       t->ping_state.is_delayed_ping_timer_set = true;
-      grpc_timer_init(exec_ctx, &t->ping_state.delayed_ping_timer,
-                      next_allowed_ping, &t->retry_initiate_ping_locked);
+      grpc_timer_init(&t->ping_state.delayed_ping_timer, next_allowed_ping,
+                      &t->retry_initiate_ping_locked);
     }
     return;
   }
+
   pq->inflight_id = t->ping_ctr;
   t->ping_ctr++;
-  GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
+  GRPC_CLOSURE_LIST_SCHED(&pq->lists[GRPC_CHTTP2_PCL_INITIATE]);
   grpc_closure_list_move(&pq->lists[GRPC_CHTTP2_PCL_NEXT],
                          &pq->lists[GRPC_CHTTP2_PCL_INFLIGHT]);
   grpc_slice_buffer_add(&t->outbuf,
                         grpc_chttp2_ping_create(false, pq->inflight_id));
-  GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx);
+  GRPC_STATS_INC_HTTP2_PINGS_SENT();
   t->ping_state.last_ping_sent_time = now;
   if (grpc_http_trace.enabled() || grpc_bdp_estimator_trace.enabled()) {
     gpr_log(GPR_DEBUG, "%s: Ping sent [%p]: %d/%d",
@@ -110,10 +113,9 @@
       (t->ping_state.pings_before_data_required != 0);
 }
 
-static bool update_list(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                        grpc_chttp2_stream* s, int64_t send_bytes,
-                        grpc_chttp2_write_cb** list, int64_t* ctr,
-                        grpc_error* error) {
+static bool update_list(grpc_chttp2_transport* t, grpc_chttp2_stream* s,
+                        int64_t send_bytes, grpc_chttp2_write_cb** list,
+                        int64_t* ctr, grpc_error* error) {
   bool sched_any = false;
   grpc_chttp2_write_cb* cb = *list;
   *list = nullptr;
@@ -122,7 +124,7 @@
     grpc_chttp2_write_cb* next = cb->next;
     if (cb->call_at_byte <= *ctr) {
       sched_any = true;
-      finish_write_cb(exec_ctx, t, s, cb, GRPC_ERROR_REF(error));
+      finish_write_cb(t, s, cb, GRPC_ERROR_REF(error));
     } else {
       add_to_write_list(list, cb);
     }
@@ -175,22 +177,22 @@
 
 class WriteContext {
  public:
-  WriteContext(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t) : t_(t) {
-    GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx);
+  WriteContext(grpc_chttp2_transport* t) : t_(t) {
+    GRPC_STATS_INC_HTTP2_WRITES_BEGUN();
     GPR_TIMER_BEGIN("grpc_chttp2_begin_write", 0);
   }
 
   // TODO(ctiller): make this the destructor
-  void FlushStats(grpc_exec_ctx* exec_ctx) {
+  void FlushStats() {
     GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(
-        exec_ctx, initial_metadata_writes_);
-    GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, message_writes_);
+        initial_metadata_writes_);
+    GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(message_writes_);
     GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(
-        exec_ctx, trailing_metadata_writes_);
-    GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, flow_control_writes_);
+        trailing_metadata_writes_);
+    GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(flow_control_writes_);
   }
 
-  void FlushSettings(grpc_exec_ctx* exec_ctx) {
+  void FlushSettings() {
     if (t_->dirtied_local_settings && !t_->sent_local_settings) {
       grpc_slice_buffer_add(
           &t_->outbuf, grpc_chttp2_settings_create(
@@ -200,17 +202,17 @@
       t_->force_send_settings = false;
       t_->dirtied_local_settings = false;
       t_->sent_local_settings = true;
-      GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx);
+      GRPC_STATS_INC_HTTP2_SETTINGS_WRITES();
     }
   }
 
-  void FlushQueuedBuffers(grpc_exec_ctx* exec_ctx) {
+  void FlushQueuedBuffers() {
     /* simple writes are queued to qbuf, and flushed here */
     grpc_slice_buffer_move_into(&t_->qbuf, &t_->outbuf);
     GPR_ASSERT(t_->qbuf.count == 0);
   }
 
-  void FlushWindowUpdates(grpc_exec_ctx* exec_ctx) {
+  void FlushWindowUpdates() {
     uint32_t transport_announce =
         t_->flow_control->MaybeSendUpdate(t_->outbuf.count > 0);
     if (transport_announce) {
@@ -230,7 +232,7 @@
     t_->ping_ack_count = 0;
   }
 
-  void EnactHpackSettings(grpc_exec_ctx* exec_ctx) {
+  void EnactHpackSettings() {
     grpc_chttp2_hpack_compressor_set_max_table_size(
         &t_->hpack_compressor,
         t_->settings[GRPC_PEER_SETTINGS]
@@ -370,8 +372,8 @@
 
   bool is_last_frame() const { return is_last_frame_; }
 
-  void CallCallbacks(grpc_exec_ctx* exec_ctx) {
-    if (update_list(exec_ctx, t_, s_,
+  void CallCallbacks() {
+    if (update_list(t_, s_,
                     (int64_t)(s_->sending_bytes - sending_bytes_before_),
                     &s_->on_flow_controlled_cbs,
                     &s_->flow_controlled_bytes_flowed, GRPC_ERROR_NONE)) {
@@ -399,7 +401,7 @@
                       s->flow_control->announced_window_delta())));
   }
 
-  void FlushInitialMetadata(grpc_exec_ctx* exec_ctx) {
+  void FlushInitialMetadata() {
     /* send initial metadata if it's available */
     if (s_->sent_initial_metadata) return;
     if (s_->send_initial_metadata == nullptr) return;
@@ -426,7 +428,7 @@
                       [GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],  // max_frame_size
           &s_->stats.outgoing                                 // stats
       };
-      grpc_chttp2_encode_header(exec_ctx, &t_->hpack_compressor, nullptr, 0,
+      grpc_chttp2_encode_header(&t_->hpack_compressor, nullptr, 0,
                                 s_->send_initial_metadata, &hopt, &t_->outbuf);
       write_context_->ResetPingRecvClock();
       write_context_->IncInitialMetadataWrites();
@@ -436,11 +438,11 @@
     s_->sent_initial_metadata = true;
     write_context_->NoteScheduledResults();
     grpc_chttp2_complete_closure_step(
-        exec_ctx, t_, s_, &s_->send_initial_metadata_finished, GRPC_ERROR_NONE,
+        t_, s_, &s_->send_initial_metadata_finished, GRPC_ERROR_NONE,
         "send_initial_metadata_finished");
   }
 
-  void FlushWindowUpdates(grpc_exec_ctx* exec_ctx) {
+  void FlushWindowUpdates() {
     /* send any window updates */
     const uint32_t stream_announce = s_->flow_control->MaybeSendUpdate();
     if (stream_announce == 0) return;
@@ -452,7 +454,7 @@
     write_context_->IncWindowUpdateWrites();
   }
 
-  void FlushData(grpc_exec_ctx* exec_ctx) {
+  void FlushData() {
     if (!s_->sent_initial_metadata) return;
 
     if (s_->flow_controlled_buffer.length == 0 &&
@@ -484,9 +486,9 @@
     }
     write_context_->ResetPingRecvClock();
     if (data_send_context.is_last_frame()) {
-      SentLastFrame(exec_ctx);
+      SentLastFrame();
     }
-    data_send_context.CallCallbacks(exec_ctx);
+    data_send_context.CallCallbacks();
     stream_became_writable_ = true;
     if (s_->flow_controlled_buffer.length > 0 ||
         s_->compressed_data_buffer.length > 0) {
@@ -496,7 +498,7 @@
     write_context_->IncMessageWrites();
   }
 
-  void FlushTrailingMetadata(grpc_exec_ctx* exec_ctx) {
+  void FlushTrailingMetadata() {
     if (!s_->sent_initial_metadata) return;
 
     if (s_->send_trailing_metadata == nullptr) return;
@@ -517,18 +519,18 @@
 
           t_->settings[GRPC_PEER_SETTINGS][GRPC_CHTTP2_SETTINGS_MAX_FRAME_SIZE],
           &s_->stats.outgoing};
-      grpc_chttp2_encode_header(exec_ctx, &t_->hpack_compressor,
+      grpc_chttp2_encode_header(&t_->hpack_compressor,
                                 extra_headers_for_trailing_metadata_,
                                 num_extra_headers_for_trailing_metadata_,
                                 s_->send_trailing_metadata, &hopt, &t_->outbuf);
     }
     write_context_->IncTrailingMetadataWrites();
     write_context_->ResetPingRecvClock();
-    SentLastFrame(exec_ctx);
+    SentLastFrame();
 
     write_context_->NoteScheduledResults();
     grpc_chttp2_complete_closure_step(
-        exec_ctx, t_, s_, &s_->send_trailing_metadata_finished, GRPC_ERROR_NONE,
+        t_, s_, &s_->send_trailing_metadata_finished, GRPC_ERROR_NONE,
         "send_trailing_metadata_finished");
   }
 
@@ -552,7 +554,7 @@
     }
   }
 
-  void SentLastFrame(grpc_exec_ctx* exec_ctx) {
+  void SentLastFrame() {
     s_->send_trailing_metadata = nullptr;
     s_->sent_trailing_metadata = true;
 
@@ -561,7 +563,7 @@
           &t_->outbuf, grpc_chttp2_rst_stream_create(
                            s_->id, GRPC_HTTP2_NO_ERROR, &s_->stats.outgoing));
     }
-    grpc_chttp2_mark_stream_closed(exec_ctx, t_, s_, !t_->is_client, true,
+    grpc_chttp2_mark_stream_closed(t_, s_, !t_->is_client, true,
                                    GRPC_ERROR_NONE);
   }
 
@@ -575,12 +577,12 @@
 }  // namespace
 
 grpc_chttp2_begin_write_result grpc_chttp2_begin_write(
-    grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t) {
-  WriteContext ctx(exec_ctx, t);
-  ctx.FlushSettings(exec_ctx);
+    grpc_chttp2_transport* t) {
+  WriteContext ctx(t);
+  ctx.FlushSettings();
   ctx.FlushPingAcks();
-  ctx.FlushQueuedBuffers(exec_ctx);
-  ctx.EnactHpackSettings(exec_ctx);
+  ctx.FlushQueuedBuffers();
+  ctx.EnactHpackSettings();
 
   if (t->flow_control->remote_window() > 0) {
     ctx.UpdateStreamsNoLongerStalled();
@@ -590,47 +592,45 @@
      (according to available window sizes) and add to the output buffer */
   while (grpc_chttp2_stream* s = ctx.NextStream()) {
     StreamWriteContext stream_ctx(&ctx, s);
-    stream_ctx.FlushInitialMetadata(exec_ctx);
-    stream_ctx.FlushWindowUpdates(exec_ctx);
-    stream_ctx.FlushData(exec_ctx);
-    stream_ctx.FlushTrailingMetadata(exec_ctx);
+    stream_ctx.FlushInitialMetadata();
+    stream_ctx.FlushWindowUpdates();
+    stream_ctx.FlushData();
+    stream_ctx.FlushTrailingMetadata();
 
     if (stream_ctx.stream_became_writable()) {
       if (!grpc_chttp2_list_add_writing_stream(t, s)) {
         /* already in writing list: drop ref */
-        GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:already_writing");
+        GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:already_writing");
       } else {
         /* ref will be dropped at end of write */
       }
     } else {
-      GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:no_write");
+      GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:no_write");
     }
   }
 
-  ctx.FlushWindowUpdates(exec_ctx);
+  ctx.FlushWindowUpdates();
 
-  maybe_initiate_ping(exec_ctx, t);
+  maybe_initiate_ping(t);
 
   GPR_TIMER_END("grpc_chttp2_begin_write", 0);
 
   return ctx.Result();
 }
 
-void grpc_chttp2_end_write(grpc_exec_ctx* exec_ctx, grpc_chttp2_transport* t,
-                           grpc_error* error) {
+void grpc_chttp2_end_write(grpc_chttp2_transport* t, grpc_error* error) {
   GPR_TIMER_BEGIN("grpc_chttp2_end_write", 0);
   grpc_chttp2_stream* s;
 
   while (grpc_chttp2_list_pop_writing_stream(t, &s)) {
     if (s->sending_bytes != 0) {
-      update_list(exec_ctx, t, s, (int64_t)s->sending_bytes,
-                  &s->on_write_finished_cbs, &s->flow_controlled_bytes_written,
-                  GRPC_ERROR_REF(error));
+      update_list(t, s, (int64_t)s->sending_bytes, &s->on_write_finished_cbs,
+                  &s->flow_controlled_bytes_written, GRPC_ERROR_REF(error));
       s->sending_bytes = 0;
     }
-    GRPC_CHTTP2_STREAM_UNREF(exec_ctx, s, "chttp2_writing:end");
+    GRPC_CHTTP2_STREAM_UNREF(s, "chttp2_writing:end");
   }
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &t->outbuf);
+  grpc_slice_buffer_reset_and_unref_internal(&t->outbuf);
   GRPC_ERROR_UNREF(error);
   GPR_TIMER_END("grpc_chttp2_end_write", 0);
 }
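
The rewritten maybe_initiate_ping above folds the keepalive throttle into a single interval choice: with keepalive_permit_without_calls unset and no active streams, the next ping is pushed out to a two-hour floor past the last sent ping; otherwise the policy's minimum interval applies, and a delayed-ping timer is armed when that deadline has not yet passed. A condensed sketch of that decision, with illustrative names and types:

    // Condensed sketch of the throttle in the rewritten maybe_initiate_ping;
    // names and types here are illustrative, not the gRPC ones.
    #include <cstddef>
    #include <cstdint>

    using Millis = int64_t;
    constexpr Millis kMsPerSec = 1000;

    struct PingThrottleState {
      bool keepalive_permit_without_calls;
      std::size_t active_streams;
      Millis min_sent_ping_interval_without_data;
      Millis last_ping_sent_time;
    };

    // Returns true when a ping may be sent now; otherwise *delay_until is the
    // deadline at which the delayed-ping timer should fire (the value the hunk
    // above hands to grpc_timer_init).
    bool MayInitiatePing(const PingThrottleState& st, Millis now,
                         Millis* delay_until) {
      const Millis interval =
          (!st.keepalive_permit_without_calls && st.active_streams == 0)
              ? 7200 * kMsPerSec  // two-hour floor while the connection is idle
              : st.min_sent_ping_interval_without_data;
      const Millis next_allowed = st.last_ping_sent_time + interval;
      if (next_allowed > now) {
        *delay_until = next_allowed;
        return false;
      }
      return true;
    }
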
diff --git a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
index d590ba0..40a30e4 100644
--- a/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
+++ b/src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc
@@ -49,7 +49,6 @@
   grpc_transport* ct =
       grpc_create_cronet_transport(engine, target, args, reserved);
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  return grpc_channel_create(&exec_ctx, target, args,
-                             GRPC_CLIENT_DIRECT_CHANNEL, ct);
+  grpc_core::ExecCtx exec_ctx;
+  return grpc_channel_create(target, args, GRPC_CLIENT_DIRECT_CHANNEL, ct);
 }
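
The change repeated throughout this merge is visible in its smallest form here: the explicitly threaded grpc_exec_ctx* parameter disappears, callers construct a scoped grpc_core::ExecCtx, callees reach it through ExecCtx::Get(), and pending work is flushed when the scope ends (replacing the old grpc_exec_ctx_finish call). A minimal sketch of that thread-local RAII pattern; this is an illustration of the idea, not the real grpc_core::ExecCtx:

    // Illustration of the pattern, not the real grpc_core::ExecCtx.
    #include <functional>
    #include <utility>
    #include <vector>

    class ScopedExecCtx {
     public:
      ScopedExecCtx() { tls_ = this; }
      ~ScopedExecCtx() {
        Flush();          // stands in for the old grpc_exec_ctx_finish()
        tls_ = nullptr;
      }
      static ScopedExecCtx* Get() { return tls_; }

      void Defer(std::function<void()> closure) {
        pending_.push_back(std::move(closure));
      }

      void Flush() {
        while (!pending_.empty()) {
          std::vector<std::function<void()>> batch = std::move(pending_);
          pending_.clear();
          for (auto& closure : batch) closure();  // may defer further work
        }
      }

     private:
      static thread_local ScopedExecCtx* tls_;
      std::vector<std::function<void()>> pending_;
    };

    thread_local ScopedExecCtx* ScopedExecCtx::tls_ = nullptr;

    // Usage mirrors the new cronet callbacks below: one ExecCtx at the top of
    // the callback, everything underneath reaches it implicitly via Get().
    void OnSomeNetworkEvent() {
      ScopedExecCtx exec_ctx;
      ScopedExecCtx::Get()->Defer([] { /* continue processing the event */ });
    }
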
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.cc b/src/core/ext/transport/cronet/transport/cronet_transport.cc
index 4d24efe..5723da5 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.cc
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.cc
@@ -197,27 +197,23 @@
 #ifndef NDEBUG
 #define GRPC_CRONET_STREAM_REF(stream, reason) \
   grpc_cronet_stream_ref((stream), (reason))
-#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \
-  grpc_cronet_stream_unref((exec_ctx), (stream), (reason))
+#define GRPC_CRONET_STREAM_UNREF(stream, reason) \
+  grpc_cronet_stream_unref((stream), (reason))
 void grpc_cronet_stream_ref(stream_obj* s, const char* reason) {
   grpc_stream_ref(s->refcount, reason);
 }
-void grpc_cronet_stream_unref(grpc_exec_ctx* exec_ctx, stream_obj* s,
-                              const char* reason) {
-  grpc_stream_unref(exec_ctx, s->refcount, reason);
+void grpc_cronet_stream_unref(stream_obj* s, const char* reason) {
+  grpc_stream_unref(s->refcount, reason);
 }
 #else
 #define GRPC_CRONET_STREAM_REF(stream, reason) grpc_cronet_stream_ref((stream))
-#define GRPC_CRONET_STREAM_UNREF(exec_ctx, stream, reason) \
-  grpc_cronet_stream_unref((exec_ctx), (stream))
+#define GRPC_CRONET_STREAM_UNREF(stream, reason) \
+  grpc_cronet_stream_unref((stream))
 void grpc_cronet_stream_ref(stream_obj* s) { grpc_stream_ref(s->refcount); }
-void grpc_cronet_stream_unref(grpc_exec_ctx* exec_ctx, stream_obj* s) {
-  grpc_stream_unref(exec_ctx, s->refcount);
-}
+void grpc_cronet_stream_unref(stream_obj* s) { grpc_stream_unref(s->refcount); }
 #endif
 
-static enum e_op_result execute_stream_op(grpc_exec_ctx* exec_ctx,
-                                          struct op_and_state* oas);
+static enum e_op_result execute_stream_op(struct op_and_state* oas);
 
 /*
   Utility function to translate enum into string for printing
@@ -373,12 +369,12 @@
   This can get executed from the Cronet network thread via cronet callback
   or on the application supplied thread via the perform_stream_op function.
 */
-static void execute_from_storage(grpc_exec_ctx* exec_ctx, stream_obj* s) {
+static void execute_from_storage(stream_obj* s) {
   gpr_mu_lock(&s->mu);
   for (struct op_and_state* curr = s->storage.head; curr != nullptr;) {
     CRONET_LOG(GPR_DEBUG, "calling op at %p. done = %d", curr, curr->done);
     GPR_ASSERT(curr->done == 0);
-    enum e_op_result result = execute_stream_op(exec_ctx, curr);
+    enum e_op_result result = execute_stream_op(curr);
     CRONET_LOG(GPR_DEBUG, "execute_stream_op[%p] returns %s", curr,
                op_result_string(result));
     /* if this op is done, then remove it and free memory */
@@ -401,8 +397,8 @@
   Cronet callback
 */
 static void on_failed(bidirectional_stream* stream, int net_error) {
-  CRONET_LOG(GPR_DEBUG, "on_failed(%p, %d)", stream, net_error);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  gpr_log(GPR_ERROR, "on_failed(%p, %d)", stream, net_error);
+  grpc_core::ExecCtx exec_ctx;
 
   stream_obj* s = (stream_obj*)stream->annotation;
   gpr_mu_lock(&s->mu);
@@ -419,9 +415,8 @@
   }
   null_and_maybe_free_read_buffer(s);
   gpr_mu_unlock(&s->mu);
-  execute_from_storage(&exec_ctx, s);
-  GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");
-  grpc_exec_ctx_finish(&exec_ctx);
+  execute_from_storage(s);
+  GRPC_CRONET_STREAM_UNREF(s, "cronet transport");
 }
 
 /*
@@ -429,7 +424,7 @@
 */
 static void on_canceled(bidirectional_stream* stream) {
   CRONET_LOG(GPR_DEBUG, "on_canceled(%p)", stream);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   stream_obj* s = (stream_obj*)stream->annotation;
   gpr_mu_lock(&s->mu);
@@ -446,9 +441,8 @@
   }
   null_and_maybe_free_read_buffer(s);
   gpr_mu_unlock(&s->mu);
-  execute_from_storage(&exec_ctx, s);
-  GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");
-  grpc_exec_ctx_finish(&exec_ctx);
+  execute_from_storage(s);
+  GRPC_CRONET_STREAM_UNREF(s, "cronet transport");
 }
 
 /*
@@ -456,7 +450,7 @@
 */
 static void on_succeeded(bidirectional_stream* stream) {
   CRONET_LOG(GPR_DEBUG, "on_succeeded(%p)", stream);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   stream_obj* s = (stream_obj*)stream->annotation;
   gpr_mu_lock(&s->mu);
@@ -465,9 +459,8 @@
   s->cbs = nullptr;
   null_and_maybe_free_read_buffer(s);
   gpr_mu_unlock(&s->mu);
-  execute_from_storage(&exec_ctx, s);
-  GRPC_CRONET_STREAM_UNREF(&exec_ctx, s, "cronet transport");
-  grpc_exec_ctx_finish(&exec_ctx);
+  execute_from_storage(s);
+  GRPC_CRONET_STREAM_UNREF(s, "cronet transport");
 }
 
 /*
@@ -475,7 +468,7 @@
 */
 static void on_stream_ready(bidirectional_stream* stream) {
   CRONET_LOG(GPR_DEBUG, "W: on_stream_ready(%p)", stream);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   stream_obj* s = (stream_obj*)stream->annotation;
   grpc_cronet_transport* t = (grpc_cronet_transport*)s->curr_ct;
   gpr_mu_lock(&s->mu);
@@ -495,8 +488,7 @@
     }
   }
   gpr_mu_unlock(&s->mu);
-  execute_from_storage(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  execute_from_storage(s);
 }
 
 /*
@@ -506,7 +498,7 @@
     bidirectional_stream* stream,
     const bidirectional_stream_header_array* headers,
     const char* negotiated_protocol) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   CRONET_LOG(GPR_DEBUG, "R: on_response_headers_received(%p, %p, %s)", stream,
              headers, negotiated_protocol);
   stream_obj* s = (stream_obj*)stream->annotation;
@@ -528,9 +520,8 @@
   for (size_t i = 0; i < headers->count; i++) {
     GRPC_LOG_IF_ERROR("on_response_headers_received",
                       grpc_chttp2_incoming_metadata_buffer_add(
-                          &exec_ctx, &s->state.rs.initial_metadata,
+                          &s->state.rs.initial_metadata,
                           grpc_mdelem_from_slices(
-                              &exec_ctx,
                               grpc_slice_intern(grpc_slice_from_static_string(
                                   headers->headers[i].key)),
                               grpc_slice_intern(grpc_slice_from_static_string(
@@ -552,15 +543,14 @@
     s->state.pending_read_from_cronet = true;
   }
   gpr_mu_unlock(&s->mu);
-  execute_from_storage(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  execute_from_storage(s);
 }
 
 /*
   Cronet callback
 */
 static void on_write_completed(bidirectional_stream* stream, const char* data) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   stream_obj* s = (stream_obj*)stream->annotation;
   CRONET_LOG(GPR_DEBUG, "W: on_write_completed(%p, %s)", stream, data);
   gpr_mu_lock(&s->mu);
@@ -570,8 +560,7 @@
   }
   s->state.state_callback_received[OP_SEND_MESSAGE] = true;
   gpr_mu_unlock(&s->mu);
-  execute_from_storage(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  execute_from_storage(s);
 }
 
 /*
@@ -579,7 +568,7 @@
 */
 static void on_read_completed(bidirectional_stream* stream, char* data,
                               int count) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   stream_obj* s = (stream_obj*)stream->annotation;
   CRONET_LOG(GPR_DEBUG, "R: on_read_completed(%p, %p, %d)", stream, data,
              count);
@@ -605,15 +594,14 @@
       gpr_mu_unlock(&s->mu);
     } else {
       gpr_mu_unlock(&s->mu);
-      execute_from_storage(&exec_ctx, s);
+      execute_from_storage(s);
     }
   } else {
     null_and_maybe_free_read_buffer(s);
     s->state.rs.read_stream_closed = true;
     gpr_mu_unlock(&s->mu);
-    execute_from_storage(&exec_ctx, s);
+    execute_from_storage(s);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 /*
@@ -622,7 +610,7 @@
 static void on_response_trailers_received(
     bidirectional_stream* stream,
     const bidirectional_stream_header_array* trailers) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   CRONET_LOG(GPR_DEBUG, "R: on_response_trailers_received(%p,%p)", stream,
              trailers);
   stream_obj* s = (stream_obj*)stream->annotation;
@@ -638,9 +626,8 @@
                trailers->headers[i].value);
     GRPC_LOG_IF_ERROR("on_response_trailers_received",
                       grpc_chttp2_incoming_metadata_buffer_add(
-                          &exec_ctx, &s->state.rs.trailing_metadata,
+                          &s->state.rs.trailing_metadata,
                           grpc_mdelem_from_slices(
-                              &exec_ctx,
                               grpc_slice_intern(grpc_slice_from_static_string(
                                   trailers->headers[i].key)),
                               grpc_slice_intern(grpc_slice_from_static_string(
@@ -670,17 +657,15 @@
     gpr_mu_unlock(&s->mu);
   } else {
     gpr_mu_unlock(&s->mu);
-    execute_from_storage(&exec_ctx, s);
+    execute_from_storage(s);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 /*
  Utility function that takes the data from s->write_slice_buffer and assembles
  into a contiguous byte stream with 5 byte gRPC header prepended.
 */
-static void create_grpc_frame(grpc_exec_ctx* exec_ctx,
-                              grpc_slice_buffer* write_slice_buffer,
+static void create_grpc_frame(grpc_slice_buffer* write_slice_buffer,
                               char** pp_write_buffer,
                               size_t* p_write_buffer_size, uint32_t flags) {
   grpc_slice slice = grpc_slice_buffer_take_first(write_slice_buffer);
@@ -700,7 +685,7 @@
   *p++ = (uint8_t)(length);
   /* append actual data */
   memcpy(p, GRPC_SLICE_START_PTR(slice), length);
-  grpc_slice_unref_internal(exec_ctx, slice);
+  grpc_slice_unref_internal(slice);
 }
 
 /*
@@ -981,8 +966,7 @@
 /*
   TODO (makdharma): Break down this function in smaller chunks for readability.
 */
-static enum e_op_result execute_stream_op(grpc_exec_ctx* exec_ctx,
-                                          struct op_and_state* oas) {
+static enum e_op_result execute_stream_op(struct op_and_state* oas) {
   grpc_transport_stream_op_batch* stream_op = &oas->op;
   struct stream_obj* s = oas->s;
   grpc_cronet_transport* t = (grpc_cronet_transport*)s->curr_ct;
@@ -1040,15 +1024,14 @@
       grpc_slice slice;
       grpc_slice_buffer_init(&write_slice_buffer);
       if (1 != grpc_byte_stream_next(
-                   exec_ctx, stream_op->payload->send_message.send_message,
+                   stream_op->payload->send_message.send_message,
                    stream_op->payload->send_message.send_message->length,
                    nullptr)) {
         /* Should never reach here */
         GPR_ASSERT(false);
       }
       if (GRPC_ERROR_NONE !=
-          grpc_byte_stream_pull(exec_ctx,
-                                stream_op->payload->send_message.send_message,
+          grpc_byte_stream_pull(stream_op->payload->send_message.send_message,
                                 &slice)) {
         /* Should never reach here */
         GPR_ASSERT(false);
@@ -1061,15 +1044,15 @@
       }
       if (write_slice_buffer.count > 0) {
         size_t write_buffer_size;
-        create_grpc_frame(exec_ctx, &write_slice_buffer,
-                          &stream_state->ws.write_buffer, &write_buffer_size,
+        create_grpc_frame(&write_slice_buffer, &stream_state->ws.write_buffer,
+                          &write_buffer_size,
                           stream_op->payload->send_message.send_message->flags);
         CRONET_LOG(GPR_DEBUG, "bidirectional_stream_write (%p, %p)", s->cbs,
                    stream_state->ws.write_buffer);
         stream_state->state_callback_received[OP_SEND_MESSAGE] = false;
         bidirectional_stream_write(s->cbs, stream_state->ws.write_buffer,
                                    (int)write_buffer_size, false);
-        grpc_slice_buffer_destroy_internal(exec_ctx, &write_slice_buffer);
+        grpc_slice_buffer_destroy_internal(&write_slice_buffer);
         if (t->use_packet_coalescing) {
           if (!stream_op->send_trailing_metadata) {
             CRONET_LOG(GPR_DEBUG, "bidirectional_stream_flush (%p)", s->cbs);
@@ -1112,25 +1095,21 @@
     CRONET_LOG(GPR_DEBUG, "running: %p  OP_RECV_INITIAL_METADATA", oas);
     if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
       GRPC_CLOSURE_SCHED(
-          exec_ctx,
           stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
           GRPC_ERROR_NONE);
     } else if (stream_state->state_callback_received[OP_FAILED]) {
       GRPC_CLOSURE_SCHED(
-          exec_ctx,
           stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
           GRPC_ERROR_NONE);
     } else if (stream_state->state_op_done[OP_RECV_TRAILING_METADATA]) {
       GRPC_CLOSURE_SCHED(
-          exec_ctx,
           stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
           GRPC_ERROR_NONE);
     } else {
       grpc_chttp2_incoming_metadata_buffer_publish(
-          exec_ctx, &oas->s->state.rs.initial_metadata,
+          &oas->s->state.rs.initial_metadata,
           stream_op->payload->recv_initial_metadata.recv_initial_metadata);
       GRPC_CLOSURE_SCHED(
-          exec_ctx,
           stream_op->payload->recv_initial_metadata.recv_initial_metadata_ready,
           GRPC_ERROR_NONE);
     }
@@ -1141,16 +1120,14 @@
     CRONET_LOG(GPR_DEBUG, "running: %p  OP_RECV_MESSAGE", oas);
     if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
       CRONET_LOG(GPR_DEBUG, "Stream is cancelled.");
-      GRPC_CLOSURE_SCHED(exec_ctx,
-                         stream_op->payload->recv_message.recv_message_ready,
+      GRPC_CLOSURE_SCHED(stream_op->payload->recv_message.recv_message_ready,
                          GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
       result = ACTION_TAKEN_NO_CALLBACK;
     } else if (stream_state->state_callback_received[OP_FAILED]) {
       CRONET_LOG(GPR_DEBUG, "Stream failed.");
-      GRPC_CLOSURE_SCHED(exec_ctx,
-                         stream_op->payload->recv_message.recv_message_ready,
+      GRPC_CLOSURE_SCHED(stream_op->payload->recv_message.recv_message_ready,
                          GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
@@ -1158,16 +1135,14 @@
     } else if (stream_state->rs.read_stream_closed == true) {
       /* No more data will be received */
       CRONET_LOG(GPR_DEBUG, "read stream closed");
-      GRPC_CLOSURE_SCHED(exec_ctx,
-                         stream_op->payload->recv_message.recv_message_ready,
+      GRPC_CLOSURE_SCHED(stream_op->payload->recv_message.recv_message_ready,
                          GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
       result = ACTION_TAKEN_NO_CALLBACK;
     } else if (stream_state->flush_read) {
       CRONET_LOG(GPR_DEBUG, "flush read");
-      GRPC_CLOSURE_SCHED(exec_ctx,
-                         stream_op->payload->recv_message.recv_message_ready,
+      GRPC_CLOSURE_SCHED(stream_op->payload->recv_message.recv_message_ready,
                          GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
@@ -1200,7 +1175,7 @@
           CRONET_LOG(GPR_DEBUG, "read operation complete. Empty response.");
           /* Clean up read_slice_buffer in case there is unread data. */
           grpc_slice_buffer_destroy_internal(
-              exec_ctx, &stream_state->rs.read_slice_buffer);
+              &stream_state->rs.read_slice_buffer);
           grpc_slice_buffer_init(&stream_state->rs.read_slice_buffer);
           grpc_slice_buffer_stream_init(&stream_state->rs.sbs,
                                         &stream_state->rs.read_slice_buffer, 0);
@@ -1210,7 +1185,7 @@
           *((grpc_byte_buffer**)stream_op->payload->recv_message.recv_message) =
               (grpc_byte_buffer*)&stream_state->rs.sbs;
           GRPC_CLOSURE_SCHED(
-              exec_ctx, stream_op->payload->recv_message.recv_message_ready,
+              stream_op->payload->recv_message.recv_message_ready,
               GRPC_ERROR_NONE);
           stream_state->state_op_done[OP_RECV_MESSAGE] = true;
           oas->state.state_op_done[OP_RECV_MESSAGE] = true;
@@ -1254,8 +1229,7 @@
              (size_t)stream_state->rs.length_field);
       null_and_maybe_free_read_buffer(s);
       /* Clean up read_slice_buffer in case there is unread data. */
-      grpc_slice_buffer_destroy_internal(exec_ctx,
-                                         &stream_state->rs.read_slice_buffer);
+      grpc_slice_buffer_destroy_internal(&stream_state->rs.read_slice_buffer);
       grpc_slice_buffer_init(&stream_state->rs.read_slice_buffer);
       grpc_slice_buffer_add(&stream_state->rs.read_slice_buffer,
                             read_data_slice);
@@ -1266,8 +1240,7 @@
       }
       *((grpc_byte_buffer**)stream_op->payload->recv_message.recv_message) =
           (grpc_byte_buffer*)&stream_state->rs.sbs;
-      GRPC_CLOSURE_SCHED(exec_ctx,
-                         stream_op->payload->recv_message.recv_message_ready,
+      GRPC_CLOSURE_SCHED(stream_op->payload->recv_message.recv_message_ready,
                          GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
@@ -1290,7 +1263,7 @@
     CRONET_LOG(GPR_DEBUG, "running: %p  OP_RECV_TRAILING_METADATA", oas);
     if (oas->s->state.rs.trailing_metadata_valid) {
       grpc_chttp2_incoming_metadata_buffer_publish(
-          exec_ctx, &oas->s->state.rs.trailing_metadata,
+          &oas->s->state.rs.trailing_metadata,
           stream_op->payload->recv_trailing_metadata.recv_trailing_metadata);
       stream_state->rs.trailing_metadata_valid = false;
     }
@@ -1315,17 +1288,17 @@
              op_can_be_run(stream_op, s, &oas->state, OP_ON_COMPLETE)) {
     CRONET_LOG(GPR_DEBUG, "running: %p  OP_ON_COMPLETE", oas);
     if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
-      GRPC_CLOSURE_SCHED(exec_ctx, stream_op->on_complete,
+      GRPC_CLOSURE_SCHED(stream_op->on_complete,
                          GRPC_ERROR_REF(stream_state->cancel_error));
     } else if (stream_state->state_callback_received[OP_FAILED]) {
       GRPC_CLOSURE_SCHED(
-          exec_ctx, stream_op->on_complete,
+          stream_op->on_complete,
           make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
     } else {
       /* All actions in this stream_op are complete. Call the on_complete
        * callback
        */
-      GRPC_CLOSURE_SCHED(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(stream_op->on_complete, GRPC_ERROR_NONE);
     }
     oas->state.state_op_done[OP_ON_COMPLETE] = true;
     oas->done = true;
@@ -1350,9 +1323,9 @@
   Functions used by upper layers to access transport functionality.
 */
 
-static int init_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                       grpc_stream* gs, grpc_stream_refcount* refcount,
-                       const void* server_data, gpr_arena* arena) {
+static int init_stream(grpc_transport* gt, grpc_stream* gs,
+                       grpc_stream_refcount* refcount, const void* server_data,
+                       gpr_arena* arena) {
   stream_obj* s = (stream_obj*)gs;
 
   s->refcount = refcount;
@@ -1383,15 +1356,13 @@
   return 0;
 }
 
-static void set_pollset_do_nothing(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                                   grpc_stream* gs, grpc_pollset* pollset) {}
+static void set_pollset_do_nothing(grpc_transport* gt, grpc_stream* gs,
+                                   grpc_pollset* pollset) {}
 
-static void set_pollset_set_do_nothing(grpc_exec_ctx* exec_ctx,
-                                       grpc_transport* gt, grpc_stream* gs,
+static void set_pollset_set_do_nothing(grpc_transport* gt, grpc_stream* gs,
                                        grpc_pollset_set* pollset_set) {}
 
-static void perform_stream_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                              grpc_stream* gs,
+static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
                               grpc_transport_stream_op_batch* op) {
   CRONET_LOG(GPR_DEBUG, "perform_stream_op");
   if (op->send_initial_metadata &&
@@ -1401,42 +1372,36 @@
      this field is present in metadata */
     if (op->recv_initial_metadata) {
       GRPC_CLOSURE_SCHED(
-          exec_ctx,
           op->payload->recv_initial_metadata.recv_initial_metadata_ready,
           GRPC_ERROR_CANCELLED);
     }
     if (op->recv_message) {
-      GRPC_CLOSURE_SCHED(exec_ctx, op->payload->recv_message.recv_message_ready,
+      GRPC_CLOSURE_SCHED(op->payload->recv_message.recv_message_ready,
                          GRPC_ERROR_CANCELLED);
     }
-    GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_CANCELLED);
+    GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_CANCELLED);
     return;
   }
   stream_obj* s = (stream_obj*)gs;
   add_to_storage(s, op);
-  execute_from_storage(exec_ctx, s);
+  execute_from_storage(s);
 }
 
-static void destroy_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                           grpc_stream* gs,
+static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
                            grpc_closure* then_schedule_closure) {
   stream_obj* s = (stream_obj*)gs;
   null_and_maybe_free_read_buffer(s);
   /* Clean up read_slice_buffer in case there is unread data. */
-  grpc_slice_buffer_destroy_internal(exec_ctx, &s->state.rs.read_slice_buffer);
+  grpc_slice_buffer_destroy_internal(&s->state.rs.read_slice_buffer);
   GRPC_ERROR_UNREF(s->state.cancel_error);
-  GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
 }
 
-static void destroy_transport(grpc_exec_ctx* exec_ctx, grpc_transport* gt) {}
+static void destroy_transport(grpc_transport* gt) {}
 
-static grpc_endpoint* get_endpoint(grpc_exec_ctx* exec_ctx,
-                                   grpc_transport* gt) {
-  return nullptr;
-}
+static grpc_endpoint* get_endpoint(grpc_transport* gt) { return nullptr; }
 
-static void perform_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                       grpc_transport_op* op) {}
+static void perform_op(grpc_transport* gt, grpc_transport_op* op) {}
 
 static const grpc_transport_vtable grpc_cronet_vtable = {
     sizeof(stream_obj),
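
create_grpc_frame above (see the "5 byte gRPC header prepended" comment) builds the standard gRPC length-prefixed message: one compressed-flag byte, the payload length as a 4-byte big-endian integer, then the message bytes. A standalone sketch of that framing; the helper name and types are illustrative:

    // 5-byte gRPC message header: flag byte + big-endian length + payload.
    #include <cstdint>
    #include <vector>

    std::vector<uint8_t> FrameGrpcMessage(const uint8_t* payload,
                                          uint32_t length, bool compressed) {
      std::vector<uint8_t> frame;
      frame.reserve(5 + length);
      frame.push_back(compressed ? 1 : 0);                   // flag byte
      frame.push_back(static_cast<uint8_t>(length >> 24));   // 4-byte length,
      frame.push_back(static_cast<uint8_t>(length >> 16));   // big-endian
      frame.push_back(static_cast<uint8_t>(length >> 8));
      frame.push_back(static_cast<uint8_t>(length));
      frame.insert(frame.end(), payload, payload + length);  // message bytes
      return frame;
    }
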
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.h b/src/core/ext/transport/cronet/transport/cronet_transport.h
index 7643fdb..d9ff913 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.h
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.h
@@ -21,16 +21,8 @@
 
 #include "src/core/lib/transport/transport.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 grpc_transport* grpc_create_cronet_transport(void* engine, const char* target,
                                              const grpc_channel_args* args,
                                              void* reserved);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_CRONET_TRANSPORT_CRONET_TRANSPORT_H */
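
The header hunks in this merge (stream_map.h, varint.h, cronet_transport.h) all drop the same extern "C" guards: once every consumer is compiled as C++, C linkage is no longer needed for these declarations. A hypothetical illustration of the difference the specifier makes; the widget_* names are invented for this example only:

    // The first declaration is exported with C linkage (unmangled symbol,
    // callable from C translation units); the second gets normal C++ name
    // mangling and is all that remains once the guards are removed and only
    // C++ callers exist.
    extern "C" void widget_init_c(void);
    void widget_init_cpp(void);

    extern "C" void widget_init_c(void) {}
    void widget_init_cpp(void) {}
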
diff --git a/src/core/ext/transport/inproc/inproc_plugin.cc b/src/core/ext/transport/inproc/inproc_plugin.cc
index 2526dbf..83a7d8d 100644
--- a/src/core/ext/transport/inproc/inproc_plugin.cc
+++ b/src/core/ext/transport/inproc/inproc_plugin.cc
@@ -21,8 +21,6 @@
 
 grpc_core::TraceFlag grpc_inproc_trace(false, "inproc");
 
-extern "C" void grpc_inproc_plugin_init(void) { grpc_inproc_transport_init(); }
+void grpc_inproc_plugin_init(void) { grpc_inproc_transport_init(); }
 
-extern "C" void grpc_inproc_plugin_shutdown(void) {
-  grpc_inproc_transport_shutdown();
-}
+void grpc_inproc_plugin_shutdown(void) { grpc_inproc_transport_shutdown(); }
diff --git a/src/core/ext/transport/inproc/inproc_transport.cc b/src/core/ext/transport/inproc/inproc_transport.cc
index 2579060..8dd0b7d 100644
--- a/src/core/ext/transport/inproc/inproc_transport.cc
+++ b/src/core/ext/transport/inproc/inproc_transport.cc
@@ -54,8 +54,8 @@
   gpr_refcount refs;
   bool is_client;
   grpc_connectivity_state_tracker connectivity;
-  void (*accept_stream_cb)(grpc_exec_ctx* exec_ctx, void* user_data,
-                           grpc_transport* transport, const void* server_data);
+  void (*accept_stream_cb)(void* user_data, grpc_transport* transport,
+                           const void* server_data);
   void* accept_stream_data;
   bool is_closed;
   struct inproc_transport* other_side;
@@ -118,39 +118,36 @@
 } inproc_stream;
 
 static grpc_closure do_nothing_closure;
-static bool cancel_stream_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s,
-                                 grpc_error* error);
-static void op_state_machine(grpc_exec_ctx* exec_ctx, void* arg,
-                             grpc_error* error);
+static bool cancel_stream_locked(inproc_stream* s, grpc_error* error);
+static void op_state_machine(void* arg, grpc_error* error);
 
 static void ref_transport(inproc_transport* t) {
   INPROC_LOG(GPR_DEBUG, "ref_transport %p", t);
   gpr_ref(&t->refs);
 }
 
-static void really_destroy_transport(grpc_exec_ctx* exec_ctx,
-                                     inproc_transport* t) {
+static void really_destroy_transport(inproc_transport* t) {
   INPROC_LOG(GPR_DEBUG, "really_destroy_transport %p", t);
-  grpc_connectivity_state_destroy(exec_ctx, &t->connectivity);
+  grpc_connectivity_state_destroy(&t->connectivity);
   if (gpr_unref(&t->mu->refs)) {
     gpr_free(t->mu);
   }
   gpr_free(t);
 }
 
-static void unref_transport(grpc_exec_ctx* exec_ctx, inproc_transport* t) {
+static void unref_transport(inproc_transport* t) {
   INPROC_LOG(GPR_DEBUG, "unref_transport %p", t);
   if (gpr_unref(&t->refs)) {
-    really_destroy_transport(exec_ctx, t);
+    really_destroy_transport(t);
   }
 }
 
 #ifndef NDEBUG
 #define STREAM_REF(refs, reason) grpc_stream_ref(refs, reason)
-#define STREAM_UNREF(e, refs, reason) grpc_stream_unref(e, refs, reason)
+#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs, reason)
 #else
 #define STREAM_REF(refs, reason) grpc_stream_ref(refs)
-#define STREAM_UNREF(e, refs, reason) grpc_stream_unref(e, refs)
+#define STREAM_UNREF(refs, reason) grpc_stream_unref(refs)
 #endif
 
 static void ref_stream(inproc_stream* s, const char* reason) {
@@ -158,13 +155,12 @@
   STREAM_REF(s->refs, reason);
 }
 
-static void unref_stream(grpc_exec_ctx* exec_ctx, inproc_stream* s,
-                         const char* reason) {
+static void unref_stream(inproc_stream* s, const char* reason) {
   INPROC_LOG(GPR_DEBUG, "unref_stream %p %s", s, reason);
-  STREAM_UNREF(exec_ctx, s->refs, reason);
+  STREAM_UNREF(s->refs, reason);
 }
 
-static void really_destroy_stream(grpc_exec_ctx* exec_ctx, inproc_stream* s) {
+static void really_destroy_stream(inproc_stream* s) {
   INPROC_LOG(GPR_DEBUG, "really_destroy_stream %p", s);
 
   GRPC_ERROR_UNREF(s->write_buffer_cancel_error);
@@ -172,13 +168,13 @@
   GRPC_ERROR_UNREF(s->cancel_other_error);
 
   if (s->recv_inited) {
-    grpc_slice_buffer_destroy_internal(exec_ctx, &s->recv_message);
+    grpc_slice_buffer_destroy_internal(&s->recv_message);
   }
 
-  unref_transport(exec_ctx, s->t);
+  unref_transport(s->t);
 
   if (s->closure_at_destroy) {
-    GRPC_CLOSURE_SCHED(exec_ctx, s->closure_at_destroy, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(s->closure_at_destroy, GRPC_ERROR_NONE);
   }
 }
 
@@ -195,7 +191,7 @@
   }
 }
 
-static grpc_error* fill_in_metadata(grpc_exec_ctx* exec_ctx, inproc_stream* s,
+static grpc_error* fill_in_metadata(inproc_stream* s,
                                     const grpc_metadata_batch* metadata,
                                     uint32_t flags, grpc_metadata_batch* out_md,
                                     uint32_t* outflags, bool* markfilled) {
@@ -214,18 +210,18 @@
        (elem != nullptr) && (error == GRPC_ERROR_NONE); elem = elem->next) {
     grpc_linked_mdelem* nelem =
         (grpc_linked_mdelem*)gpr_arena_alloc(s->arena, sizeof(*nelem));
-    nelem->md = grpc_mdelem_from_slices(
-        exec_ctx, grpc_slice_intern(GRPC_MDKEY(elem->md)),
-        grpc_slice_intern(GRPC_MDVALUE(elem->md)));
+    nelem->md =
+        grpc_mdelem_from_slices(grpc_slice_intern(GRPC_MDKEY(elem->md)),
+                                grpc_slice_intern(GRPC_MDVALUE(elem->md)));
 
-    error = grpc_metadata_batch_link_tail(exec_ctx, out_md, nelem);
+    error = grpc_metadata_batch_link_tail(out_md, nelem);
   }
   return error;
 }
 
-static int init_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                       grpc_stream* gs, grpc_stream_refcount* refcount,
-                       const void* server_data, gpr_arena* arena) {
+static int init_stream(grpc_transport* gt, grpc_stream* gs,
+                       grpc_stream_refcount* refcount, const void* server_data,
+                       gpr_arena* arena) {
   INPROC_LOG(GPR_DEBUG, "init_stream %p %p %p", gt, gs, server_data);
   inproc_transport* t = (inproc_transport*)gt;
   inproc_stream* s = (inproc_stream*)gs;
@@ -285,8 +281,7 @@
                                               // side to avoid destruction
     INPROC_LOG(GPR_DEBUG, "calling accept stream cb %p %p",
                st->accept_stream_cb, st->accept_stream_data);
-    (*st->accept_stream_cb)(exec_ctx, st->accept_stream_data, &st->base,
-                            (void*)s);
+    (*st->accept_stream_cb)(st->accept_stream_data, &st->base, (void*)s);
   } else {
     // This is the server-side and is being called through accept_stream_cb
     inproc_stream* cs = (inproc_stream*)server_data;
@@ -301,19 +296,19 @@
     // Now transfer from the other side's write_buffer if any to the to_read
     // buffer
     if (cs->write_buffer_initial_md_filled) {
-      fill_in_metadata(exec_ctx, s, &cs->write_buffer_initial_md,
+      fill_in_metadata(s, &cs->write_buffer_initial_md,
                        cs->write_buffer_initial_md_flags,
                        &s->to_read_initial_md, &s->to_read_initial_md_flags,
                        &s->to_read_initial_md_filled);
       s->deadline = GPR_MIN(s->deadline, cs->write_buffer_deadline);
-      grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_initial_md);
+      grpc_metadata_batch_clear(&cs->write_buffer_initial_md);
       cs->write_buffer_initial_md_filled = false;
     }
     if (cs->write_buffer_trailing_md_filled) {
-      fill_in_metadata(exec_ctx, s, &cs->write_buffer_trailing_md, 0,
+      fill_in_metadata(s, &cs->write_buffer_trailing_md, 0,
                        &s->to_read_trailing_md, nullptr,
                        &s->to_read_trailing_md_filled);
-      grpc_metadata_batch_clear(exec_ctx, &cs->write_buffer_trailing_md);
+      grpc_metadata_batch_clear(&cs->write_buffer_trailing_md);
       cs->write_buffer_trailing_md_filled = false;
     }
     if (cs->write_buffer_cancel_error != GRPC_ERROR_NONE) {
@@ -326,11 +321,11 @@
   return 0;  // return value is not important
 }
 
-static void close_stream_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s) {
+static void close_stream_locked(inproc_stream* s) {
   if (!s->closed) {
     // Release the metadata that we would have written out
-    grpc_metadata_batch_destroy(exec_ctx, &s->write_buffer_initial_md);
-    grpc_metadata_batch_destroy(exec_ctx, &s->write_buffer_trailing_md);
+    grpc_metadata_batch_destroy(&s->write_buffer_initial_md);
+    grpc_metadata_batch_destroy(&s->write_buffer_trailing_md);
 
     if (s->listed) {
       inproc_stream* p = s->stream_list_prev;
@@ -344,22 +339,21 @@
         n->stream_list_prev = p;
       }
       s->listed = false;
-      unref_stream(exec_ctx, s, "close_stream:list");
+      unref_stream(s, "close_stream:list");
     }
     s->closed = true;
-    unref_stream(exec_ctx, s, "close_stream:closing");
+    unref_stream(s, "close_stream:closing");
   }
 }
 
 // This function means that we are done talking/listening to the other side
-static void close_other_side_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s,
-                                    const char* reason) {
+static void close_other_side_locked(inproc_stream* s, const char* reason) {
   if (s->other_side != nullptr) {
     // First release the metadata that came from the other side's arena
-    grpc_metadata_batch_destroy(exec_ctx, &s->to_read_initial_md);
-    grpc_metadata_batch_destroy(exec_ctx, &s->to_read_trailing_md);
+    grpc_metadata_batch_destroy(&s->to_read_initial_md);
+    grpc_metadata_batch_destroy(&s->to_read_trailing_md);
 
-    unref_stream(exec_ctx, s->other_side, reason);
+    unref_stream(s->other_side, reason);
     s->other_side_closed = true;
     s->other_side = nullptr;
   } else if (!s->other_side_closed) {
@@ -371,8 +365,7 @@
 // this stream_op_batch is only one of the pending operations for this
 // stream. This is called when one of the pending operations for the stream
 // is done and about to be NULLed out
-static void complete_if_batch_end_locked(grpc_exec_ctx* exec_ctx,
-                                         inproc_stream* s, grpc_error* error,
+static void complete_if_batch_end_locked(inproc_stream* s, grpc_error* error,
                                          grpc_transport_stream_op_batch* op,
                                          const char* msg) {
   int is_sm = (int)(op == s->send_message_op);
@@ -383,22 +376,20 @@
 
   if ((is_sm + is_stm + is_rim + is_rm + is_rtm) == 1) {
     INPROC_LOG(GPR_DEBUG, "%s %p %p %p", msg, s, op, error);
-    GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_REF(error));
   }
 }
 
-static void maybe_schedule_op_closure_locked(grpc_exec_ctx* exec_ctx,
-                                             inproc_stream* s,
+static void maybe_schedule_op_closure_locked(inproc_stream* s,
                                              grpc_error* error) {
   if (s && s->ops_needed && !s->op_closure_scheduled) {
-    GRPC_CLOSURE_SCHED(exec_ctx, &s->op_closure, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(&s->op_closure, GRPC_ERROR_REF(error));
     s->op_closure_scheduled = true;
     s->ops_needed = false;
   }
 }
 
-static void fail_helper_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s,
-                               grpc_error* error) {
+static void fail_helper_locked(inproc_stream* s, grpc_error* error) {
   INPROC_LOG(GPR_DEBUG, "op_state_machine %p fail_helper", s);
   // If we're failing this side, we need to make sure that
   // we also send or have already sent trailing metadata
@@ -415,14 +406,14 @@
                                     : &other->to_read_trailing_md;
     bool* destfilled = (other == nullptr) ? &s->write_buffer_trailing_md_filled
                                           : &other->to_read_trailing_md_filled;
-    fill_in_metadata(exec_ctx, s, &fake_md, 0, dest, nullptr, destfilled);
-    grpc_metadata_batch_destroy(exec_ctx, &fake_md);
+    fill_in_metadata(s, &fake_md, 0, dest, nullptr, destfilled);
+    grpc_metadata_batch_destroy(&fake_md);
 
     if (other != nullptr) {
       if (other->cancel_other_error == GRPC_ERROR_NONE) {
         other->cancel_other_error = GRPC_ERROR_REF(error);
       }
-      maybe_schedule_op_closure_locked(exec_ctx, other, error);
+      maybe_schedule_op_closure_locked(other, error);
     } else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) {
       s->write_buffer_cancel_error = GRPC_ERROR_REF(error);
     }
@@ -436,39 +427,44 @@
       grpc_metadata_batch_init(&fake_md);
       grpc_linked_mdelem* path_md =
           (grpc_linked_mdelem*)gpr_arena_alloc(s->arena, sizeof(*path_md));
-      path_md->md =
-          grpc_mdelem_from_slices(exec_ctx, g_fake_path_key, g_fake_path_value);
-      GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, path_md) ==
+      path_md->md = grpc_mdelem_from_slices(g_fake_path_key, g_fake_path_value);
+      GPR_ASSERT(grpc_metadata_batch_link_tail(&fake_md, path_md) ==
                  GRPC_ERROR_NONE);
       grpc_linked_mdelem* auth_md =
           (grpc_linked_mdelem*)gpr_arena_alloc(s->arena, sizeof(*auth_md));
-      auth_md->md =
-          grpc_mdelem_from_slices(exec_ctx, g_fake_auth_key, g_fake_auth_value);
-      GPR_ASSERT(grpc_metadata_batch_link_tail(exec_ctx, &fake_md, auth_md) ==
+      auth_md->md = grpc_mdelem_from_slices(g_fake_auth_key, g_fake_auth_value);
+      GPR_ASSERT(grpc_metadata_batch_link_tail(&fake_md, auth_md) ==
                  GRPC_ERROR_NONE);
 
       fill_in_metadata(
-          exec_ctx, s, &fake_md, 0,
+          s, &fake_md, 0,
           s->recv_initial_md_op->payload->recv_initial_metadata
               .recv_initial_metadata,
           s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags,
           nullptr);
-      grpc_metadata_batch_destroy(exec_ctx, &fake_md);
+      grpc_metadata_batch_destroy(&fake_md);
       err = GRPC_ERROR_NONE;
     } else {
       err = GRPC_ERROR_REF(error);
     }
+    if (s->recv_initial_md_op->payload->recv_initial_metadata
+            .trailing_metadata_available != nullptr) {
+      // Set to true unconditionally, because we're failing the call, so even
+      // if we haven't actually seen the send_trailing_metadata op from the
+      // other side, we're going to return trailing metadata anyway.
+      *s->recv_initial_md_op->payload->recv_initial_metadata
+           .trailing_metadata_available = true;
+    }
     INPROC_LOG(GPR_DEBUG,
                "fail_helper %p scheduling initial-metadata-ready %p %p", s,
                error, err);
-    GRPC_CLOSURE_SCHED(exec_ctx,
-                       s->recv_initial_md_op->payload->recv_initial_metadata
+    GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
                            .recv_initial_metadata_ready,
                        err);
     // Last use of err so no need to REF and then UNREF it
 
     complete_if_batch_end_locked(
-        exec_ctx, s, error, s->recv_initial_md_op,
+        s, error, s->recv_initial_md_op,
         "fail_helper scheduling recv-initial-metadata-on-complete");
     s->recv_initial_md_op = nullptr;
   }
@@ -476,22 +472,22 @@
     INPROC_LOG(GPR_DEBUG, "fail_helper %p scheduling message-ready %p", s,
                error);
     GRPC_CLOSURE_SCHED(
-        exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
+        s->recv_message_op->payload->recv_message.recv_message_ready,
         GRPC_ERROR_REF(error));
     complete_if_batch_end_locked(
-        exec_ctx, s, error, s->recv_message_op,
+        s, error, s->recv_message_op,
         "fail_helper scheduling recv-message-on-complete");
     s->recv_message_op = nullptr;
   }
   if (s->send_message_op) {
     complete_if_batch_end_locked(
-        exec_ctx, s, error, s->send_message_op,
+        s, error, s->send_message_op,
         "fail_helper scheduling send-message-on-complete");
     s->send_message_op = nullptr;
   }
   if (s->send_trailing_md_op) {
     complete_if_batch_end_locked(
-        exec_ctx, s, error, s->send_trailing_md_op,
+        s, error, s->send_trailing_md_op,
         "fail_helper scheduling send-trailng-md-on-complete");
     s->send_trailing_md_op = nullptr;
   }
@@ -500,23 +496,22 @@
                "fail_helper %p scheduling trailing-md-on-complete %p", s,
                error);
     complete_if_batch_end_locked(
-        exec_ctx, s, error, s->recv_trailing_md_op,
+        s, error, s->recv_trailing_md_op,
         "fail_helper scheduling recv-trailing-metadata-on-complete");
     s->recv_trailing_md_op = nullptr;
   }
-  close_other_side_locked(exec_ctx, s, "fail_helper:other_side");
-  close_stream_locked(exec_ctx, s);
+  close_other_side_locked(s, "fail_helper:other_side");
+  close_stream_locked(s);
 
   GRPC_ERROR_UNREF(error);
 }
 
-static void message_transfer_locked(grpc_exec_ctx* exec_ctx,
-                                    inproc_stream* sender,
+static void message_transfer_locked(inproc_stream* sender,
                                     inproc_stream* receiver) {
   size_t remaining =
       sender->send_message_op->payload->send_message.send_message->length;
   if (receiver->recv_inited) {
-    grpc_slice_buffer_destroy_internal(exec_ctx, &receiver->recv_message);
+    grpc_slice_buffer_destroy_internal(&receiver->recv_message);
   }
   grpc_slice_buffer_init(&receiver->recv_message);
   receiver->recv_inited = true;
@@ -524,13 +519,13 @@
     grpc_slice message_slice;
     grpc_closure unused;
     GPR_ASSERT(grpc_byte_stream_next(
-        exec_ctx, sender->send_message_op->payload->send_message.send_message,
-        SIZE_MAX, &unused));
+        sender->send_message_op->payload->send_message.send_message, SIZE_MAX,
+        &unused));
     grpc_error* error = grpc_byte_stream_pull(
-        exec_ctx, sender->send_message_op->payload->send_message.send_message,
+        sender->send_message_op->payload->send_message.send_message,
         &message_slice);
     if (error != GRPC_ERROR_NONE) {
-      cancel_stream_locked(exec_ctx, sender, GRPC_ERROR_REF(error));
+      cancel_stream_locked(sender, GRPC_ERROR_REF(error));
       break;
     }
     GPR_ASSERT(error == GRPC_ERROR_NONE);
@@ -545,22 +540,20 @@
   INPROC_LOG(GPR_DEBUG, "message_transfer_locked %p scheduling message-ready",
              receiver);
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       receiver->recv_message_op->payload->recv_message.recv_message_ready,
       GRPC_ERROR_NONE);
   complete_if_batch_end_locked(
-      exec_ctx, sender, GRPC_ERROR_NONE, sender->send_message_op,
+      sender, GRPC_ERROR_NONE, sender->send_message_op,
       "message_transfer scheduling sender on_complete");
   complete_if_batch_end_locked(
-      exec_ctx, receiver, GRPC_ERROR_NONE, receiver->recv_message_op,
+      receiver, GRPC_ERROR_NONE, receiver->recv_message_op,
       "message_transfer scheduling receiver on_complete");
 
   receiver->recv_message_op = nullptr;
   sender->send_message_op = nullptr;
 }
 
-static void op_state_machine(grpc_exec_ctx* exec_ctx, void* arg,
-                             grpc_error* error) {
+static void op_state_machine(void* arg, grpc_error* error) {
   // This function gets called when we have contents in the unprocessed reads
   // Get what we want based on our ops wanted
   // Schedule our appropriate closures
@@ -581,26 +574,26 @@
   inproc_stream* other = s->other_side;
 
   if (s->cancel_self_error != GRPC_ERROR_NONE) {
-    fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(s->cancel_self_error));
+    fail_helper_locked(s, GRPC_ERROR_REF(s->cancel_self_error));
     goto done;
   } else if (s->cancel_other_error != GRPC_ERROR_NONE) {
-    fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(s->cancel_other_error));
+    fail_helper_locked(s, GRPC_ERROR_REF(s->cancel_other_error));
     goto done;
   } else if (error != GRPC_ERROR_NONE) {
-    fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(error));
+    fail_helper_locked(s, GRPC_ERROR_REF(error));
     goto done;
   }
 
   if (s->send_message_op && other) {
     if (other->recv_message_op) {
-      message_transfer_locked(exec_ctx, s, other);
-      maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
+      message_transfer_locked(s, other);
+      maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE);
     } else if (!s->t->is_client &&
                (s->trailing_md_sent || other->recv_trailing_md_op)) {
       // A server send will never be matched if the client is waiting
       // for trailing metadata already
       complete_if_batch_end_locked(
-          exec_ctx, s, GRPC_ERROR_NONE, s->send_message_op,
+          s, GRPC_ERROR_NONE, s->send_message_op,
           "op_state_machine scheduling send-message-on-complete");
       s->send_message_op = nullptr;
     }
@@ -622,11 +615,11 @@
       // The buffer is already in use; that's an error!
       INPROC_LOG(GPR_DEBUG, "Extra trailing metadata %p", s);
       new_err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Extra trailing metadata");
-      fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
+      fail_helper_locked(s, GRPC_ERROR_REF(new_err));
       goto done;
     } else {
       if (!other || !other->closed) {
-        fill_in_metadata(exec_ctx, s,
+        fill_in_metadata(s,
                          s->send_trailing_md_op->payload->send_trailing_metadata
                              .send_trailing_metadata,
                          0, dest, nullptr, destfilled);
@@ -635,15 +628,15 @@
       if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
         INPROC_LOG(GPR_DEBUG,
                    "op_state_machine %p scheduling trailing-md-on-complete", s);
-        GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
+        GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
                            GRPC_ERROR_NONE);
         s->recv_trailing_md_op = nullptr;
         needs_close = true;
       }
     }
-    maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
+    maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE);
     complete_if_batch_end_locked(
-        exec_ctx, s, GRPC_ERROR_NONE, s->send_trailing_md_op,
+        s, GRPC_ERROR_NONE, s->send_trailing_md_op,
         "op_state_machine scheduling send-trailing-metadata-on-complete");
     s->send_trailing_md_op = nullptr;
   }
@@ -656,31 +649,36 @@
           "op_state_machine %p scheduling on_complete errors for already "
           "recvd initial md %p",
           s, new_err);
-      fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
+      fail_helper_locked(s, GRPC_ERROR_REF(new_err));
       goto done;
     }
 
     if (s->to_read_initial_md_filled) {
       s->initial_md_recvd = true;
       new_err = fill_in_metadata(
-          exec_ctx, s, &s->to_read_initial_md, s->to_read_initial_md_flags,
+          s, &s->to_read_initial_md, s->to_read_initial_md_flags,
           s->recv_initial_md_op->payload->recv_initial_metadata
               .recv_initial_metadata,
           s->recv_initial_md_op->payload->recv_initial_metadata.recv_flags,
           nullptr);
       s->recv_initial_md_op->payload->recv_initial_metadata
           .recv_initial_metadata->deadline = s->deadline;
-      grpc_metadata_batch_clear(exec_ctx, &s->to_read_initial_md);
+      if (s->recv_initial_md_op->payload->recv_initial_metadata
+              .trailing_metadata_available != nullptr) {
+        *s->recv_initial_md_op->payload->recv_initial_metadata
+             .trailing_metadata_available =
+            (other != nullptr && other->send_trailing_md_op != nullptr);
+      }
+      grpc_metadata_batch_clear(&s->to_read_initial_md);
       s->to_read_initial_md_filled = false;
       INPROC_LOG(GPR_DEBUG,
                  "op_state_machine %p scheduling initial-metadata-ready %p", s,
                  new_err);
-      GRPC_CLOSURE_SCHED(exec_ctx,
-                         s->recv_initial_md_op->payload->recv_initial_metadata
+      GRPC_CLOSURE_SCHED(s->recv_initial_md_op->payload->recv_initial_metadata
                              .recv_initial_metadata_ready,
                          GRPC_ERROR_REF(new_err));
       complete_if_batch_end_locked(
-          exec_ctx, s, new_err, s->recv_initial_md_op,
+          s, new_err, s->recv_initial_md_op,
           "op_state_machine scheduling recv-initial-metadata-on-complete");
       s->recv_initial_md_op = nullptr;
 
@@ -688,20 +686,20 @@
         INPROC_LOG(GPR_DEBUG,
                    "op_state_machine %p scheduling on_complete errors2 %p", s,
                    new_err);
-        fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
+        fail_helper_locked(s, GRPC_ERROR_REF(new_err));
         goto done;
       }
     }
   }
   if (s->recv_message_op) {
     if (other && other->send_message_op) {
-      message_transfer_locked(exec_ctx, other, s);
-      maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
+      message_transfer_locked(other, s);
+      maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE);
     }
   }
   if (s->recv_trailing_md_op && s->t->is_client && other &&
       other->send_message_op) {
-    maybe_schedule_op_closure_locked(exec_ctx, other, GRPC_ERROR_NONE);
+    maybe_schedule_op_closure_locked(other, GRPC_ERROR_NONE);
   }
   if (s->to_read_trailing_md_filled) {
     if (s->trailing_md_recvd) {
@@ -712,7 +710,7 @@
           "op_state_machine %p scheduling on_complete errors for already "
           "recvd trailing md %p",
           s, new_err);
-      fail_helper_locked(exec_ctx, s, GRPC_ERROR_REF(new_err));
+      fail_helper_locked(s, GRPC_ERROR_REF(new_err));
       goto done;
     }
     if (s->recv_message_op != nullptr) {
@@ -720,11 +718,10 @@
       // satisfied
       INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
       GRPC_CLOSURE_SCHED(
-          exec_ctx,
           s->recv_message_op->payload->recv_message.recv_message_ready,
           GRPC_ERROR_NONE);
       complete_if_batch_end_locked(
-          exec_ctx, s, new_err, s->recv_message_op,
+          s, new_err, s->recv_message_op,
           "op_state_machine scheduling recv-message-on-complete");
       s->recv_message_op = nullptr;
     }
@@ -732,7 +729,7 @@
       // Nothing further will try to receive from this stream, so finish off
       // any outstanding send_message op
       complete_if_batch_end_locked(
-          exec_ctx, s, new_err, s->send_message_op,
+          s, new_err, s->send_message_op,
           "op_state_machine scheduling send-message-on-complete");
       s->send_message_op = nullptr;
     }
@@ -740,11 +737,11 @@
       // We wanted trailing metadata and we got it
       s->trailing_md_recvd = true;
       new_err =
-          fill_in_metadata(exec_ctx, s, &s->to_read_trailing_md, 0,
+          fill_in_metadata(s, &s->to_read_trailing_md, 0,
                            s->recv_trailing_md_op->payload
                                ->recv_trailing_metadata.recv_trailing_metadata,
                            nullptr, nullptr);
-      grpc_metadata_batch_clear(exec_ctx, &s->to_read_trailing_md);
+      grpc_metadata_batch_clear(&s->to_read_trailing_md);
       s->to_read_trailing_md_filled = false;
 
       // We should schedule the recv_trailing_md_op completion if
@@ -756,7 +753,7 @@
         INPROC_LOG(GPR_DEBUG,
                    "op_state_machine %p scheduling trailing-md-on-complete %p",
                    s, new_err);
-        GRPC_CLOSURE_SCHED(exec_ctx, s->recv_trailing_md_op->on_complete,
+        GRPC_CLOSURE_SCHED(s->recv_trailing_md_op->on_complete,
                            GRPC_ERROR_REF(new_err));
         s->recv_trailing_md_op = nullptr;
         needs_close = true;
@@ -777,10 +774,10 @@
     // recv_message_op
     INPROC_LOG(GPR_DEBUG, "op_state_machine %p scheduling message-ready", s);
     GRPC_CLOSURE_SCHED(
-        exec_ctx, s->recv_message_op->payload->recv_message.recv_message_ready,
+        s->recv_message_op->payload->recv_message.recv_message_ready,
         GRPC_ERROR_NONE);
     complete_if_batch_end_locked(
-        exec_ctx, s, new_err, s->recv_message_op,
+        s, new_err, s->recv_message_op,
         "op_state_machine scheduling recv-message-on-complete");
     s->recv_message_op = nullptr;
   }
@@ -789,7 +786,7 @@
     // Nothing further will try to receive from this stream, so finish off
     // any outstanding send_message op
     complete_if_batch_end_locked(
-        exec_ctx, s, new_err, s->send_message_op,
+        s, new_err, s->send_message_op,
         "op_state_machine scheduling send-message-on-complete");
     s->send_message_op = nullptr;
   }
@@ -805,22 +802,21 @@
   }
 done:
   if (needs_close) {
-    close_other_side_locked(exec_ctx, s, "op_state_machine");
-    close_stream_locked(exec_ctx, s);
+    close_other_side_locked(s, "op_state_machine");
+    close_stream_locked(s);
   }
   gpr_mu_unlock(mu);
   GRPC_ERROR_UNREF(new_err);
 }
 
-static bool cancel_stream_locked(grpc_exec_ctx* exec_ctx, inproc_stream* s,
-                                 grpc_error* error) {
+static bool cancel_stream_locked(inproc_stream* s, grpc_error* error) {
   bool ret = false;  // was the cancel accepted
   INPROC_LOG(GPR_DEBUG, "cancel_stream %p with %s", s,
              grpc_error_string(error));
   if (s->cancel_self_error == GRPC_ERROR_NONE) {
     ret = true;
     s->cancel_self_error = GRPC_ERROR_REF(error);
-    maybe_schedule_op_closure_locked(exec_ctx, s, s->cancel_self_error);
+    maybe_schedule_op_closure_locked(s, s->cancel_self_error);
     // Send trailing md to the other side indicating cancellation, even if we
     // already have
     s->trailing_md_sent = true;
@@ -834,15 +830,14 @@
                                     : &other->to_read_trailing_md;
     bool* destfilled = (other == nullptr) ? &s->write_buffer_trailing_md_filled
                                           : &other->to_read_trailing_md_filled;
-    fill_in_metadata(exec_ctx, s, &cancel_md, 0, dest, nullptr, destfilled);
-    grpc_metadata_batch_destroy(exec_ctx, &cancel_md);
+    fill_in_metadata(s, &cancel_md, 0, dest, nullptr, destfilled);
+    grpc_metadata_batch_destroy(&cancel_md);
 
     if (other != nullptr) {
       if (other->cancel_other_error == GRPC_ERROR_NONE) {
         other->cancel_other_error = GRPC_ERROR_REF(s->cancel_self_error);
       }
-      maybe_schedule_op_closure_locked(exec_ctx, other,
-                                       other->cancel_other_error);
+      maybe_schedule_op_closure_locked(other, other->cancel_other_error);
     } else if (s->write_buffer_cancel_error == GRPC_ERROR_NONE) {
       s->write_buffer_cancel_error = GRPC_ERROR_REF(s->cancel_self_error);
     }
@@ -852,21 +847,20 @@
     // md, now's the chance
     if (!s->t->is_client && s->trailing_md_recvd && s->recv_trailing_md_op) {
       complete_if_batch_end_locked(
-          exec_ctx, s, s->cancel_self_error, s->recv_trailing_md_op,
+          s, s->cancel_self_error, s->recv_trailing_md_op,
           "cancel_stream scheduling trailing-md-on-complete");
       s->recv_trailing_md_op = nullptr;
     }
   }
 
-  close_other_side_locked(exec_ctx, s, "cancel_stream:other_side");
-  close_stream_locked(exec_ctx, s);
+  close_other_side_locked(s, "cancel_stream:other_side");
+  close_stream_locked(s);
 
   GRPC_ERROR_UNREF(error);
   return ret;
 }
 
-static void perform_stream_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                              grpc_stream* gs,
+static void perform_stream_op(grpc_transport* gt, grpc_stream* gs,
                               grpc_transport_stream_op_batch* op) {
   INPROC_LOG(GPR_DEBUG, "perform_stream_op %p %p %p", gt, gs, op);
   inproc_stream* s = (inproc_stream*)gs;
@@ -892,7 +886,7 @@
   if (op->cancel_stream) {
     // Call cancel_stream_locked without ref'ing the cancel_error because
     // this function is responsible to make sure that that field gets unref'ed
-    cancel_stream_locked(exec_ctx, s, op->payload->cancel_stream.cancel_error);
+    cancel_stream_locked(s, op->payload->cancel_stream.cancel_error);
     // this op can complete without an error
   } else if (s->cancel_self_error != GRPC_ERROR_NONE) {
     // already self-canceled so still give it an error
@@ -932,8 +926,7 @@
       } else {
         if (!other || !other->closed) {
           fill_in_metadata(
-              exec_ctx, s,
-              op->payload->send_initial_metadata.send_initial_metadata,
+              s, op->payload->send_initial_metadata.send_initial_metadata,
               op->payload->send_initial_metadata.send_initial_metadata_flags,
               dest, destflags, destfilled);
         }
@@ -945,7 +938,7 @@
           s->initial_md_sent = true;
         }
       }
-      maybe_schedule_op_closure_locked(exec_ctx, other, error);
+      maybe_schedule_op_closure_locked(other, error);
     }
   }
 
@@ -985,7 +978,7 @@
         (op->recv_message && other && (other->send_message_op != nullptr)) ||
         (s->to_read_trailing_md_filled || s->trailing_md_recvd)) {
       if (!s->op_closure_scheduled) {
-        GRPC_CLOSURE_SCHED(exec_ctx, &s->op_closure, GRPC_ERROR_NONE);
+        GRPC_CLOSURE_SCHED(&s->op_closure, GRPC_ERROR_NONE);
         s->op_closure_scheduled = true;
       }
     } else {
@@ -995,12 +988,20 @@
     if (error != GRPC_ERROR_NONE) {
       // Schedule op's closures that we didn't push to op state machine
       if (op->recv_initial_metadata) {
+        if (op->payload->recv_initial_metadata.trailing_metadata_available !=
+            nullptr) {
+          // Set to true unconditionally, because we're failing the call, so
+          // even if we haven't actually seen the send_trailing_metadata op
+          // from the other side, we're going to return trailing metadata
+          // anyway.
+          *op->payload->recv_initial_metadata.trailing_metadata_available =
+              true;
+        }
         INPROC_LOG(
             GPR_DEBUG,
             "perform_stream_op error %p scheduling initial-metadata-ready %p",
             s, error);
         GRPC_CLOSURE_SCHED(
-            exec_ctx,
             op->payload->recv_initial_metadata.recv_initial_metadata_ready,
             GRPC_ERROR_REF(error));
       }
@@ -1009,28 +1010,26 @@
             GPR_DEBUG,
             "perform_stream_op error %p scheduling recv message-ready %p", s,
             error);
-        GRPC_CLOSURE_SCHED(exec_ctx,
-                           op->payload->recv_message.recv_message_ready,
+        GRPC_CLOSURE_SCHED(op->payload->recv_message.recv_message_ready,
                            GRPC_ERROR_REF(error));
       }
     }
     INPROC_LOG(GPR_DEBUG, "perform_stream_op %p scheduling on_complete %p", s,
                error);
-    GRPC_CLOSURE_SCHED(exec_ctx, on_complete, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(on_complete, GRPC_ERROR_REF(error));
   }
   if (needs_close) {
-    close_other_side_locked(exec_ctx, s, "perform_stream_op:other_side");
-    close_stream_locked(exec_ctx, s);
+    close_other_side_locked(s, "perform_stream_op:other_side");
+    close_stream_locked(s);
   }
   gpr_mu_unlock(mu);
   GRPC_ERROR_UNREF(error);
 }
 
-static void close_transport_locked(grpc_exec_ctx* exec_ctx,
-                                   inproc_transport* t) {
+static void close_transport_locked(inproc_transport* t) {
   INPROC_LOG(GPR_DEBUG, "close_transport %p %d", t, t->is_closed);
   grpc_connectivity_state_set(
-      exec_ctx, &t->connectivity, GRPC_CHANNEL_SHUTDOWN,
+      &t->connectivity, GRPC_CHANNEL_SHUTDOWN,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Closing transport."),
       "close transport");
   if (!t->is_closed) {
@@ -1039,7 +1038,7 @@
     while (t->stream_list != nullptr) {
       // cancel_stream_locked also adjusts stream list
       cancel_stream_locked(
-          exec_ctx, t->stream_list,
+          t->stream_list,
           grpc_error_set_int(
               GRPC_ERROR_CREATE_FROM_STATIC_STRING("Transport closed"),
               GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE));
@@ -1047,14 +1046,13 @@
   }
 }
 
-static void perform_transport_op(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                                 grpc_transport_op* op) {
+static void perform_transport_op(grpc_transport* gt, grpc_transport_op* op) {
   inproc_transport* t = (inproc_transport*)gt;
   INPROC_LOG(GPR_DEBUG, "perform_transport_op %p %p", t, op);
   gpr_mu_lock(&t->mu->mu);
   if (op->on_connectivity_state_change) {
     grpc_connectivity_state_notify_on_state_change(
-        exec_ctx, &t->connectivity, op->connectivity_state,
+        &t->connectivity, op->connectivity_state,
         op->on_connectivity_state_change);
   }
   if (op->set_accept_stream) {
@@ -1062,7 +1060,7 @@
     t->accept_stream_data = op->set_accept_stream_user_data;
   }
   if (op->on_consumed) {
-    GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
   }
 
   bool do_close = false;
@@ -1076,71 +1074,67 @@
   }
 
   if (do_close) {
-    close_transport_locked(exec_ctx, t);
+    close_transport_locked(t);
   }
   gpr_mu_unlock(&t->mu->mu);
 }
 
-static void destroy_stream(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                           grpc_stream* gs,
+static void destroy_stream(grpc_transport* gt, grpc_stream* gs,
                            grpc_closure* then_schedule_closure) {
   INPROC_LOG(GPR_DEBUG, "destroy_stream %p %p", gs, then_schedule_closure);
   inproc_stream* s = (inproc_stream*)gs;
   s->closure_at_destroy = then_schedule_closure;
-  really_destroy_stream(exec_ctx, s);
+  really_destroy_stream(s);
 }
 
-static void destroy_transport(grpc_exec_ctx* exec_ctx, grpc_transport* gt) {
+static void destroy_transport(grpc_transport* gt) {
   inproc_transport* t = (inproc_transport*)gt;
   INPROC_LOG(GPR_DEBUG, "destroy_transport %p", t);
   gpr_mu_lock(&t->mu->mu);
-  close_transport_locked(exec_ctx, t);
+  close_transport_locked(t);
   gpr_mu_unlock(&t->mu->mu);
-  unref_transport(exec_ctx, t->other_side);
-  unref_transport(exec_ctx, t);
+  unref_transport(t->other_side);
+  unref_transport(t);
 }
 
 /*******************************************************************************
  * INTEGRATION GLUE
  */
 
-static void set_pollset(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                        grpc_stream* gs, grpc_pollset* pollset) {
+static void set_pollset(grpc_transport* gt, grpc_stream* gs,
+                        grpc_pollset* pollset) {
   // Nothing to do here
 }
 
-static void set_pollset_set(grpc_exec_ctx* exec_ctx, grpc_transport* gt,
-                            grpc_stream* gs, grpc_pollset_set* pollset_set) {
+static void set_pollset_set(grpc_transport* gt, grpc_stream* gs,
+                            grpc_pollset_set* pollset_set) {
   // Nothing to do here
 }
 
-static grpc_endpoint* get_endpoint(grpc_exec_ctx* exec_ctx, grpc_transport* t) {
-  return nullptr;
-}
+static grpc_endpoint* get_endpoint(grpc_transport* t) { return nullptr; }
 
 /*******************************************************************************
  * GLOBAL INIT AND DESTROY
  */
-static void do_nothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+static void do_nothing(void* arg, grpc_error* error) {}
 
 void grpc_inproc_transport_init(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GRPC_CLOSURE_INIT(&do_nothing_closure, do_nothing, nullptr,
                     grpc_schedule_on_exec_ctx);
   g_empty_slice = grpc_slice_from_static_buffer(nullptr, 0);
 
   grpc_slice key_tmp = grpc_slice_from_static_string(":path");
   g_fake_path_key = grpc_slice_intern(key_tmp);
-  grpc_slice_unref_internal(&exec_ctx, key_tmp);
+  grpc_slice_unref_internal(key_tmp);
 
   g_fake_path_value = grpc_slice_from_static_string("/");
 
   grpc_slice auth_tmp = grpc_slice_from_static_string(":authority");
   g_fake_auth_key = grpc_slice_intern(auth_tmp);
-  grpc_slice_unref_internal(&exec_ctx, auth_tmp);
+  grpc_slice_unref_internal(auth_tmp);
 
   g_fake_auth_value = grpc_slice_from_static_string("inproc-fail");
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static const grpc_transport_vtable inproc_vtable = {
@@ -1152,8 +1146,7 @@
 /*******************************************************************************
  * Main inproc transport functions
  */
-static void inproc_transports_create(grpc_exec_ctx* exec_ctx,
-                                     grpc_transport** server_transport,
+static void inproc_transports_create(grpc_transport** server_transport,
                                      const grpc_channel_args* server_args,
                                      grpc_transport** client_transport,
                                      const grpc_channel_args* client_args) {
@@ -1190,7 +1183,7 @@
   GRPC_API_TRACE("grpc_inproc_channel_create(server=%p, args=%p)", 2,
                  (server, args));
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   const grpc_channel_args* server_args = grpc_server_get_channel_args(server);
 
@@ -1205,30 +1198,26 @@
 
   grpc_transport* server_transport;
   grpc_transport* client_transport;
-  inproc_transports_create(&exec_ctx, &server_transport, server_args,
-                           &client_transport, client_args);
+  inproc_transports_create(&server_transport, server_args, &client_transport,
+                           client_args);
 
-  grpc_server_setup_transport(&exec_ctx, server, server_transport, nullptr,
-                              server_args);
-  grpc_channel* channel =
-      grpc_channel_create(&exec_ctx, "inproc", client_args,
-                          GRPC_CLIENT_DIRECT_CHANNEL, client_transport);
+  grpc_server_setup_transport(server, server_transport, nullptr, server_args);
+  grpc_channel* channel = grpc_channel_create(
+      "inproc", client_args, GRPC_CLIENT_DIRECT_CHANNEL, client_transport);
 
   // Free up created channel args
-  grpc_channel_args_destroy(&exec_ctx, client_args);
+  grpc_channel_args_destroy(client_args);
 
   // Now finish scheduled operations
-  grpc_exec_ctx_finish(&exec_ctx);
 
   return channel;
 }
 
 void grpc_inproc_transport_shutdown(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice_unref_internal(&exec_ctx, g_empty_slice);
-  grpc_slice_unref_internal(&exec_ctx, g_fake_path_key);
-  grpc_slice_unref_internal(&exec_ctx, g_fake_path_value);
-  grpc_slice_unref_internal(&exec_ctx, g_fake_auth_key);
-  grpc_slice_unref_internal(&exec_ctx, g_fake_auth_value);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_slice_unref_internal(g_empty_slice);
+  grpc_slice_unref_internal(g_fake_path_key);
+  grpc_slice_unref_internal(g_fake_path_value);
+  grpc_slice_unref_internal(g_fake_auth_key);
+  grpc_slice_unref_internal(g_fake_auth_value);
 }
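
The signature churn above is the patch-wide exec_ctx cleanup: core functions no longer take a grpc_exec_ctx* argument. Public entry points such as grpc_inproc_transport_init and grpc_inproc_channel_create now place a grpc_core::ExecCtx on the stack, and core code reaches it through grpc_core::ExecCtx::Get(). A minimal sketch of the new convention follows; the entry-point name is hypothetical, and the claim that leaving scope replaces grpc_exec_ctx_finish() is an assumption inferred from the deletions above.

    #include "src/core/lib/iomgr/exec_ctx.h"

    void my_public_api_entry_point() {  // hypothetical entry point
      grpc_core::ExecCtx exec_ctx;      // scoped; replaces GRPC_EXEC_CTX_INIT
      // Core helpers called from here no longer take an exec_ctx parameter;
      // anything that needs the context uses the static accessor instead.
      grpc_millis now = grpc_core::ExecCtx::Get()->Now();
      (void)now;
      // The former explicit grpc_exec_ctx_finish(&exec_ctx) call is gone; the
      // assumption here is that leaving this scope performs the equivalent
      // flush of any work queued on the context.
    }
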
diff --git a/src/core/ext/transport/inproc/inproc_transport.h b/src/core/ext/transport/inproc/inproc_transport.h
index f27789a..7c0453e 100644
--- a/src/core/ext/transport/inproc/inproc_transport.h
+++ b/src/core/ext/transport/inproc/inproc_transport.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/transport/transport_impl.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 grpc_channel* grpc_inproc_channel_create(grpc_server* server,
                                          grpc_channel_args* args,
                                          void* reserved);
@@ -34,8 +30,4 @@
 void grpc_inproc_transport_init(void);
 void grpc_inproc_transport_shutdown(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_EXT_TRANSPORT_INPROC_INPROC_TRANSPORT_H */
diff --git a/src/core/lib/backoff/backoff.cc b/src/core/lib/backoff/backoff.cc
index dc754dd..41f625a 100644
--- a/src/core/lib/backoff/backoff.cc
+++ b/src/core/lib/backoff/backoff.cc
@@ -18,63 +18,53 @@
 
 #include "src/core/lib/backoff/backoff.h"
 
+#include <algorithm>
+
 #include <grpc/support/useful.h>
 
-void grpc_backoff_init(grpc_backoff* backoff, grpc_millis initial_backoff,
-                       double multiplier, double jitter,
-                       grpc_millis min_connect_timeout,
-                       grpc_millis max_backoff) {
-  backoff->initial_backoff = initial_backoff;
-  backoff->multiplier = multiplier;
-  backoff->jitter = jitter;
-  backoff->min_connect_timeout = min_connect_timeout;
-  backoff->max_backoff = max_backoff;
-  backoff->rng_state = (uint32_t)gpr_now(GPR_CLOCK_REALTIME).tv_nsec;
+namespace grpc_core {
+
+namespace {
+
+/* Generate a random number between 0 and 1. We roll our own RNG because seeding
+ * rand() modifies a global variable we have no control over. */
+double generate_uniform_random_number(uint32_t* rng_state) {
+  constexpr uint32_t two_raise_31 = uint32_t(1) << 31;
+  *rng_state = (1103515245 * *rng_state + 12345) % two_raise_31;
+  return *rng_state / static_cast<double>(two_raise_31);
 }
 
-grpc_backoff_result grpc_backoff_begin(grpc_exec_ctx* exec_ctx,
-                                       grpc_backoff* backoff) {
-  backoff->current_backoff = backoff->initial_backoff;
-  const grpc_millis initial_timeout =
-      GPR_MAX(backoff->initial_backoff, backoff->min_connect_timeout);
-  const grpc_millis now = grpc_exec_ctx_now(exec_ctx);
-  const grpc_backoff_result result = {now + initial_timeout,
-                                      now + backoff->current_backoff};
-  return result;
-}
-
-/* Generate a random number between 0 and 1. */
-static double generate_uniform_random_number(uint32_t* rng_state) {
-  *rng_state = (1103515245 * *rng_state + 12345) % ((uint32_t)1 << 31);
-  return *rng_state / (double)((uint32_t)1 << 31);
-}
-
-static double generate_uniform_random_number_between(uint32_t* rng_state,
-                                                     double a, double b) {
+double generate_uniform_random_number_between(uint32_t* rng_state, double a,
+                                              double b) {
   if (a == b) return a;
   if (a > b) GPR_SWAP(double, a, b);  // make sure a < b
   const double range = b - a;
   return a + generate_uniform_random_number(rng_state) * range;
 }
+}  // namespace
 
-grpc_backoff_result grpc_backoff_step(grpc_exec_ctx* exec_ctx,
-                                      grpc_backoff* backoff) {
-  backoff->current_backoff = (grpc_millis)(GPR_MIN(
-      backoff->current_backoff * backoff->multiplier, backoff->max_backoff));
+BackOff::BackOff(const Options& options) : options_(options) {
+  rng_state_ = static_cast<uint32_t>(gpr_now(GPR_CLOCK_REALTIME).tv_nsec);
+}
+
+grpc_millis BackOff::Begin() {
+  current_backoff_ = options_.initial_backoff();
+  return current_backoff_ + grpc_core::ExecCtx::Get()->Now();
+}
+
+grpc_millis BackOff::Step() {
+  current_backoff_ =
+      (grpc_millis)(std::min(current_backoff_ * options_.multiplier(),
+                             (double)options_.max_backoff()));
   const double jitter = generate_uniform_random_number_between(
-      &backoff->rng_state, -backoff->jitter * backoff->current_backoff,
-      backoff->jitter * backoff->current_backoff);
-  const grpc_millis current_timeout =
-      GPR_MAX((grpc_millis)(backoff->current_backoff + jitter),
-              backoff->min_connect_timeout);
-  const grpc_millis next_timeout = GPR_MIN(
-      (grpc_millis)(backoff->current_backoff + jitter), backoff->max_backoff);
-  const grpc_millis now = grpc_exec_ctx_now(exec_ctx);
-  const grpc_backoff_result result = {now + current_timeout,
-                                      now + next_timeout};
-  return result;
+      &rng_state_, -options_.jitter() * current_backoff_,
+      options_.jitter() * current_backoff_);
+  const grpc_millis next_timeout = (grpc_millis)(current_backoff_ + jitter);
+  return next_timeout + grpc_core::ExecCtx::Get()->Now();
 }
 
-void grpc_backoff_reset(grpc_backoff* backoff) {
-  backoff->current_backoff = backoff->initial_backoff;
-}
+void BackOff::Reset() { current_backoff_ = options_.initial_backoff(); }
+
+void BackOff::SetRandomSeed(uint32_t seed) { rng_state_ = seed; }
+
+}  // namespace grpc_core
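
The new BackOff::Step() draws its jitter from the hand-rolled linear congruential generator above. Below is a standalone sketch (plain C++, no gRPC dependencies; all names are local to the sketch) that reproduces the (1103515245 * x + 12345) mod 2^31 recurrence, showing that the values stay in [0, 1) and how the jitter range scales with the current backoff.

    #include <cstdint>
    #include <cstdio>

    // Same recurrence as generate_uniform_random_number() above.
    static double uniform01(uint32_t* state) {
      constexpr uint32_t two_raise_31 = uint32_t(1) << 31;
      *state = (1103515245 * *state + 12345) % two_raise_31;
      return *state / static_cast<double>(two_raise_31);
    }

    int main() {
      uint32_t state = 42;                    // fixed seed, as SetRandomSeed() permits
      const double current_backoff = 1000.0;  // illustrative value, in millis
      const double jitter_factor = 0.2;       // illustrative value
      for (int i = 0; i < 3; ++i) {
        const double r = uniform01(&state);   // always in [0, 1)
        // Matches generate_uniform_random_number_between(state, -j*b, +j*b):
        const double jitter = -jitter_factor * current_backoff +
                              r * (2 * jitter_factor * current_backoff);
        std::printf("r=%.4f jitter=%.1f\n", r, jitter);
      }
      return 0;
    }
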
diff --git a/src/core/lib/backoff/backoff.h b/src/core/lib/backoff/backoff.h
index 1067281..84ef9b8 100644
--- a/src/core/lib/backoff/backoff.h
+++ b/src/core/lib/backoff/backoff.h
@@ -21,63 +21,69 @@
 
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+namespace grpc_core {
 
-typedef struct {
-  /// const:  how long to wait after the first failure before retrying
-  grpc_millis initial_backoff;
+/// Implementation of the backoff mechanism described in
+/// doc/connection-backoff.md
+class BackOff {
+ public:
+  class Options;
 
-  /// const: factor with which to multiply backoff after a failed retry
-  double multiplier;
+  /// Initialize backoff machinery - does not need to be destroyed
+  explicit BackOff(const Options& options);
 
-  /// const: amount to randomize backoffs
-  double jitter;
+  /// Begin retry loop: returns the deadline to be used for the next attempt,
+  /// following the backoff strategy.
+  grpc_millis Begin();
+  /// Step a retry loop: returns the deadline to be used for the next attempt,
+  /// following the backoff strategy.
+  grpc_millis Step();
+  /// Reset the backoff, so the next Step() will behave like a
+  /// Begin().
+  void Reset();
 
-  /// const: minimum time between retries
-  grpc_millis min_connect_timeout;
+  void SetRandomSeed(uint32_t seed);
 
-  /// const: maximum time between retries
-  grpc_millis max_backoff;
+  class Options {
+   public:
+    Options& set_initial_backoff(grpc_millis initial_backoff) {
+      initial_backoff_ = initial_backoff;
+      return *this;
+    }
+    Options& set_multiplier(double multiplier) {
+      multiplier_ = multiplier;
+      return *this;
+    }
+    Options& set_jitter(double jitter) {
+      jitter_ = jitter;
+      return *this;
+    }
+    Options& set_max_backoff(grpc_millis max_backoff) {
+      max_backoff_ = max_backoff;
+      return *this;
+    }
+    /// how long to wait after the first failure before retrying
+    grpc_millis initial_backoff() const { return initial_backoff_; }
+    /// factor with which to multiply backoff after a failed retry
+    double multiplier() const { return multiplier_; }
+    /// amount to randomize backoffs
+    double jitter() const { return jitter_; }
+    /// maximum time between retries
+    grpc_millis max_backoff() const { return max_backoff_; }
 
+   private:
+    grpc_millis initial_backoff_;
+    double multiplier_;
+    double jitter_;
+    grpc_millis max_backoff_;
+  };  // class Options
+
+ private:
+  const Options options_;
   /// current delay before retries
-  grpc_millis current_backoff;
+  grpc_millis current_backoff_;
+  uint32_t rng_state_;
+};
 
-  /// random number generator
-  uint32_t rng_state;
-} grpc_backoff;
-
-typedef struct {
-  /// Deadline to be used for the current attempt.
-  grpc_millis current_deadline;
-
-  /// Deadline to be used for the next attempt, following the backoff strategy.
-  grpc_millis next_attempt_start_time;
-} grpc_backoff_result;
-
-/// Initialize backoff machinery - does not need to be destroyed
-void grpc_backoff_init(grpc_backoff* backoff, grpc_millis initial_backoff,
-                       double multiplier, double jitter,
-                       grpc_millis min_connect_timeout,
-                       grpc_millis max_backoff);
-
-/// Begin retry loop: returns the deadlines to be used for the current attempt
-/// and the subsequent retry, if any.
-grpc_backoff_result grpc_backoff_begin(grpc_exec_ctx* exec_ctx,
-                                       grpc_backoff* backoff);
-
-/// Step a retry loop: returns the deadlines to be used for the current attempt
-/// and the subsequent retry, if any.
-grpc_backoff_result grpc_backoff_step(grpc_exec_ctx* exec_ctx,
-                                      grpc_backoff* backoff);
-
-/// Reset the backoff, so the next grpc_backoff_step will be a
-/// grpc_backoff_begin.
-void grpc_backoff_reset(grpc_backoff* backoff);
-
-#ifdef __cplusplus
-}
-#endif
-
+}  // namespace grpc_core
 #endif /* GRPC_CORE_LIB_BACKOFF_BACKOFF_H */
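
A hedged usage sketch of the interface declared above; the numeric values are illustrative only. Begin() and Step() return absolute grpc_millis deadlines measured against grpc_core::ExecCtx::Get()->Now(), so an ExecCtx must be in scope when they are called.

    grpc_core::BackOff::Options options;
    options.set_initial_backoff(100)        // ms until the first retry
        .set_multiplier(1.6)                // growth factor per failure
        .set_jitter(0.2)                    // +/- 20% randomization
        .set_max_backoff(120 * 1000);       // cap the delay at 120s
    grpc_core::BackOff backoff(options);

    grpc_core::ExecCtx exec_ctx;            // Begin()/Step() read ExecCtx::Get()->Now()
    grpc_millis first_deadline = backoff.Begin();  // now + initial backoff
    grpc_millis next_deadline = backoff.Step();    // grows by multiplier, jittered
    backoff.Reset();                        // back to the initial backoff
    (void)first_deadline;
    (void)next_deadline;
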
diff --git a/src/core/lib/channel/channel_args.cc b/src/core/lib/channel/channel_args.cc
index 735fcbe..578475b 100644
--- a/src/core/lib/channel/channel_args.cc
+++ b/src/core/lib/channel/channel_args.cc
@@ -188,7 +188,7 @@
   return b;
 }
 
-void grpc_channel_args_destroy(grpc_exec_ctx* exec_ctx, grpc_channel_args* a) {
+void grpc_channel_args_destroy(grpc_channel_args* a) {
   size_t i;
   if (!a) return;
   for (i = 0; i < a->num_args; i++) {
@@ -199,8 +199,7 @@
       case GRPC_ARG_INTEGER:
         break;
       case GRPC_ARG_POINTER:
-        a->args[i].value.pointer.vtable->destroy(exec_ctx,
-                                                 a->args[i].value.pointer.p);
+        a->args[i].value.pointer.vtable->destroy(a->args[i].value.pointer.p);
         break;
     }
     gpr_free(a->args[i].key);
@@ -299,8 +298,7 @@
 }
 
 grpc_channel_args* grpc_channel_args_compression_algorithm_set_state(
-    grpc_exec_ctx* exec_ctx, grpc_channel_args** a,
-    grpc_compression_algorithm algorithm, int state) {
+    grpc_channel_args** a, grpc_compression_algorithm algorithm, int state) {
   int* states_arg = nullptr;
   grpc_channel_args* result = *a;
   const int states_arg_found =
@@ -333,15 +331,15 @@
       GPR_BITCLEAR((unsigned*)&tmp.value.integer, algorithm);
     }
     result = grpc_channel_args_copy_and_add(*a, &tmp, 1);
-    grpc_channel_args_destroy(exec_ctx, *a);
+    grpc_channel_args_destroy(*a);
     *a = result;
   }
   return result;
 }
 
 grpc_channel_args* grpc_channel_args_stream_compression_algorithm_set_state(
-    grpc_exec_ctx* exec_ctx, grpc_channel_args** a,
-    grpc_stream_compression_algorithm algorithm, int state) {
+    grpc_channel_args** a, grpc_stream_compression_algorithm algorithm,
+    int state) {
   int* states_arg = nullptr;
   grpc_channel_args* result = *a;
   const int states_arg_found =
@@ -375,7 +373,7 @@
       GPR_BITCLEAR((unsigned*)&tmp.value.integer, algorithm);
     }
     result = grpc_channel_args_copy_and_add(*a, &tmp, 1);
-    grpc_channel_args_destroy(exec_ctx, *a);
+    grpc_channel_args_destroy(*a);
     *a = result;
   }
   return result;
diff --git a/src/core/lib/channel/channel_args.h b/src/core/lib/channel/channel_args.h
index d36761d..9c7d06f 100644
--- a/src/core/lib/channel/channel_args.h
+++ b/src/core/lib/channel/channel_args.h
@@ -23,10 +23,6 @@
 #include <grpc/grpc.h>
 #include "src/core/lib/iomgr/socket_mutator.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // Channel args are intentionally immutable, to avoid the need for locking.
 
 /** Copy the arguments in \a src into a new instance */
@@ -57,7 +53,7 @@
                                            const grpc_channel_args* b);
 
 /** Destroy arguments created by \a grpc_channel_args_copy */
-void grpc_channel_args_destroy(grpc_exec_ctx* exec_ctx, grpc_channel_args* a);
+void grpc_channel_args_destroy(grpc_channel_args* a);
 
 /** Returns the compression algorithm set in \a a. */
 grpc_compression_algorithm grpc_channel_args_get_compression_algorithm(
@@ -89,8 +85,7 @@
  * modified to point to the returned instance (which may be different from the
  * input value of \a a). */
 grpc_channel_args* grpc_channel_args_compression_algorithm_set_state(
-    grpc_exec_ctx* exec_ctx, grpc_channel_args** a,
-    grpc_compression_algorithm algorithm, int enabled);
+    grpc_channel_args** a, grpc_compression_algorithm algorithm, int enabled);
 
 /** Sets the support for the given stream compression algorithm. By default, all
  * stream compression algorithms are enabled. It's an error to disable an
@@ -100,8 +95,8 @@
  * modified to point to the returned instance (which may be different from the
  * input value of \a a). */
 grpc_channel_args* grpc_channel_args_stream_compression_algorithm_set_state(
-    grpc_exec_ctx* exec_ctx, grpc_channel_args** a,
-    grpc_stream_compression_algorithm algorithm, int enabled);
+    grpc_channel_args** a, grpc_stream_compression_algorithm algorithm,
+    int enabled);
 
 /** Returns the bitset representing the support state (true for enabled, false
  * for disabled) for compression algorithms.
@@ -153,8 +148,4 @@
 grpc_arg grpc_channel_arg_pointer_create(char* name, void* value,
                                          const grpc_arg_pointer_vtable* vtable);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_ARGS_H */
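
A hedged call-site sketch for the trimmed signatures above. The helper function is hypothetical; the only assumption beyond this header is that a grpc_core::ExecCtx should be alive on the stack, as the call sites earlier in this patch arrange before touching channel args.

    #include "src/core/lib/channel/channel_args.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    // Hypothetical helper: dispose of args without threading an exec_ctx through.
    static void drop_channel_args(grpc_channel_args* args) {
      grpc_core::ExecCtx exec_ctx;      // scopes any work the pointer vtables defer
      grpc_channel_args_destroy(args);  // was grpc_channel_args_destroy(exec_ctx, args)
    }
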
diff --git a/src/core/lib/channel/channel_stack.cc b/src/core/lib/channel/channel_stack.cc
index 7629d18..195fe0b 100644
--- a/src/core/lib/channel/channel_stack.cc
+++ b/src/core/lib/channel/channel_stack.cc
@@ -88,8 +88,8 @@
 }
 
 grpc_error* grpc_channel_stack_init(
-    grpc_exec_ctx* exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy,
-    void* destroy_arg, const grpc_channel_filter** filters, size_t filter_count,
+    int initial_refs, grpc_iomgr_cb_func destroy, void* destroy_arg,
+    const grpc_channel_filter** filters, size_t filter_count,
     const grpc_channel_args* channel_args, grpc_transport* optional_transport,
     const char* name, grpc_channel_stack* stack) {
   size_t call_size =
@@ -117,8 +117,7 @@
     args.is_last = i == (filter_count - 1);
     elems[i].filter = filters[i];
     elems[i].channel_data = user_data;
-    grpc_error* error =
-        elems[i].filter->init_channel_elem(exec_ctx, &elems[i], &args);
+    grpc_error* error = elems[i].filter->init_channel_elem(&elems[i], &args);
     if (error != GRPC_ERROR_NONE) {
       if (first_error == GRPC_ERROR_NONE) {
         first_error = error;
@@ -138,20 +137,18 @@
   return first_error;
 }
 
-void grpc_channel_stack_destroy(grpc_exec_ctx* exec_ctx,
-                                grpc_channel_stack* stack) {
+void grpc_channel_stack_destroy(grpc_channel_stack* stack) {
   grpc_channel_element* channel_elems = CHANNEL_ELEMS_FROM_STACK(stack);
   size_t count = stack->count;
   size_t i;
 
   /* destroy per-filter data */
   for (i = 0; i < count; i++) {
-    channel_elems[i].filter->destroy_channel_elem(exec_ctx, &channel_elems[i]);
+    channel_elems[i].filter->destroy_channel_elem(&channel_elems[i]);
   }
 }
 
-grpc_error* grpc_call_stack_init(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_stack* channel_stack,
+grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
                                  int initial_refs, grpc_iomgr_cb_func destroy,
                                  void* destroy_arg,
                                  const grpc_call_element_args* elem_args) {
@@ -174,8 +171,8 @@
     call_elems[i].filter = channel_elems[i].filter;
     call_elems[i].channel_data = channel_elems[i].channel_data;
     call_elems[i].call_data = user_data;
-    grpc_error* error = call_elems[i].filter->init_call_elem(
-        exec_ctx, &call_elems[i], elem_args);
+    grpc_error* error =
+        call_elems[i].filter->init_call_elem(&call_elems[i], elem_args);
     if (error != GRPC_ERROR_NONE) {
       if (first_error == GRPC_ERROR_NONE) {
         first_error = error;
@@ -189,8 +186,7 @@
   return first_error;
 }
 
-void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
-                                                grpc_call_stack* call_stack,
+void grpc_call_stack_set_pollset_or_pollset_set(grpc_call_stack* call_stack,
                                                 grpc_polling_entity* pollent) {
   size_t count = call_stack->count;
   grpc_call_element* call_elems;
@@ -203,18 +199,16 @@
 
   /* init per-filter data */
   for (i = 0; i < count; i++) {
-    call_elems[i].filter->set_pollset_or_pollset_set(exec_ctx, &call_elems[i],
-                                                     pollent);
+    call_elems[i].filter->set_pollset_or_pollset_set(&call_elems[i], pollent);
     user_data +=
         ROUND_UP_TO_ALIGNMENT_SIZE(call_elems[i].filter->sizeof_call_data);
   }
 }
 
 void grpc_call_stack_ignore_set_pollset_or_pollset_set(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_polling_entity* pollent) {}
+    grpc_call_element* elem, grpc_polling_entity* pollent) {}
 
-void grpc_call_stack_destroy(grpc_exec_ctx* exec_ctx, grpc_call_stack* stack,
+void grpc_call_stack_destroy(grpc_call_stack* stack,
                              const grpc_call_final_info* final_info,
                              grpc_closure* then_schedule_closure) {
   grpc_call_element* elems = CALL_ELEMS_FROM_STACK(stack);
@@ -224,29 +218,27 @@
   /* destroy per-filter data */
   for (i = 0; i < count; i++) {
     elems[i].filter->destroy_call_elem(
-        exec_ctx, &elems[i], final_info,
+        &elems[i], final_info,
         i == count - 1 ? then_schedule_closure : nullptr);
   }
 }
 
-void grpc_call_next_op(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+void grpc_call_next_op(grpc_call_element* elem,
                        grpc_transport_stream_op_batch* op) {
   grpc_call_element* next_elem = elem + 1;
   GRPC_CALL_LOG_OP(GPR_INFO, next_elem, op);
-  next_elem->filter->start_transport_stream_op_batch(exec_ctx, next_elem, op);
+  next_elem->filter->start_transport_stream_op_batch(next_elem, op);
 }
 
-void grpc_channel_next_get_info(grpc_exec_ctx* exec_ctx,
-                                grpc_channel_element* elem,
+void grpc_channel_next_get_info(grpc_channel_element* elem,
                                 const grpc_channel_info* channel_info) {
   grpc_channel_element* next_elem = elem + 1;
-  next_elem->filter->get_channel_info(exec_ctx, next_elem, channel_info);
+  next_elem->filter->get_channel_info(next_elem, channel_info);
 }
 
-void grpc_channel_next_op(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
-                          grpc_transport_op* op) {
+void grpc_channel_next_op(grpc_channel_element* elem, grpc_transport_op* op) {
   grpc_channel_element* next_elem = elem + 1;
-  next_elem->filter->start_transport_op(exec_ctx, next_elem, op);
+  next_elem->filter->start_transport_op(next_elem, op);
 }
 
 grpc_channel_stack* grpc_channel_stack_from_top_element(
diff --git a/src/core/lib/channel/channel_stack.h b/src/core/lib/channel/channel_stack.h
index 830c112..716866b 100644
--- a/src/core/lib/channel/channel_stack.h
+++ b/src/core/lib/channel/channel_stack.h
@@ -45,10 +45,6 @@
 #include "src/core/lib/support/arena.h"
 #include "src/core/lib/transport/transport.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_channel_element grpc_channel_element;
 typedef struct grpc_call_element grpc_call_element;
 
@@ -100,14 +96,12 @@
 typedef struct {
   /* Called to eg. send/receive data on a call.
      See grpc_call_next_op on how to call the next element in the stack */
-  void (*start_transport_stream_op_batch)(grpc_exec_ctx* exec_ctx,
-                                          grpc_call_element* elem,
+  void (*start_transport_stream_op_batch)(grpc_call_element* elem,
                                           grpc_transport_stream_op_batch* op);
   /* Called to handle channel level operations - e.g. new calls, or transport
      closure.
      See grpc_channel_next_op on how to call the next element in the stack */
-  void (*start_transport_op)(grpc_exec_ctx* exec_ctx,
-                             grpc_channel_element* elem, grpc_transport_op* op);
+  void (*start_transport_op)(grpc_channel_element* elem, grpc_transport_op* op);
 
   /* sizeof(per call data) */
   size_t sizeof_call_data;
@@ -120,11 +114,9 @@
      transport and is on the server. Most filters want to ignore this
      argument.
      Implementations may assume that elem->call_data is all zeros. */
-  grpc_error* (*init_call_elem)(grpc_exec_ctx* exec_ctx,
-                                grpc_call_element* elem,
+  grpc_error* (*init_call_elem)(grpc_call_element* elem,
                                 const grpc_call_element_args* args);
-  void (*set_pollset_or_pollset_set)(grpc_exec_ctx* exec_ctx,
-                                     grpc_call_element* elem,
+  void (*set_pollset_or_pollset_set)(grpc_call_element* elem,
                                      grpc_polling_entity* pollent);
   /* Destroy per call data.
      The filter does not need to do any chaining.
@@ -132,7 +124,7 @@
      \a then_schedule_closure that should be passed to GRPC_CLOSURE_SCHED when
      destruction is complete. \a final_info contains data about the completed
      call, mainly for reporting purposes. */
-  void (*destroy_call_elem)(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+  void (*destroy_call_elem)(grpc_call_element* elem,
                             const grpc_call_final_info* final_info,
                             grpc_closure* then_schedule_closure);
 
@@ -145,16 +137,14 @@
      useful for asserting correct configuration by upper layer code.
      The filter does not need to do any chaining.
      Implementations may assume that elem->call_data is all zeros. */
-  grpc_error* (*init_channel_elem)(grpc_exec_ctx* exec_ctx,
-                                   grpc_channel_element* elem,
+  grpc_error* (*init_channel_elem)(grpc_channel_element* elem,
                                    grpc_channel_element_args* args);
   /* Destroy per channel data.
      The filter does not need to do any chaining */
-  void (*destroy_channel_elem)(grpc_exec_ctx* exec_ctx,
-                               grpc_channel_element* elem);
+  void (*destroy_channel_elem)(grpc_channel_element* elem);
 
   /* Implement grpc_channel_get_info() */
-  void (*get_channel_info)(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+  void (*get_channel_info)(grpc_channel_element* elem,
                            const grpc_channel_info* channel_info);
 
   /* The name of this filter */
@@ -212,68 +202,62 @@
                                size_t filter_count);
 /* Initialize a channel stack given some filters */
 grpc_error* grpc_channel_stack_init(
-    grpc_exec_ctx* exec_ctx, int initial_refs, grpc_iomgr_cb_func destroy,
-    void* destroy_arg, const grpc_channel_filter** filters, size_t filter_count,
+    int initial_refs, grpc_iomgr_cb_func destroy, void* destroy_arg,
+    const grpc_channel_filter** filters, size_t filter_count,
     const grpc_channel_args* args, grpc_transport* optional_transport,
     const char* name, grpc_channel_stack* stack);
 /* Destroy a channel stack */
-void grpc_channel_stack_destroy(grpc_exec_ctx* exec_ctx,
-                                grpc_channel_stack* stack);
+void grpc_channel_stack_destroy(grpc_channel_stack* stack);
 
 /* Initialize a call stack given a channel stack. transport_server_data is
    expected to be NULL on a client, or an opaque transport owned pointer on the
    server. */
-grpc_error* grpc_call_stack_init(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_stack* channel_stack,
+grpc_error* grpc_call_stack_init(grpc_channel_stack* channel_stack,
                                  int initial_refs, grpc_iomgr_cb_func destroy,
                                  void* destroy_arg,
                                  const grpc_call_element_args* elem_args);
 /* Set a pollset or a pollset_set for a call stack: must occur before the first
  * op is started */
-void grpc_call_stack_set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
-                                                grpc_call_stack* call_stack,
+void grpc_call_stack_set_pollset_or_pollset_set(grpc_call_stack* call_stack,
                                                 grpc_polling_entity* pollent);
 
 #ifndef NDEBUG
 #define GRPC_CALL_STACK_REF(call_stack, reason) \
   grpc_stream_ref(&(call_stack)->refcount, reason)
-#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
-  grpc_stream_unref(exec_ctx, &(call_stack)->refcount, reason)
+#define GRPC_CALL_STACK_UNREF(call_stack, reason) \
+  grpc_stream_unref(&(call_stack)->refcount, reason)
 #define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
   grpc_stream_ref(&(channel_stack)->refcount, reason)
-#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
-  grpc_stream_unref(exec_ctx, &(channel_stack)->refcount, reason)
+#define GRPC_CHANNEL_STACK_UNREF(channel_stack, reason) \
+  grpc_stream_unref(&(channel_stack)->refcount, reason)
 #else
 #define GRPC_CALL_STACK_REF(call_stack, reason) \
   grpc_stream_ref(&(call_stack)->refcount)
-#define GRPC_CALL_STACK_UNREF(exec_ctx, call_stack, reason) \
-  grpc_stream_unref(exec_ctx, &(call_stack)->refcount)
+#define GRPC_CALL_STACK_UNREF(call_stack, reason) \
+  grpc_stream_unref(&(call_stack)->refcount)
 #define GRPC_CHANNEL_STACK_REF(channel_stack, reason) \
   grpc_stream_ref(&(channel_stack)->refcount)
-#define GRPC_CHANNEL_STACK_UNREF(exec_ctx, channel_stack, reason) \
-  grpc_stream_unref(exec_ctx, &(channel_stack)->refcount)
+#define GRPC_CHANNEL_STACK_UNREF(channel_stack, reason) \
+  grpc_stream_unref(&(channel_stack)->refcount)
 #endif
 
 /* Destroy a call stack */
-void grpc_call_stack_destroy(grpc_exec_ctx* exec_ctx, grpc_call_stack* stack,
+void grpc_call_stack_destroy(grpc_call_stack* stack,
                              const grpc_call_final_info* final_info,
                              grpc_closure* then_schedule_closure);
 
 /* Ignore set pollset{_set} - used by filters if they don't care about pollsets
  * at all. Does nothing. */
 void grpc_call_stack_ignore_set_pollset_or_pollset_set(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_polling_entity* pollent);
+    grpc_call_element* elem, grpc_polling_entity* pollent);
 /* Call the next operation in a call stack */
-void grpc_call_next_op(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+void grpc_call_next_op(grpc_call_element* elem,
                        grpc_transport_stream_op_batch* op);
 /* Call the next operation (depending on call directionality) in a channel
    stack */
-void grpc_channel_next_op(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
-                          grpc_transport_op* op);
+void grpc_channel_next_op(grpc_channel_element* elem, grpc_transport_op* op);
 /* Pass through a request to get_channel_info() to the next child element */
-void grpc_channel_next_get_info(grpc_exec_ctx* exec_ctx,
-                                grpc_channel_element* elem,
+void grpc_channel_next_get_info(grpc_channel_element* elem,
                                 const grpc_channel_info* channel_info);
 
 /* Given the top element of a channel stack, get the channel stack itself */
@@ -291,8 +275,4 @@
 #define GRPC_CALL_LOG_OP(sev, elem, op) \
   if (grpc_trace_channel.enabled()) grpc_call_log_op(sev, elem, op)
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_H */
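
For a filter author, the vtable entries above simply lose their leading grpc_exec_ctx* parameter. A hedged sketch of the callbacks of a hypothetical pass-through filter against the revised declarations; only the signatures, the chaining helpers, and the two-argument GRPC_CLOSURE_SCHED form (seen earlier in this patch) come from the source.

    static void noop_start_transport_stream_op_batch(
        grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
      grpc_call_next_op(elem, batch);  // hand the batch to the next element
    }

    static void noop_start_transport_op(grpc_channel_element* elem,
                                        grpc_transport_op* op) {
      grpc_channel_next_op(elem, op);  // likewise for channel-level ops
    }

    static grpc_error* noop_init_call_elem(grpc_call_element* elem,
                                           const grpc_call_element_args* args) {
      return GRPC_ERROR_NONE;  // nothing to set up in this sketch
    }

    static void noop_destroy_call_elem(grpc_call_element* elem,
                                       const grpc_call_final_info* final_info,
                                       grpc_closure* then_schedule_closure) {
      GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
    }
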
diff --git a/src/core/lib/channel/channel_stack_builder.cc b/src/core/lib/channel/channel_stack_builder.cc
index 77b7854..fcba826 100644
--- a/src/core/lib/channel/channel_stack_builder.cc
+++ b/src/core/lib/channel/channel_stack_builder.cc
@@ -150,10 +150,9 @@
 }
 
 void grpc_channel_stack_builder_set_channel_arguments(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
-    const grpc_channel_args* args) {
+    grpc_channel_stack_builder* builder, const grpc_channel_args* args) {
   if (builder->args != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, builder->args);
+    grpc_channel_args_destroy(builder->args);
   }
   builder->args = grpc_channel_args_copy(args);
 }
@@ -241,8 +240,7 @@
   return true;
 }
 
-void grpc_channel_stack_builder_destroy(grpc_exec_ctx* exec_ctx,
-                                        grpc_channel_stack_builder* builder) {
+void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder* builder) {
   filter_node* p = builder->begin.next;
   while (p != &builder->end) {
     filter_node* next = p->next;
@@ -250,16 +248,15 @@
     p = next;
   }
   if (builder->args != nullptr) {
-    grpc_channel_args_destroy(exec_ctx, builder->args);
+    grpc_channel_args_destroy(builder->args);
   }
   gpr_free(builder->target);
   gpr_free(builder);
 }
 
 grpc_error* grpc_channel_stack_builder_finish(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
-    size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy,
-    void* destroy_arg, void** result) {
+    grpc_channel_stack_builder* builder, size_t prefix_bytes, int initial_refs,
+    grpc_iomgr_cb_func destroy, void* destroy_arg, void** result) {
   // count the number of filters
   size_t num_filters = 0;
   for (filter_node* p = builder->begin.next; p != &builder->end; p = p->next) {
@@ -284,12 +281,12 @@
       (grpc_channel_stack*)((char*)(*result) + prefix_bytes);
   // and initialize it
   grpc_error* error = grpc_channel_stack_init(
-      exec_ctx, initial_refs, destroy,
-      destroy_arg == nullptr ? *result : destroy_arg, filters, num_filters,
-      builder->args, builder->transport, builder->name, channel_stack);
+      initial_refs, destroy, destroy_arg == nullptr ? *result : destroy_arg,
+      filters, num_filters, builder->args, builder->transport, builder->name,
+      channel_stack);
 
   if (error != GRPC_ERROR_NONE) {
-    grpc_channel_stack_destroy(exec_ctx, channel_stack);
+    grpc_channel_stack_destroy(channel_stack);
     gpr_free(*result);
     *result = nullptr;
   } else {
@@ -305,7 +302,7 @@
     }
   }
 
-  grpc_channel_stack_builder_destroy(exec_ctx, builder);
+  grpc_channel_stack_builder_destroy(builder);
   gpr_free((grpc_channel_filter**)filters);
 
   return error;
diff --git a/src/core/lib/channel/channel_stack_builder.h b/src/core/lib/channel/channel_stack_builder.h
index 8e3ec2e..d00ddc6 100644
--- a/src/core/lib/channel/channel_stack_builder.h
+++ b/src/core/lib/channel/channel_stack_builder.h
@@ -24,10 +24,6 @@
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /// grpc_channel_stack_builder offers a programmatic interface to select
 /// and order channel filters
 typedef struct grpc_channel_stack_builder grpc_channel_stack_builder;
@@ -58,8 +54,7 @@
 
 /// Set channel arguments: copies args
 void grpc_channel_stack_builder_set_channel_arguments(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
-    const grpc_channel_args* args);
+    grpc_channel_stack_builder* builder, const grpc_channel_args* args);
 
 /// Return a borrowed pointer to the channel arguments
 const grpc_channel_args* grpc_channel_stack_builder_get_channel_arguments(
@@ -152,18 +147,12 @@
 /// \a initial_refs, \a destroy, \a destroy_arg are as per
 /// grpc_channel_stack_init
 grpc_error* grpc_channel_stack_builder_finish(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
-    size_t prefix_bytes, int initial_refs, grpc_iomgr_cb_func destroy,
-    void* destroy_arg, void** result);
+    grpc_channel_stack_builder* builder, size_t prefix_bytes, int initial_refs,
+    grpc_iomgr_cb_func destroy, void* destroy_arg, void** result);
 
 /// Destroy the builder without creating a channel stack
-void grpc_channel_stack_builder_destroy(grpc_exec_ctx* exec_ctx,
-                                        grpc_channel_stack_builder* builder);
+void grpc_channel_stack_builder_destroy(grpc_channel_stack_builder* builder);
 
 extern grpc_core::TraceFlag grpc_trace_channel_stack_builder;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_CHANNEL_CHANNEL_STACK_BUILDER_H */
diff --git a/src/core/lib/channel/connected_channel.cc b/src/core/lib/channel/connected_channel.cc
index af2f88a..9d07cff 100644
--- a/src/core/lib/channel/connected_channel.cc
+++ b/src/core/lib/channel/connected_channel.cc
@@ -51,17 +51,14 @@
   callback_state recv_message_ready;
 } call_data;
 
-static void run_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
+static void run_in_call_combiner(void* arg, grpc_error* error) {
   callback_state* state = (callback_state*)arg;
-  GRPC_CALL_COMBINER_START(exec_ctx, state->call_combiner,
-                           state->original_closure, GRPC_ERROR_REF(error),
-                           state->reason);
+  GRPC_CALL_COMBINER_START(state->call_combiner, state->original_closure,
+                           GRPC_ERROR_REF(error), state->reason);
 }
 
-static void run_cancel_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
-  run_in_call_combiner(exec_ctx, arg, error);
+static void run_cancel_in_call_combiner(void* arg, grpc_error* error) {
+  run_in_call_combiner(arg, error);
   gpr_free(arg);
 }
 
@@ -98,8 +95,7 @@
 /* Intercept a call operation and either push it directly up or translate it
    into transport stream operations */
 static void con_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* batch) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
   if (batch->recv_initial_metadata) {
@@ -126,58 +122,52 @@
     callback_state* state = get_state_for_batch(calld, batch);
     intercept_callback(calld, state, false, "on_complete", &batch->on_complete);
   }
-  grpc_transport_perform_stream_op(exec_ctx, chand->transport,
-                                   TRANSPORT_STREAM_FROM_CALL_DATA(calld),
-                                   batch);
-  GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
-                          "passed batch to transport");
+  grpc_transport_perform_stream_op(
+      chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld), batch);
+  GRPC_CALL_COMBINER_STOP(calld->call_combiner, "passed batch to transport");
 }
 
-static void con_start_transport_op(grpc_exec_ctx* exec_ctx,
-                                   grpc_channel_element* elem,
+static void con_start_transport_op(grpc_channel_element* elem,
                                    grpc_transport_op* op) {
   channel_data* chand = (channel_data*)elem->channel_data;
-  grpc_transport_perform_op(exec_ctx, chand->transport, op);
+  grpc_transport_perform_op(chand->transport, op);
 }
 
 /* Constructor for call_data */
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
   calld->call_combiner = args->call_combiner;
   int r = grpc_transport_init_stream(
-      exec_ctx, chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
+      chand->transport, TRANSPORT_STREAM_FROM_CALL_DATA(calld),
       &args->call_stack->refcount, args->server_transport_data, args->arena);
   return r == 0 ? GRPC_ERROR_NONE
                 : GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                       "transport stream initialization failed");
 }
 
-static void set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
-                                       grpc_call_element* elem,
+static void set_pollset_or_pollset_set(grpc_call_element* elem,
                                        grpc_polling_entity* pollent) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
-  grpc_transport_set_pops(exec_ctx, chand->transport,
+  grpc_transport_set_pops(chand->transport,
                           TRANSPORT_STREAM_FROM_CALL_DATA(calld), pollent);
 }
 
 /* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* then_schedule_closure) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
-  grpc_transport_destroy_stream(exec_ctx, chand->transport,
+  grpc_transport_destroy_stream(chand->transport,
                                 TRANSPORT_STREAM_FROM_CALL_DATA(calld),
                                 then_schedule_closure);
 }
 
 /* Constructor for channel_data */
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   channel_data* cd = (channel_data*)elem->channel_data;
   GPR_ASSERT(args->is_last);
@@ -186,17 +176,15 @@
 }
 
 /* Destructor for channel_data */
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {
+static void destroy_channel_elem(grpc_channel_element* elem) {
   channel_data* cd = (channel_data*)elem->channel_data;
   if (cd->transport) {
-    grpc_transport_destroy(exec_ctx, cd->transport);
+    grpc_transport_destroy(cd->transport);
   }
 }
 
 /* No-op. */
-static void con_get_channel_info(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem,
+static void con_get_channel_info(grpc_channel_element* elem,
                                  const grpc_channel_info* channel_info) {}
 
 const grpc_channel_filter grpc_connected_filter = {
@@ -230,8 +218,7 @@
       grpc_transport_stream_size((grpc_transport*)t);
 }
 
-bool grpc_add_connected_filter(grpc_exec_ctx* exec_ctx,
-                               grpc_channel_stack_builder* builder,
+bool grpc_add_connected_filter(grpc_channel_stack_builder* builder,
                                void* arg_must_be_null) {
   GPR_ASSERT(arg_must_be_null == nullptr);
   grpc_transport* t = grpc_channel_stack_builder_get_transport(builder);
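
As a quick reference for the pattern in this file: filter callbacks now take the element directly, with the execution context implicit in the thread-local grpc_core::ExecCtx. A sketch under that assumption follows; the my_filter_* names are hypothetical, and grpc_call_next_op is assumed to have the matching exec_ctx-free signature.

    #include "src/core/lib/channel/channel_stack.h"

    // Hypothetical call-element constructor: same shape as init_call_elem above.
    static grpc_error* my_filter_init_call_elem(
        grpc_call_element* elem, const grpc_call_element_args* args) {
      return GRPC_ERROR_NONE;
    }

    // Hypothetical batch handler: forwards the batch to the next element.
    static void my_filter_start_transport_stream_op_batch(
        grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
      grpc_call_next_op(elem, batch);
    }
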
diff --git a/src/core/lib/channel/connected_channel.h b/src/core/lib/channel/connected_channel.h
index cca1973..91de802 100644
--- a/src/core/lib/channel/connected_channel.h
+++ b/src/core/lib/channel/connected_channel.h
@@ -21,21 +21,12 @@
 
 #include "src/core/lib/channel/channel_stack_builder.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_connected_filter;
 
-bool grpc_add_connected_filter(grpc_exec_ctx* exec_ctx,
-                               grpc_channel_stack_builder* builder,
+bool grpc_add_connected_filter(grpc_channel_stack_builder* builder,
                                void* arg_must_be_null);
 
 /* Debug helper to dig the transport stream out of a call element */
 grpc_stream* grpc_connected_channel_get_stream(grpc_call_element* elem);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_CHANNEL_CONNECTED_CHANNEL_H */
diff --git a/src/core/lib/channel/handshaker.cc b/src/core/lib/channel/handshaker.cc
index aae1b35..dcb149c 100644
--- a/src/core/lib/channel/handshaker.cc
+++ b/src/core/lib/channel/handshaker.cc
@@ -34,23 +34,20 @@
   handshaker->vtable = vtable;
 }
 
-void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx,
-                             grpc_handshaker* handshaker) {
-  handshaker->vtable->destroy(exec_ctx, handshaker);
+void grpc_handshaker_destroy(grpc_handshaker* handshaker) {
+  handshaker->vtable->destroy(handshaker);
 }
 
-void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
-                              grpc_handshaker* handshaker, grpc_error* why) {
-  handshaker->vtable->shutdown(exec_ctx, handshaker, why);
+void grpc_handshaker_shutdown(grpc_handshaker* handshaker, grpc_error* why) {
+  handshaker->vtable->shutdown(handshaker, why);
 }
 
-void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx,
-                                  grpc_handshaker* handshaker,
+void grpc_handshaker_do_handshake(grpc_handshaker* handshaker,
                                   grpc_tcp_server_acceptor* acceptor,
                                   grpc_closure* on_handshake_done,
                                   grpc_handshaker_args* args) {
-  handshaker->vtable->do_handshake(exec_ctx, handshaker, acceptor,
-                                   on_handshake_done, args);
+  handshaker->vtable->do_handshake(handshaker, acceptor, on_handshake_done,
+                                   args);
 }
 
 //
@@ -116,9 +113,9 @@
 }
 
 void grpc_handshake_manager_pending_list_shutdown_all(
-    grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why) {
+    grpc_handshake_manager* head, grpc_error* why) {
   while (head != nullptr) {
-    grpc_handshake_manager_shutdown(exec_ctx, head, GRPC_ERROR_REF(why));
+    grpc_handshake_manager_shutdown(head, GRPC_ERROR_REF(why));
     head = head->next;
   }
   GRPC_ERROR_UNREF(why);
@@ -145,11 +142,10 @@
   gpr_mu_unlock(&mgr->mu);
 }
 
-static void grpc_handshake_manager_unref(grpc_exec_ctx* exec_ctx,
-                                         grpc_handshake_manager* mgr) {
+static void grpc_handshake_manager_unref(grpc_handshake_manager* mgr) {
   if (gpr_unref(&mgr->refs)) {
     for (size_t i = 0; i < mgr->count; ++i) {
-      grpc_handshaker_destroy(exec_ctx, mgr->handshakers[i]);
+      grpc_handshaker_destroy(mgr->handshakers[i]);
     }
     gpr_free(mgr->handshakers);
     gpr_mu_destroy(&mgr->mu);
@@ -157,19 +153,17 @@
   }
 }
 
-void grpc_handshake_manager_destroy(grpc_exec_ctx* exec_ctx,
-                                    grpc_handshake_manager* mgr) {
-  grpc_handshake_manager_unref(exec_ctx, mgr);
+void grpc_handshake_manager_destroy(grpc_handshake_manager* mgr) {
+  grpc_handshake_manager_unref(mgr);
 }
 
-void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx,
-                                     grpc_handshake_manager* mgr,
+void grpc_handshake_manager_shutdown(grpc_handshake_manager* mgr,
                                      grpc_error* why) {
   gpr_mu_lock(&mgr->mu);
   // Shut down the handshaker that's currently in progress, if any.
   if (!mgr->shutdown && mgr->index > 0) {
     mgr->shutdown = true;
-    grpc_handshaker_shutdown(exec_ctx, mgr->handshakers[mgr->index - 1],
+    grpc_handshaker_shutdown(mgr->handshakers[mgr->index - 1],
                              GRPC_ERROR_REF(why));
   }
   gpr_mu_unlock(&mgr->mu);
@@ -179,8 +173,7 @@
 // Helper function to call either the next handshaker or the
 // on_handshake_done callback.
 // Returns true if we've scheduled the on_handshake_done callback.
-static bool call_next_handshaker_locked(grpc_exec_ctx* exec_ctx,
-                                        grpc_handshake_manager* mgr,
+static bool call_next_handshaker_locked(grpc_handshake_manager* mgr,
                                         grpc_error* error) {
   GPR_ASSERT(mgr->index <= mgr->count);
   // If we got an error or we've been shut down or we're exiting early or
@@ -190,13 +183,12 @@
       mgr->index == mgr->count) {
     // Cancel deadline timer, since we're invoking the on_handshake_done
     // callback now.
-    grpc_timer_cancel(exec_ctx, &mgr->deadline_timer);
-    GRPC_CLOSURE_SCHED(exec_ctx, &mgr->on_handshake_done, error);
+    grpc_timer_cancel(&mgr->deadline_timer);
+    GRPC_CLOSURE_SCHED(&mgr->on_handshake_done, error);
     mgr->shutdown = true;
   } else {
-    grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index],
-                                 mgr->acceptor, &mgr->call_next_handshaker,
-                                 &mgr->args);
+    grpc_handshaker_do_handshake(mgr->handshakers[mgr->index], mgr->acceptor,
+                                 &mgr->call_next_handshaker, &mgr->args);
   }
   ++mgr->index;
   return mgr->shutdown;
@@ -204,33 +196,31 @@
 
 // A function used as the handshaker-done callback when chaining
 // handshakers together.
-static void call_next_handshaker(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
+static void call_next_handshaker(void* arg, grpc_error* error) {
   grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
   gpr_mu_lock(&mgr->mu);
-  bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_REF(error));
+  bool done = call_next_handshaker_locked(mgr, GRPC_ERROR_REF(error));
   gpr_mu_unlock(&mgr->mu);
   // If we've invoked the final callback, we won't be coming back
   // to this function, so we can release our reference to the
   // handshake manager.
   if (done) {
-    grpc_handshake_manager_unref(exec_ctx, mgr);
+    grpc_handshake_manager_unref(mgr);
   }
 }
 
 // Callback invoked when deadline is exceeded.
-static void on_timeout(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_timeout(void* arg, grpc_error* error) {
   grpc_handshake_manager* mgr = (grpc_handshake_manager*)arg;
   if (error == GRPC_ERROR_NONE) {  // Timer fired, rather than being cancelled.
     grpc_handshake_manager_shutdown(
-        exec_ctx, mgr,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake timed out"));
+        mgr, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Handshake timed out"));
   }
-  grpc_handshake_manager_unref(exec_ctx, mgr);
+  grpc_handshake_manager_unref(mgr);
 }
 
 void grpc_handshake_manager_do_handshake(
-    grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
+    grpc_handshake_manager* mgr, grpc_pollset_set* interested_parties,
     grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
     grpc_millis deadline, grpc_tcp_server_acceptor* acceptor,
     grpc_iomgr_cb_func on_handshake_done, void* user_data) {
@@ -239,6 +229,7 @@
   GPR_ASSERT(!mgr->shutdown);
   // Construct handshaker args.  These will be passed through all
   // handshakers and eventually be freed by the on_handshake_done callback.
+  mgr->args.interested_parties = interested_parties;
   mgr->args.endpoint = endpoint;
   mgr->args.args = grpc_channel_args_copy(channel_args);
   mgr->args.user_data = user_data;
@@ -255,12 +246,12 @@
   gpr_ref(&mgr->refs);
   GRPC_CLOSURE_INIT(&mgr->on_timeout, on_timeout, mgr,
                     grpc_schedule_on_exec_ctx);
-  grpc_timer_init(exec_ctx, &mgr->deadline_timer, deadline, &mgr->on_timeout);
+  grpc_timer_init(&mgr->deadline_timer, deadline, &mgr->on_timeout);
   // Start first handshaker, which also owns a ref.
   gpr_ref(&mgr->refs);
-  bool done = call_next_handshaker_locked(exec_ctx, mgr, GRPC_ERROR_NONE);
+  bool done = call_next_handshaker_locked(mgr, GRPC_ERROR_NONE);
   gpr_mu_unlock(&mgr->mu);
   if (done) {
-    grpc_handshake_manager_unref(exec_ctx, mgr);
+    grpc_handshake_manager_unref(mgr);
   }
 }
diff --git a/src/core/lib/channel/handshaker.h b/src/core/lib/channel/handshaker.h
index 8ed38c1..68e5463 100644
--- a/src/core/lib/channel/handshaker.h
+++ b/src/core/lib/channel/handshaker.h
@@ -26,10 +26,6 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/iomgr/tcp_server.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /// Handshakers are used to perform initial handshakes on a connection
 /// before the client sends the initial request.  Some examples of what
 /// a handshaker can be used for include support for HTTP CONNECT on
@@ -58,6 +54,7 @@
 /// For the on_handshake_done callback, all members are input arguments,
 /// which the callback takes ownership of.
 typedef struct {
+  grpc_pollset_set* interested_parties;
   grpc_endpoint* endpoint;
   grpc_channel_args* args;
   grpc_slice_buffer* read_buffer;
@@ -71,18 +68,17 @@
 
 typedef struct {
   /// Destroys the handshaker.
-  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker);
+  void (*destroy)(grpc_handshaker* handshaker);
 
   /// Shuts down the handshaker (e.g., to clean up when the operation is
   /// aborted in the middle).
-  void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker,
-                   grpc_error* why);
+  void (*shutdown)(grpc_handshaker* handshaker, grpc_error* why);
 
   /// Performs handshaking, modifying \a args as needed (e.g., to
   /// replace \a endpoint with a wrapped endpoint).
   /// When finished, invokes \a on_handshake_done.
   /// \a acceptor will be NULL for client-side handshakers.
-  void (*do_handshake)(grpc_exec_ctx* exec_ctx, grpc_handshaker* handshaker,
+  void (*do_handshake)(grpc_handshaker* handshaker,
                        grpc_tcp_server_acceptor* acceptor,
                        grpc_closure* on_handshake_done,
                        grpc_handshaker_args* args);
@@ -98,12 +94,9 @@
 void grpc_handshaker_init(const grpc_handshaker_vtable* vtable,
                           grpc_handshaker* handshaker);
 
-void grpc_handshaker_destroy(grpc_exec_ctx* exec_ctx,
-                             grpc_handshaker* handshaker);
-void grpc_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
-                              grpc_handshaker* handshaker, grpc_error* why);
-void grpc_handshaker_do_handshake(grpc_exec_ctx* exec_ctx,
-                                  grpc_handshaker* handshaker,
+void grpc_handshaker_destroy(grpc_handshaker* handshaker);
+void grpc_handshaker_shutdown(grpc_handshaker* handshaker, grpc_error* why);
+void grpc_handshaker_do_handshake(grpc_handshaker* handshaker,
                                   grpc_tcp_server_acceptor* acceptor,
                                   grpc_closure* on_handshake_done,
                                   grpc_handshaker_args* args);
@@ -123,23 +116,23 @@
                                 grpc_handshaker* handshaker);
 
 /// Destroys the handshake manager.
-void grpc_handshake_manager_destroy(grpc_exec_ctx* exec_ctx,
-                                    grpc_handshake_manager* mgr);
+void grpc_handshake_manager_destroy(grpc_handshake_manager* mgr);
 
 /// Shuts down the handshake manager (e.g., to clean up when the operation is
 /// aborted in the middle).
 /// The caller must still call grpc_handshake_manager_destroy() after
 /// calling this function.
-void grpc_handshake_manager_shutdown(grpc_exec_ctx* exec_ctx,
-                                     grpc_handshake_manager* mgr,
+void grpc_handshake_manager_shutdown(grpc_handshake_manager* mgr,
                                      grpc_error* why);
 
 /// Invokes handshakers in the order they were added.
+/// \a interested_parties may be non-nullptr to provide a pollset_set that
+/// may be used during handshaking. Ownership is not taken.
 /// Takes ownership of \a endpoint, and then passes that ownership to
 /// the \a on_handshake_done callback.
 /// Does NOT take ownership of \a channel_args.  Instead, makes a copy before
 /// invoking the first handshaker.
-/// \a acceptor will be NULL for client-side handshakers.
+/// \a acceptor will be nullptr for client-side handshakers.
 ///
 /// When done, invokes \a on_handshake_done with a grpc_handshaker_args
 /// object as its argument.  If the callback is invoked with error !=
@@ -147,7 +140,7 @@
 /// the necessary clean-up.  Otherwise, the callback takes ownership of
 /// the arguments.
 void grpc_handshake_manager_do_handshake(
-    grpc_exec_ctx* exec_ctx, grpc_handshake_manager* mgr,
+    grpc_handshake_manager* mgr, grpc_pollset_set* interested_parties,
     grpc_endpoint* endpoint, const grpc_channel_args* channel_args,
     grpc_millis deadline, grpc_tcp_server_acceptor* acceptor,
     grpc_iomgr_cb_func on_handshake_done, void* user_data);
@@ -166,10 +159,6 @@
 /// Shut down all pending handshake managers on the server side.
 // Not thread-safe. Caller needs to synchronize.
 void grpc_handshake_manager_pending_list_shutdown_all(
-    grpc_exec_ctx* exec_ctx, grpc_handshake_manager* head, grpc_error* why);
-
-#ifdef __cplusplus
-}
-#endif
+    grpc_handshake_manager* head, grpc_error* why);
 
 #endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_H */
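
A minimal sketch of a client-side caller of the updated grpc_handshake_manager_do_handshake, assuming a handshake manager, endpoint, and pollset_set already exist; start_handshake and on_handshake_done are hypothetical names.

    #include "src/core/lib/channel/handshaker.h"

    // Receives the grpc_handshaker_args object described above. On error the
    // handshakers have already cleaned up; otherwise this callback owns the
    // members of *args (endpoint, args, read_buffer).
    static void on_handshake_done(void* arg, grpc_error* error) {
      grpc_handshaker_args* args = (grpc_handshaker_args*)arg;
      (void)args;
      (void)error;
    }

    static void start_handshake(grpc_handshake_manager* mgr,
                                grpc_pollset_set* interested_parties,
                                grpc_endpoint* endpoint,
                                const grpc_channel_args* channel_args,
                                grpc_millis deadline, void* user_data) {
      grpc_handshake_manager_do_handshake(
          mgr, interested_parties, endpoint, channel_args, deadline,
          /*acceptor=*/nullptr,  // nullptr on the client side
          on_handshake_done, user_data);
    }
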
diff --git a/src/core/lib/channel/handshaker_factory.cc b/src/core/lib/channel/handshaker_factory.cc
index 015006a..2380d98 100644
--- a/src/core/lib/channel/handshaker_factory.cc
+++ b/src/core/lib/channel/handshaker_factory.cc
@@ -21,19 +21,19 @@
 #include <grpc/support/log.h>
 
 void grpc_handshaker_factory_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory,
-    const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) {
+    grpc_handshaker_factory* handshaker_factory, const grpc_channel_args* args,
+    grpc_handshake_manager* handshake_mgr) {
   if (handshaker_factory != nullptr) {
     GPR_ASSERT(handshaker_factory->vtable != nullptr);
-    handshaker_factory->vtable->add_handshakers(exec_ctx, handshaker_factory,
-                                                args, handshake_mgr);
+    handshaker_factory->vtable->add_handshakers(handshaker_factory, args,
+                                                handshake_mgr);
   }
 }
 
 void grpc_handshaker_factory_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory) {
+    grpc_handshaker_factory* handshaker_factory) {
   if (handshaker_factory != nullptr) {
     GPR_ASSERT(handshaker_factory->vtable != nullptr);
-    handshaker_factory->vtable->destroy(exec_ctx, handshaker_factory);
+    handshaker_factory->vtable->destroy(handshaker_factory);
   }
 }
diff --git a/src/core/lib/channel/handshaker_factory.h b/src/core/lib/channel/handshaker_factory.h
index 63d9b5a..8a7c015 100644
--- a/src/core/lib/channel/handshaker_factory.h
+++ b/src/core/lib/channel/handshaker_factory.h
@@ -24,21 +24,15 @@
 #include "src/core/lib/channel/handshaker.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // A handshaker factory is used to create handshakers.
 
 typedef struct grpc_handshaker_factory grpc_handshaker_factory;
 
 typedef struct {
-  void (*add_handshakers)(grpc_exec_ctx* exec_ctx,
-                          grpc_handshaker_factory* handshaker_factory,
+  void (*add_handshakers)(grpc_handshaker_factory* handshaker_factory,
                           const grpc_channel_args* args,
                           grpc_handshake_manager* handshake_mgr);
-  void (*destroy)(grpc_exec_ctx* exec_ctx,
-                  grpc_handshaker_factory* handshaker_factory);
+  void (*destroy)(grpc_handshaker_factory* handshaker_factory);
 } grpc_handshaker_factory_vtable;
 
 struct grpc_handshaker_factory {
@@ -46,14 +40,10 @@
 };
 
 void grpc_handshaker_factory_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory,
-    const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr);
+    grpc_handshaker_factory* handshaker_factory, const grpc_channel_args* args,
+    grpc_handshake_manager* handshake_mgr);
 
 void grpc_handshaker_factory_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory);
-
-#ifdef __cplusplus
-}
-#endif
+    grpc_handshaker_factory* handshaker_factory);
 
 #endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_FACTORY_H */
diff --git a/src/core/lib/channel/handshaker_registry.cc b/src/core/lib/channel/handshaker_registry.cc
index c6bc87d..098eabf 100644
--- a/src/core/lib/channel/handshaker_registry.cc
+++ b/src/core/lib/channel/handshaker_registry.cc
@@ -47,18 +47,17 @@
 }
 
 static void grpc_handshaker_factory_list_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory_list* list,
-    const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) {
+    grpc_handshaker_factory_list* list, const grpc_channel_args* args,
+    grpc_handshake_manager* handshake_mgr) {
   for (size_t i = 0; i < list->num_factories; ++i) {
-    grpc_handshaker_factory_add_handshakers(exec_ctx, list->list[i], args,
-                                            handshake_mgr);
+    grpc_handshaker_factory_add_handshakers(list->list[i], args, handshake_mgr);
   }
 }
 
 static void grpc_handshaker_factory_list_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory_list* list) {
+    grpc_handshaker_factory_list* list) {
   for (size_t i = 0; i < list->num_factories; ++i) {
-    grpc_handshaker_factory_destroy(exec_ctx, list->list[i]);
+    grpc_handshaker_factory_destroy(list->list[i]);
   }
   gpr_free(list->list);
 }
@@ -74,10 +73,9 @@
   memset(g_handshaker_factory_lists, 0, sizeof(g_handshaker_factory_lists));
 }
 
-void grpc_handshaker_factory_registry_shutdown(grpc_exec_ctx* exec_ctx) {
+void grpc_handshaker_factory_registry_shutdown() {
   for (size_t i = 0; i < NUM_HANDSHAKER_TYPES; ++i) {
-    grpc_handshaker_factory_list_destroy(exec_ctx,
-                                         &g_handshaker_factory_lists[i]);
+    grpc_handshaker_factory_list_destroy(&g_handshaker_factory_lists[i]);
   }
 }
 
@@ -88,11 +86,9 @@
       &g_handshaker_factory_lists[handshaker_type], at_start, factory);
 }
 
-void grpc_handshakers_add(grpc_exec_ctx* exec_ctx,
-                          grpc_handshaker_type handshaker_type,
+void grpc_handshakers_add(grpc_handshaker_type handshaker_type,
                           const grpc_channel_args* args,
                           grpc_handshake_manager* handshake_mgr) {
   grpc_handshaker_factory_list_add_handshakers(
-      exec_ctx, &g_handshaker_factory_lists[handshaker_type], args,
-      handshake_mgr);
+      &g_handshaker_factory_lists[handshaker_type], args, handshake_mgr);
 }
diff --git a/src/core/lib/channel/handshaker_registry.h b/src/core/lib/channel/handshaker_registry.h
index ddd280b..0b05531 100644
--- a/src/core/lib/channel/handshaker_registry.h
+++ b/src/core/lib/channel/handshaker_registry.h
@@ -24,10 +24,6 @@
 #include "src/core/lib/channel/handshaker_factory.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum {
   HANDSHAKER_CLIENT = 0,
   HANDSHAKER_SERVER,
@@ -35,7 +31,7 @@
 } grpc_handshaker_type;
 
 void grpc_handshaker_factory_registry_init();
-void grpc_handshaker_factory_registry_shutdown(grpc_exec_ctx* exec_ctx);
+void grpc_handshaker_factory_registry_shutdown();
 
 /// Registers a new handshaker factory.  Takes ownership.
 /// If \a at_start is true, the new handshaker will be at the beginning of
@@ -44,13 +40,8 @@
                                       grpc_handshaker_type handshaker_type,
                                       grpc_handshaker_factory* factory);
 
-void grpc_handshakers_add(grpc_exec_ctx* exec_ctx,
-                          grpc_handshaker_type handshaker_type,
+void grpc_handshakers_add(grpc_handshaker_type handshaker_type,
                           const grpc_channel_args* args,
                           grpc_handshake_manager* handshake_mgr);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_CHANNEL_HANDSHAKER_REGISTRY_H */
diff --git a/src/core/lib/compression/algorithm_metadata.h b/src/core/lib/compression/algorithm_metadata.h
index 17caf58..08feafc 100644
--- a/src/core/lib/compression/algorithm_metadata.h
+++ b/src/core/lib/compression/algorithm_metadata.h
@@ -22,10 +22,6 @@
 #include <grpc/compression.h>
 #include "src/core/lib/transport/metadata.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Return compression algorithm based metadata value */
 grpc_slice grpc_compression_algorithm_slice(
     grpc_compression_algorithm algorithm);
@@ -53,8 +49,4 @@
 grpc_stream_compression_algorithm grpc_stream_compression_algorithm_from_slice(
     grpc_slice str);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_COMPRESSION_ALGORITHM_METADATA_H */
diff --git a/src/core/lib/compression/message_compress.cc b/src/core/lib/compression/message_compress.cc
index c051e28..aa43a53 100644
--- a/src/core/lib/compression/message_compress.cc
+++ b/src/core/lib/compression/message_compress.cc
@@ -29,8 +29,8 @@
 
 #define OUTPUT_BLOCK_SIZE 1024
 
-static int zlib_body(grpc_exec_ctx* exec_ctx, z_stream* zs,
-                     grpc_slice_buffer* input, grpc_slice_buffer* output,
+static int zlib_body(z_stream* zs, grpc_slice_buffer* input,
+                     grpc_slice_buffer* output,
                      int (*flate)(z_stream* zs, int flush)) {
   int r;
   int flush;
@@ -74,7 +74,7 @@
   return 1;
 
 error:
-  grpc_slice_unref_internal(exec_ctx, outbuf);
+  grpc_slice_unref_internal(outbuf);
   return 0;
 }
 
@@ -84,8 +84,8 @@
 
 static void zfree_gpr(void* opaque, void* address) { gpr_free(address); }
 
-static int zlib_compress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input,
-                         grpc_slice_buffer* output, int gzip) {
+static int zlib_compress(grpc_slice_buffer* input, grpc_slice_buffer* output,
+                         int gzip) {
   z_stream zs;
   int r;
   size_t i;
@@ -97,11 +97,10 @@
   r = deflateInit2(&zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 15 | (gzip ? 16 : 0),
                    8, Z_DEFAULT_STRATEGY);
   GPR_ASSERT(r == Z_OK);
-  r = zlib_body(exec_ctx, &zs, input, output, deflate) &&
-      output->length < input->length;
+  r = zlib_body(&zs, input, output, deflate) && output->length < input->length;
   if (!r) {
     for (i = count_before; i < output->count; i++) {
-      grpc_slice_unref_internal(exec_ctx, output->slices[i]);
+      grpc_slice_unref_internal(output->slices[i]);
     }
     output->count = count_before;
     output->length = length_before;
@@ -110,8 +109,8 @@
   return r;
 }
 
-static int zlib_decompress(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* input,
-                           grpc_slice_buffer* output, int gzip) {
+static int zlib_decompress(grpc_slice_buffer* input, grpc_slice_buffer* output,
+                           int gzip) {
   z_stream zs;
   int r;
   size_t i;
@@ -122,10 +121,10 @@
   zs.zfree = zfree_gpr;
   r = inflateInit2(&zs, 15 | (gzip ? 16 : 0));
   GPR_ASSERT(r == Z_OK);
-  r = zlib_body(exec_ctx, &zs, input, output, inflate);
+  r = zlib_body(&zs, input, output, inflate);
   if (!r) {
     for (i = count_before; i < output->count; i++) {
-      grpc_slice_unref_internal(exec_ctx, output->slices[i]);
+      grpc_slice_unref_internal(output->slices[i]);
     }
     output->count = count_before;
     output->length = length_before;
@@ -142,8 +141,7 @@
   return 1;
 }
 
-static int compress_inner(grpc_exec_ctx* exec_ctx,
-                          grpc_compression_algorithm algorithm,
+static int compress_inner(grpc_compression_algorithm algorithm,
                           grpc_slice_buffer* input, grpc_slice_buffer* output) {
   switch (algorithm) {
     case GRPC_COMPRESS_NONE:
@@ -151,9 +149,9 @@
          rely on that here */
       return 0;
     case GRPC_COMPRESS_DEFLATE:
-      return zlib_compress(exec_ctx, input, output, 0);
+      return zlib_compress(input, output, 0);
     case GRPC_COMPRESS_GZIP:
-      return zlib_compress(exec_ctx, input, output, 1);
+      return zlib_compress(input, output, 1);
     case GRPC_COMPRESS_ALGORITHMS_COUNT:
       break;
   }
@@ -161,26 +159,24 @@
   return 0;
 }
 
-int grpc_msg_compress(grpc_exec_ctx* exec_ctx,
-                      grpc_compression_algorithm algorithm,
+int grpc_msg_compress(grpc_compression_algorithm algorithm,
                       grpc_slice_buffer* input, grpc_slice_buffer* output) {
-  if (!compress_inner(exec_ctx, algorithm, input, output)) {
+  if (!compress_inner(algorithm, input, output)) {
     copy(input, output);
     return 0;
   }
   return 1;
 }
 
-int grpc_msg_decompress(grpc_exec_ctx* exec_ctx,
-                        grpc_compression_algorithm algorithm,
+int grpc_msg_decompress(grpc_compression_algorithm algorithm,
                         grpc_slice_buffer* input, grpc_slice_buffer* output) {
   switch (algorithm) {
     case GRPC_COMPRESS_NONE:
       return copy(input, output);
     case GRPC_COMPRESS_DEFLATE:
-      return zlib_decompress(exec_ctx, input, output, 0);
+      return zlib_decompress(input, output, 0);
     case GRPC_COMPRESS_GZIP:
-      return zlib_decompress(exec_ctx, input, output, 1);
+      return zlib_decompress(input, output, 1);
     case GRPC_COMPRESS_ALGORITHMS_COUNT:
       break;
   }
diff --git a/src/core/lib/compression/message_compress.h b/src/core/lib/compression/message_compress.h
index fffe175..c963fcc 100644
--- a/src/core/lib/compression/message_compress.h
+++ b/src/core/lib/compression/message_compress.h
@@ -22,26 +22,16 @@
 #include <grpc/compression.h>
 #include <grpc/slice_buffer.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* compress 'input' to 'output' using 'algorithm'.
    On success, appends compressed slices to output and returns 1.
    On failure, appends uncompressed slices to output and returns 0. */
-int grpc_msg_compress(grpc_exec_ctx* exec_ctx,
-                      grpc_compression_algorithm algorithm,
+int grpc_msg_compress(grpc_compression_algorithm algorithm,
                       grpc_slice_buffer* input, grpc_slice_buffer* output);
 
 /* decompress 'input' to 'output' using 'algorithm'.
    On success, appends slices to output and returns 1.
    On failure, output is unchanged, and returns 0. */
-int grpc_msg_decompress(grpc_exec_ctx* exec_ctx,
-                        grpc_compression_algorithm algorithm,
+int grpc_msg_decompress(grpc_compression_algorithm algorithm,
                         grpc_slice_buffer* input, grpc_slice_buffer* output);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_COMPRESSION_MESSAGE_COMPRESS_H */
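
Illustrative usage of the exec_ctx-free compression entry points above. compress_payload is a hypothetical helper; like the rest of this migration, it assumes a grpc_core::ExecCtx is live on the calling thread, here supplied by the stack-allocated object.

    #include <grpc/slice_buffer.h>

    #include "src/core/lib/compression/message_compress.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    // Gzip-compress `input` into `output`. Returns true on success (compressed
    // slices appended); on failure an uncompressed copy is appended instead,
    // per the contract documented above.
    static bool compress_payload(grpc_slice_buffer* input,
                                 grpc_slice_buffer* output) {
      grpc_core::ExecCtx exec_ctx;  // replaces the old explicit exec_ctx argument
      return grpc_msg_compress(GRPC_COMPRESS_GZIP, input, output) != 0;
    }
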
diff --git a/src/core/lib/compression/stream_compression.cc b/src/core/lib/compression/stream_compression.cc
index 1ccbe16..b4b3e52 100644
--- a/src/core/lib/compression/stream_compression.cc
+++ b/src/core/lib/compression/stream_compression.cc
@@ -21,7 +21,7 @@
 #include "src/core/lib/compression/stream_compression.h"
 #include "src/core/lib/compression/stream_compression_gzip.h"
 
-extern "C" const grpc_stream_compression_vtable
+extern const grpc_stream_compression_vtable
     grpc_stream_compression_identity_vtable;
 
 bool grpc_stream_compress(grpc_stream_compression_context* ctx,
diff --git a/src/core/lib/compression/stream_compression.h b/src/core/lib/compression/stream_compression.h
index b56c142..8322835 100644
--- a/src/core/lib/compression/stream_compression.h
+++ b/src/core/lib/compression/stream_compression.h
@@ -26,10 +26,6 @@
 
 #include "src/core/lib/transport/static_metadata.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_stream_compression_vtable grpc_stream_compression_vtable;
 
 /* Stream compression/decompression context */
@@ -115,8 +111,4 @@
 int grpc_stream_compression_method_parse(
     grpc_slice value, bool is_compress, grpc_stream_compression_method* method);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif
diff --git a/src/core/lib/compression/stream_compression_gzip.cc b/src/core/lib/compression/stream_compression_gzip.cc
index 0c3fdd1..897f391 100644
--- a/src/core/lib/compression/stream_compression_gzip.cc
+++ b/src/core/lib/compression/stream_compression_gzip.cc
@@ -40,7 +40,7 @@
   /* Full flush is not allowed when inflating. */
   GPR_ASSERT(!(ctx->flate == inflate && (flush == Z_FINISH)));
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   int r;
   bool eoc = false;
   size_t original_max_output_size = max_output_size;
@@ -57,8 +57,8 @@
       r = ctx->flate(&ctx->zs, Z_NO_FLUSH);
       if (r < 0 && r != Z_BUF_ERROR) {
         gpr_log(GPR_ERROR, "zlib error (%d)", r);
-        grpc_slice_unref_internal(&exec_ctx, slice_out);
-        grpc_exec_ctx_finish(&exec_ctx);
+        grpc_slice_unref_internal(slice_out);
+
         return false;
       } else if (r == Z_STREAM_END && ctx->flate == inflate) {
         eoc = true;
@@ -69,7 +69,7 @@
             grpc_slice_sub(slice, GRPC_SLICE_LENGTH(slice) - ctx->zs.avail_in,
                            GRPC_SLICE_LENGTH(slice)));
       }
-      grpc_slice_unref_internal(&exec_ctx, slice);
+      grpc_slice_unref_internal(slice);
     }
     if (flush != 0 && ctx->zs.avail_out > 0 && !eoc) {
       GPR_ASSERT(in->length == 0);
@@ -88,8 +88,8 @@
             break;
           default:
             gpr_log(GPR_ERROR, "zlib error (%d)", r);
-            grpc_slice_unref_internal(&exec_ctx, slice_out);
-            grpc_exec_ctx_finish(&exec_ctx);
+            grpc_slice_unref_internal(slice_out);
+
             return false;
         }
       } else if (flush == Z_FINISH) {
@@ -104,8 +104,8 @@
             break;
           default:
             gpr_log(GPR_ERROR, "zlib error (%d)", r);
-            grpc_slice_unref_internal(&exec_ctx, slice_out);
-            grpc_exec_ctx_finish(&exec_ctx);
+            grpc_slice_unref_internal(slice_out);
+
             return false;
         }
       }
@@ -114,14 +114,15 @@
     if (ctx->zs.avail_out == 0) {
       grpc_slice_buffer_add(out, slice_out);
     } else if (ctx->zs.avail_out < slice_size) {
-      slice_out.data.refcounted.length -= ctx->zs.avail_out;
+      size_t len = GRPC_SLICE_LENGTH(slice_out);
+      GRPC_SLICE_SET_LENGTH(slice_out, len - ctx->zs.avail_out);
       grpc_slice_buffer_add(out, slice_out);
     } else {
-      grpc_slice_unref_internal(&exec_ctx, slice_out);
+      grpc_slice_unref_internal(slice_out);
     }
     max_output_size -= (slice_size - ctx->zs.avail_out);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   if (end_of_context) {
     *end_of_context = eoc;
   }
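
The pattern applied throughout this hunk, isolated for clarity: the stack-allocated grpc_core::ExecCtx replaces the GRPC_EXEC_CTX_INIT / grpc_exec_ctx_finish pair, with the destructor taking over the finish step when the object goes out of scope. A minimal sketch; do_core_work is a hypothetical function.

    #include "src/core/lib/iomgr/exec_ctx.h"

    static void do_core_work() {
      grpc_core::ExecCtx exec_ctx;  // was: grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
      // ... call into core (slice unrefs, closure scheduling, etc.) ...
    }  // destructor replaces the old grpc_exec_ctx_finish(&exec_ctx) call
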
diff --git a/src/core/lib/compression/stream_compression_gzip.h b/src/core/lib/compression/stream_compression_gzip.h
index a3f1b04..7cf49a0 100644
--- a/src/core/lib/compression/stream_compression_gzip.h
+++ b/src/core/lib/compression/stream_compression_gzip.h
@@ -21,14 +21,6 @@
 
 #include "src/core/lib/compression/stream_compression.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_stream_compression_vtable grpc_stream_compression_gzip_vtable;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif
diff --git a/src/core/lib/compression/stream_compression_identity.h b/src/core/lib/compression/stream_compression_identity.h
index 3a729fa..41926e9 100644
--- a/src/core/lib/compression/stream_compression_identity.h
+++ b/src/core/lib/compression/stream_compression_identity.h
@@ -21,15 +21,7 @@
 
 #include "src/core/lib/compression/stream_compression.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_stream_compression_vtable
     grpc_stream_compression_identity_vtable;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif
diff --git a/src/core/lib/debug/stats.cc b/src/core/lib/debug/stats.cc
index 7d2af71..0b39b2b 100644
--- a/src/core/lib/debug/stats.cc
+++ b/src/core/lib/debug/stats.cc
@@ -62,9 +62,9 @@
   }
 }
 
-int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx* exec_ctx, int value,
-                                      const int* table, int table_size) {
-  GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx);
+int grpc_stats_histo_find_bucket_slow(int value, const int* table,
+                                      int table_size) {
+  GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS();
   const int* const start = table;
   while (table_size > 0) {
     int step = table_size / 2;
diff --git a/src/core/lib/debug/stats.h b/src/core/lib/debug/stats.h
index 1c19e72..02eed5e 100644
--- a/src/core/lib/debug/stats.h
+++ b/src/core/lib/debug/stats.h
@@ -23,10 +23,6 @@
 #include "src/core/lib/debug/stats_data.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_stats_data {
   gpr_atm counters[GRPC_STATS_COUNTER_COUNT];
   gpr_atm histograms[GRPC_STATS_HISTOGRAM_BUCKETS];
@@ -34,17 +30,15 @@
 
 extern grpc_stats_data* grpc_stats_per_cpu_storage;
 
-#define GRPC_THREAD_STATS_DATA(exec_ctx) \
-  (&grpc_stats_per_cpu_storage[(exec_ctx)->starting_cpu])
+#define GRPC_THREAD_STATS_DATA() \
+  (&grpc_stats_per_cpu_storage[grpc_core::ExecCtx::Get()->starting_cpu()])
 
-#define GRPC_STATS_INC_COUNTER(exec_ctx, ctr) \
-  (gpr_atm_no_barrier_fetch_add(              \
-      &GRPC_THREAD_STATS_DATA((exec_ctx))->counters[(ctr)], 1))
+#define GRPC_STATS_INC_COUNTER(ctr) \
+  (gpr_atm_no_barrier_fetch_add(&GRPC_THREAD_STATS_DATA()->counters[(ctr)], 1))
 
-#define GRPC_STATS_INC_HISTOGRAM(exec_ctx, histogram, index) \
-  (gpr_atm_no_barrier_fetch_add(                             \
-      &GRPC_THREAD_STATS_DATA((exec_ctx))                    \
-           ->histograms[histogram##_FIRST_SLOT + (index)],   \
+#define GRPC_STATS_INC_HISTOGRAM(histogram, index)                             \
+  (gpr_atm_no_barrier_fetch_add(                                               \
+      &GRPC_THREAD_STATS_DATA()->histograms[histogram##_FIRST_SLOT + (index)], \
       1))
 
 void grpc_stats_init(void);
@@ -54,16 +48,12 @@
 void grpc_stats_diff(const grpc_stats_data* b, const grpc_stats_data* a,
                      grpc_stats_data* c);
 char* grpc_stats_data_as_json(const grpc_stats_data* data);
-int grpc_stats_histo_find_bucket_slow(grpc_exec_ctx* exec_ctx, int value,
-                                      const int* table, int table_size);
+int grpc_stats_histo_find_bucket_slow(int value, const int* table,
+                                      int table_size);
 double grpc_stats_histo_percentile(const grpc_stats_data* data,
                                    grpc_stats_histograms histogram,
                                    double percentile);
 size_t grpc_stats_histo_count(const grpc_stats_data* data,
                               grpc_stats_histograms histogram);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif
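
Illustrative only: how a stats increment reads after this change. GRPC_THREAD_STATS_DATA() now selects the per-CPU slot via grpc_core::ExecCtx::Get()->starting_cpu(), so a grpc_core::ExecCtx must exist on the calling thread; record_example is a hypothetical function.

    #include "src/core/lib/debug/stats.h"
    #include "src/core/lib/iomgr/exec_ctx.h"

    static void record_example() {
      grpc_core::ExecCtx exec_ctx;  // supplies the per-thread context the macros read
      GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS();  // was: ...SLOW_LOOKUPS(exec_ctx)
    }
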
diff --git a/src/core/lib/debug/stats_data.cc b/src/core/lib/debug/stats_data.cc
index 17e15f4..996ed3d 100644
--- a/src/core/lib/debug/stats_data.cc
+++ b/src/core/lib/debug/stats_data.cc
@@ -342,11 +342,10 @@
     42, 42, 43, 44, 44, 45, 46, 46, 47, 48, 48, 49, 49, 50, 50, 51, 51};
 const int grpc_stats_table_8[9] = {0, 1, 2, 4, 7, 13, 23, 39, 64};
 const uint8_t grpc_stats_table_9[9] = {0, 0, 1, 2, 2, 3, 4, 4, 5};
-void grpc_stats_inc_call_initial_size(grpc_exec_ctx* exec_ctx, int value) {
+void grpc_stats_inc_call_initial_size(int value) {
   value = GPR_CLAMP(value, 0, 262144);
   if (value < 6) {
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
-                             value);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, value);
     return;
   }
   union {
@@ -359,19 +358,17 @@
         grpc_stats_table_1[((_val.uint - 4618441417868443648ull) >> 49)] + 6;
     _bkt.dbl = grpc_stats_table_0[bucket];
     bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
-                             bucket);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE, bucket);
     return;
   }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_0, 64));
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_CALL_INITIAL_SIZE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_0, 64));
 }
-void grpc_stats_inc_poll_events_returned(grpc_exec_ctx* exec_ctx, int value) {
+void grpc_stats_inc_poll_events_returned(int value) {
   value = GPR_CLAMP(value, 0, 1024);
   if (value < 29) {
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                             GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, value);
     return;
   }
   union {
@@ -384,204 +381,17 @@
         grpc_stats_table_3[((_val.uint - 4628855992006737920ull) >> 47)] + 29;
     _bkt.dbl = grpc_stats_table_2[bucket];
     bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                             GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket);
-    return;
-  }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                           GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_2, 128));
-}
-void grpc_stats_inc_tcp_write_size(grpc_exec_ctx* exec_ctx, int value) {
-  value = GPR_CLAMP(value, 0, 16777216);
-  if (value < 5) {
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
-                             value);
-    return;
-  }
-  union {
-    double dbl;
-    uint64_t uint;
-  } _val, _bkt;
-  _val.dbl = value;
-  if (_val.uint < 4683743612465315840ull) {
-    int bucket =
-        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
-    _bkt.dbl = grpc_stats_table_4[bucket];
-    bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
-                             bucket);
-    return;
-  }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_4, 64));
-}
-void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx* exec_ctx, int value) {
-  value = GPR_CLAMP(value, 0, 1024);
-  if (value < 13) {
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                             GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value);
-    return;
-  }
-  union {
-    double dbl;
-    uint64_t uint;
-  } _val, _bkt;
-  _val.dbl = value;
-  if (_val.uint < 4637863191261478912ull) {
-    int bucket =
-        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_6[bucket];
-    bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                             GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
-    return;
-  }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_6, 64));
-}
-void grpc_stats_inc_tcp_read_size(grpc_exec_ctx* exec_ctx, int value) {
-  value = GPR_CLAMP(value, 0, 16777216);
-  if (value < 5) {
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
-                             value);
-    return;
-  }
-  union {
-    double dbl;
-    uint64_t uint;
-  } _val, _bkt;
-  _val.dbl = value;
-  if (_val.uint < 4683743612465315840ull) {
-    int bucket =
-        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
-    _bkt.dbl = grpc_stats_table_4[bucket];
-    bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
-                             bucket);
-    return;
-  }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_4, 64));
-}
-void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx* exec_ctx, int value) {
-  value = GPR_CLAMP(value, 0, 16777216);
-  if (value < 5) {
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
-                             value);
-    return;
-  }
-  union {
-    double dbl;
-    uint64_t uint;
-  } _val, _bkt;
-  _val.dbl = value;
-  if (_val.uint < 4683743612465315840ull) {
-    int bucket =
-        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
-    _bkt.dbl = grpc_stats_table_4[bucket];
-    bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
-                             bucket);
-    return;
-  }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_4, 64));
-}
-void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx* exec_ctx,
-                                            int value) {
-  value = GPR_CLAMP(value, 0, 1024);
-  if (value < 13) {
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, value);
-    return;
-  }
-  union {
-    double dbl;
-    uint64_t uint;
-  } _val, _bkt;
-  _val.dbl = value;
-  if (_val.uint < 4637863191261478912ull) {
-    int bucket =
-        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_6[bucket];
-    bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE, bucket);
-    return;
-  }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                           GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_6, 64));
-}
-void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx* exec_ctx,
-                                            int value) {
-  value = GPR_CLAMP(value, 0, 16777216);
-  if (value < 5) {
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, value);
-    return;
-  }
-  union {
-    double dbl;
-    uint64_t uint;
-  } _val, _bkt;
-  _val.dbl = value;
-  if (_val.uint < 4683743612465315840ull) {
-    int bucket =
-        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
-    _bkt.dbl = grpc_stats_table_4[bucket];
-    bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE, bucket);
-    return;
-  }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                           GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_4, 64));
-}
-void grpc_stats_inc_http2_send_initial_metadata_per_write(
-    grpc_exec_ctx* exec_ctx, int value) {
-  value = GPR_CLAMP(value, 0, 1024);
-  if (value < 13) {
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
-        value);
-    return;
-  }
-  union {
-    double dbl;
-    uint64_t uint;
-  } _val, _bkt;
-  _val.dbl = value;
-  if (_val.uint < 4637863191261478912ull) {
-    int bucket =
-        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_6[bucket];
-    bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
-        bucket);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED, bucket);
     return;
   }
   GRPC_STATS_INC_HISTOGRAM(
-      (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
-      grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
-                                        64));
+      GRPC_STATS_HISTOGRAM_POLL_EVENTS_RETURNED,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_2, 128));
 }
-void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx* exec_ctx,
-                                                 int value) {
-  value = GPR_CLAMP(value, 0, 1024);
-  if (value < 13) {
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, value);
+void grpc_stats_inc_tcp_write_size(int value) {
+  value = GPR_CLAMP(value, 0, 16777216);
+  if (value < 5) {
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, value);
     return;
   }
   union {
@@ -589,55 +399,142 @@
     uint64_t uint;
   } _val, _bkt;
   _val.dbl = value;
-  if (_val.uint < 4637863191261478912ull) {
+  if (_val.uint < 4683743612465315840ull) {
     int bucket =
-        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_6[bucket];
+        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_4[bucket];
     bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE, bucket);
-    return;
-  }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                           GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_6, 64));
-}
-void grpc_stats_inc_http2_send_trailing_metadata_per_write(
-    grpc_exec_ctx* exec_ctx, int value) {
-  value = GPR_CLAMP(value, 0, 1024);
-  if (value < 13) {
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
-        value);
-    return;
-  }
-  union {
-    double dbl;
-    uint64_t uint;
-  } _val, _bkt;
-  _val.dbl = value;
-  if (_val.uint < 4637863191261478912ull) {
-    int bucket =
-        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
-    _bkt.dbl = grpc_stats_table_6[bucket];
-    bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
-        bucket);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE, bucket);
     return;
   }
   GRPC_STATS_INC_HISTOGRAM(
-      (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
-      grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_6,
-                                        64));
+      GRPC_STATS_HISTOGRAM_TCP_WRITE_SIZE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64));
 }
-void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx* exec_ctx,
-                                                 int value) {
+void grpc_stats_inc_tcp_write_iov_size(int value) {
+  value = GPR_CLAMP(value, 0, 1024);
+  if (value < 13) {
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4637863191261478912ull) {
+    int bucket =
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE, bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_TCP_WRITE_IOV_SIZE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_tcp_read_size(int value) {
+  value = GPR_CLAMP(value, 0, 16777216);
+  if (value < 5) {
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4683743612465315840ull) {
+    int bucket =
+        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_4[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_SIZE, bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_TCP_READ_SIZE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_tcp_read_offer(int value) {
+  value = GPR_CLAMP(value, 0, 16777216);
+  if (value < 5) {
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4683743612465315840ull) {
+    int bucket =
+        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_4[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER, bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_TCP_READ_OFFER,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_tcp_read_offer_iov_size(int value) {
+  value = GPR_CLAMP(value, 0, 1024);
+  if (value < 13) {
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
+                             value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4637863191261478912ull) {
+    int bucket =
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
+                             bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_TCP_READ_OFFER_IOV_SIZE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_http2_send_message_size(int value) {
+  value = GPR_CLAMP(value, 0, 16777216);
+  if (value < 5) {
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+                             value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4683743612465315840ull) {
+    int bucket =
+        grpc_stats_table_5[((_val.uint - 4617315517961601024ull) >> 50)] + 5;
+    _bkt.dbl = grpc_stats_table_4[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+                             bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_SIZE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_4, 64));
+}
+void grpc_stats_inc_http2_send_initial_metadata_per_write(int value) {
   value = GPR_CLAMP(value, 0, 1024);
   if (value < 13) {
     GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, value);
+        GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, value);
     return;
   }
   union {
@@ -651,19 +548,92 @@
     _bkt.dbl = grpc_stats_table_6[bucket];
     bucket -= (_val.uint < _bkt.uint);
     GRPC_STATS_INC_HISTOGRAM(
-        (exec_ctx), GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE, bucket);
+        GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE, bucket);
     return;
   }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                           GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_6, 64));
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_HTTP2_SEND_INITIAL_METADATA_PER_WRITE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
 }
-void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx* exec_ctx, int value) {
+void grpc_stats_inc_http2_send_message_per_write(int value) {
+  value = GPR_CLAMP(value, 0, 1024);
+  if (value < 13) {
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+                             value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4637863191261478912ull) {
+    int bucket =
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+                             bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_HTTP2_SEND_MESSAGE_PER_WRITE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(int value) {
+  value = GPR_CLAMP(value, 0, 1024);
+  if (value < 13) {
+    GRPC_STATS_INC_HISTOGRAM(
+        GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4637863191261478912ull) {
+    int bucket =
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(
+        GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE, bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_HTTP2_SEND_TRAILING_METADATA_PER_WRITE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_http2_send_flowctl_per_write(int value) {
+  value = GPR_CLAMP(value, 0, 1024);
+  if (value < 13) {
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+                             value);
+    return;
+  }
+  union {
+    double dbl;
+    uint64_t uint;
+  } _val, _bkt;
+  _val.dbl = value;
+  if (_val.uint < 4637863191261478912ull) {
+    int bucket =
+        grpc_stats_table_7[((_val.uint - 4623507967449235456ull) >> 48)] + 13;
+    _bkt.dbl = grpc_stats_table_6[bucket];
+    bucket -= (_val.uint < _bkt.uint);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+                             bucket);
+    return;
+  }
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_HTTP2_SEND_FLOWCTL_PER_WRITE,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_6, 64));
+}
+void grpc_stats_inc_server_cqs_checked(int value) {
   value = GPR_CLAMP(value, 0, 64);
   if (value < 3) {
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                             GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, value);
     return;
   }
   union {
@@ -676,13 +646,12 @@
         grpc_stats_table_9[((_val.uint - 4613937818241073152ull) >> 51)] + 3;
     _bkt.dbl = grpc_stats_table_8[bucket];
     bucket -= (_val.uint < _bkt.uint);
-    GRPC_STATS_INC_HISTOGRAM((exec_ctx),
-                             GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
+    GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED, bucket);
     return;
   }
-  GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
-                           grpc_stats_histo_find_bucket_slow(
-                               (exec_ctx), value, grpc_stats_table_8, 8));
+  GRPC_STATS_INC_HISTOGRAM(
+      GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED,
+      grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_8, 8));
 }
 const int grpc_stats_histo_buckets[13] = {64, 128, 64, 64, 64, 64, 64,
                                           64, 64,  64, 64, 64, 8};
@@ -694,7 +663,7 @@
     grpc_stats_table_6, grpc_stats_table_4, grpc_stats_table_6,
     grpc_stats_table_6, grpc_stats_table_6, grpc_stats_table_6,
     grpc_stats_table_8};
-void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx* exec_ctx, int x) = {
+void (*const grpc_stats_inc_histogram[13])(int x) = {
     grpc_stats_inc_call_initial_size,
     grpc_stats_inc_poll_events_returned,
     grpc_stats_inc_tcp_write_size,
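
Note on the generated fast path above: each histogram incrementer clamps its input, counts the first few buckets directly, and otherwise reinterprets the value's IEEE-754 double representation so the exponent bits can index a precomputed table (grpc_stats_table_5/7 and friends); the large ull constants are just the raw bit patterns of double-valued bucket boundaries, and the final compare corrects a possible off-by-one at a boundary. Values outside the fast-path range fall back to grpc_stats_histo_find_bucket_slow() over the same boundary table. A minimal standalone sketch of the exponent trick, assuming nothing from the generated tables:

    // Simplified illustration only (not the generated gRPC code): approximate
    // floor(log2(value)) for a positive int by reading the exponent field of
    // its double representation, avoiding a call to log().
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static int rough_log2_bucket(int value) {
      double d = value;
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));            // well-defined type pun
      return (int)((bits >> 52) & 0x7ff) - 1023;  // unbiased IEEE-754 exponent
    }

    int main() {
      const int samples[] = {1, 2, 13, 1024, 16777216};
      for (int v : samples) {
        printf("%8d -> exponent bucket %d\n", v, rough_log2_bucket(v));
      }
      return 0;
    }

The generated code refines this idea with per-histogram tables so the roughly exponential buckets are exact. Also note the dispatch table change at the end of the file: with exec_ctx gone, grpc_stats_inc_histogram[h](value), indexed by the histogram enum, now takes only the sample value.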
diff --git a/src/core/lib/debug/stats_data.h b/src/core/lib/debug/stats_data.h
index fbfcce8..4504be3 100644
--- a/src/core/lib/debug/stats_data.h
+++ b/src/core/lib/debug/stats_data.h
@@ -24,10 +24,6 @@
 #include <inttypes.h>
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum {
   GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED,
   GRPC_STATS_COUNTER_SERVER_CALLS_CREATED,
@@ -176,334 +172,262 @@
   GRPC_STATS_HISTOGRAM_SERVER_CQS_CHECKED_BUCKETS = 8,
   GRPC_STATS_HISTOGRAM_BUCKETS = 840
 } grpc_stats_histogram_constants;
-#define GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
-#define GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CALLS_CREATED)
-#define GRPC_STATS_INC_CQS_CREATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CQS_CREATED)
-#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED)
-#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
-                         GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED)
-#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED)
-#define GRPC_STATS_INC_SYSCALL_POLL(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_POLL)
-#define GRPC_STATS_INC_SYSCALL_WAIT(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WAIT)
-#define GRPC_STATS_INC_POLLSET_KICK(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK)
-#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                           \
-                         GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER)
-#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN)
-#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD)
-#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV)
-#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD)
-#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
-#define GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_WRITE)
-#define GRPC_STATS_INC_SYSCALL_READ(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SYSCALL_READ)
-#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
-                         GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
-#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
-#define GRPC_STATS_INC_HTTP2_OP_BATCHES(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
-#define GRPC_STATS_INC_HTTP2_OP_CANCEL(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_CANCEL)
-#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                            \
-                         GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA)
-#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE)
-#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                             \
-                         GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA)
-#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                            \
-                         GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA)
-#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE)
-#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                             \
-                         GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
-#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
-#define GRPC_STATS_INC_HTTP2_PINGS_SENT(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
-#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
-#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
-#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
-#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                  \
-      (exec_ctx),                                                          \
+#define GRPC_STATS_INC_CLIENT_CALLS_CREATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CALLS_CREATED)
+#define GRPC_STATS_INC_SERVER_CALLS_CREATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CALLS_CREATED)
+#define GRPC_STATS_INC_CQS_CREATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQS_CREATED)
+#define GRPC_STATS_INC_CLIENT_CHANNELS_CREATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_CHANNELS_CREATED)
+#define GRPC_STATS_INC_CLIENT_SUBCHANNELS_CREATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CLIENT_SUBCHANNELS_CREATED)
+#define GRPC_STATS_INC_SERVER_CHANNELS_CREATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_CHANNELS_CREATED)
+#define GRPC_STATS_INC_SYSCALL_POLL() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_POLL)
+#define GRPC_STATS_INC_SYSCALL_WAIT() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_WAIT)
+#define GRPC_STATS_INC_POLLSET_KICK() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK)
+#define GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICKED_WITHOUT_POLLER)
+#define GRPC_STATS_INC_POLLSET_KICKED_AGAIN() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICKED_AGAIN)
+#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_FD)
+#define GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_WAKEUP_CV)
+#define GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_POLLSET_KICK_OWN_THREAD)
+#define GRPC_STATS_INC_HISTOGRAM_SLOW_LOOKUPS() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HISTOGRAM_SLOW_LOOKUPS)
+#define GRPC_STATS_INC_SYSCALL_WRITE() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_WRITE)
+#define GRPC_STATS_INC_SYSCALL_READ() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SYSCALL_READ)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_BACKUP_POLLERS_CREATED)
+#define GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_TCP_BACKUP_POLLER_POLLS)
+#define GRPC_STATS_INC_HTTP2_OP_BATCHES() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_BATCHES)
+#define GRPC_STATS_INC_HTTP2_OP_CANCEL() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_CANCEL)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_INITIAL_METADATA() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_MESSAGE() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_OP_SEND_TRAILING_METADATA() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_SEND_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_INITIAL_METADATA() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_MESSAGE() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_OP_RECV_TRAILING_METADATA() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_OP_RECV_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_SETTINGS_WRITES() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_SETTINGS_WRITES)
+#define GRPC_STATS_INC_HTTP2_PINGS_SENT() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_PINGS_SENT)
+#define GRPC_STATS_INC_HTTP2_WRITES_BEGUN() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_BEGUN)
+#define GRPC_STATS_INC_HTTP2_WRITES_OFFLOADED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_OFFLOADED)
+#define GRPC_STATS_INC_HTTP2_WRITES_CONTINUED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_WRITES_CONTINUED)
+#define GRPC_STATS_INC_HTTP2_PARTIAL_WRITES() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_PARTIAL_WRITES)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE() \
+  GRPC_STATS_INC_COUNTER(                                          \
       GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_INITIAL_WRITE)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                     \
-      (exec_ctx),                                                             \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                 \
-      (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA( \
-    exec_ctx)                                                             \
-  GRPC_STATS_INC_COUNTER(                                                 \
-      (exec_ctx),                                                         \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA( \
-    exec_ctx)                                                              \
-  GRPC_STATS_INC_COUNTER(                                                  \
-      (exec_ctx),                                                          \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                    \
-      (exec_ctx),                                                            \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                   \
-      (exec_ctx),                                                           \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                \
-      (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                               \
-      (exec_ctx), GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                   \
-      (exec_ctx),                                                           \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL( \
-    exec_ctx)                                                           \
-  GRPC_STATS_INC_COUNTER(                                               \
-      (exec_ctx),                                                       \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL( \
-    exec_ctx)                                                              \
-  GRPC_STATS_INC_COUNTER(                                                  \
-      (exec_ctx),                                                          \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                  \
-      (exec_ctx),                                                          \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING( \
-    exec_ctx)                                                          \
-  GRPC_STATS_INC_COUNTER(                                              \
-      (exec_ctx),                                                      \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING( \
-    exec_ctx)                                                                         \
-  GRPC_STATS_INC_COUNTER(                                                             \
-      (exec_ctx),                                                                     \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE( \
-    exec_ctx)                                                                        \
-  GRPC_STATS_INC_COUNTER(                                                            \
-      (exec_ctx),                                                                    \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                     \
-      (exec_ctx),                                                             \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                   \
-      (exec_ctx),                                                           \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED( \
-    exec_ctx)                                                                        \
-  GRPC_STATS_INC_COUNTER(                                                            \
-      (exec_ctx),                                                                    \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                  \
-      (exec_ctx),                                                          \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE)
-#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM(exec_ctx) \
-  GRPC_STATS_INC_COUNTER(                                                     \
-      (exec_ctx),                                                             \
-      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
-#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                         \
-                         GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN)
-#define GRPC_STATS_INC_HPACK_RECV_INDEXED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_INDEXED)
-#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX)
-#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
-                         GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V)
-#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX)
-#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
-                         GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V)
-#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX)
-#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
-                         GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V)
-#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED)
-#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN)
-#define GRPC_STATS_INC_HPACK_RECV_BINARY(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_RECV_BINARY)
-#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64)
-#define GRPC_STATS_INC_HPACK_SEND_INDEXED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_INDEXED)
-#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX)
-#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
-                         GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V)
-#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX)
-#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
-                         GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V)
-#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX)
-#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
-                         GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V)
-#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED)
-#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN)
-#define GRPC_STATS_INC_HPACK_SEND_BINARY(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_HPACK_SEND_BINARY)
-#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64)
-#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
-#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                            \
-                         GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS)
-#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx) \
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM() \
   GRPC_STATS_INC_COUNTER(                                             \
-      (exec_ctx), GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS)
-#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                      \
-                         GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
-#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                           \
-                         GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED)
-#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx) \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_START_NEW_STREAM)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE() \
+  GRPC_STATS_INC_COUNTER(                                         \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_MESSAGE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA() \
+  GRPC_STATS_INC_COUNTER(                                                  \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_INITIAL_METADATA)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA() \
+  GRPC_STATS_INC_COUNTER(                                                   \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_TRAILING_METADATA)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING() \
   GRPC_STATS_INC_COUNTER(                                            \
-      (exec_ctx), GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS)
-#define GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL(exec_ctx) \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RETRY_SEND_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS() \
   GRPC_STATS_INC_COUNTER(                                           \
-      (exec_ctx), GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL)
-#define GRPC_STATS_INC_CALL_COMBINER_CANCELLED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED)
-#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                            \
-                         GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
-#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                           \
-                         GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
-#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                        \
-                         GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
-#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                       \
-                         GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
-#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
-#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
-#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
-#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                             \
-                         GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
-#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                          \
-                         GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES)
-#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES(exec_ctx) \
-  GRPC_STATS_INC_COUNTER((exec_ctx),                           \
-                         GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES)
-#define GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES(exec_ctx) \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CONTINUE_PINGS)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT() \
+  GRPC_STATS_INC_COUNTER(                                        \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_GOAWAY_SENT)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM() \
+  GRPC_STATS_INC_COUNTER(                                       \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_RST_STREAM)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API() \
   GRPC_STATS_INC_COUNTER(                                           \
-      (exec_ctx), GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES)
-#define GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, value) \
-  grpc_stats_inc_call_initial_size((exec_ctx), (int)(value))
-void grpc_stats_inc_call_initial_size(grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, value) \
-  grpc_stats_inc_poll_events_returned((exec_ctx), (int)(value))
-void grpc_stats_inc_poll_events_returned(grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, value) \
-  grpc_stats_inc_tcp_write_size((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_write_size(grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, value) \
-  grpc_stats_inc_tcp_write_iov_size((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_write_iov_size(grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, value) \
-  grpc_stats_inc_tcp_read_size((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_read_size(grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, value) \
-  grpc_stats_inc_tcp_read_offer((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_read_offer(grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, value) \
-  grpc_stats_inc_tcp_read_offer_iov_size((exec_ctx), (int)(value))
-void grpc_stats_inc_tcp_read_offer_iov_size(grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(exec_ctx, value) \
-  grpc_stats_inc_http2_send_message_size((exec_ctx), (int)(value))
-void grpc_stats_inc_http2_send_message_size(grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(exec_ctx, value) \
-  grpc_stats_inc_http2_send_initial_metadata_per_write((exec_ctx), (int)(value))
-void grpc_stats_inc_http2_send_initial_metadata_per_write(
-    grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(exec_ctx, value) \
-  grpc_stats_inc_http2_send_message_per_write((exec_ctx), (int)(value))
-void grpc_stats_inc_http2_send_message_per_write(grpc_exec_ctx* exec_ctx,
-                                                 int x);
-#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(exec_ctx, value) \
-  grpc_stats_inc_http2_send_trailing_metadata_per_write((exec_ctx),            \
-                                                        (int)(value))
-void grpc_stats_inc_http2_send_trailing_metadata_per_write(
-    grpc_exec_ctx* exec_ctx, int x);
-#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(exec_ctx, value) \
-  grpc_stats_inc_http2_send_flowctl_per_write((exec_ctx), (int)(value))
-void grpc_stats_inc_http2_send_flowctl_per_write(grpc_exec_ctx* exec_ctx,
-                                                 int x);
-#define GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, value) \
-  grpc_stats_inc_server_cqs_checked((exec_ctx), (int)(value))
-void grpc_stats_inc_server_cqs_checked(grpc_exec_ctx* exec_ctx, int x);
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_CLOSE_FROM_API)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL() \
+  GRPC_STATS_INC_COUNTER(                                                \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_STREAM_FLOW_CONTROL)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL() \
+  GRPC_STATS_INC_COUNTER(                                                   \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS() \
+  GRPC_STATS_INC_COUNTER(                                          \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_SEND_SETTINGS)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING() \
+  GRPC_STATS_INC_COUNTER(                                               \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_BDP_ESTIMATOR_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING() \
+  GRPC_STATS_INC_COUNTER(                                                              \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_SETTING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE() \
+  GRPC_STATS_INC_COUNTER(                                                             \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FLOW_CONTROL_UNSTALLED_BY_UPDATE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING() \
+  GRPC_STATS_INC_COUNTER(                                             \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_APPLICATION_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING() \
+  GRPC_STATS_INC_COUNTER(                                           \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_KEEPALIVE_PING)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED() \
+  GRPC_STATS_INC_COUNTER(                                                             \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_TRANSPORT_FLOW_CONTROL_UNSTALLED)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE() \
+  GRPC_STATS_INC_COUNTER(                                          \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_PING_RESPONSE)
+#define GRPC_STATS_INC_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM() \
+  GRPC_STATS_INC_COUNTER(                                             \
+      GRPC_STATS_COUNTER_HTTP2_INITIATE_WRITE_DUE_TO_FORCE_RST_STREAM)
+#define GRPC_STATS_INC_HTTP2_SPURIOUS_WRITES_BEGUN() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HTTP2_SPURIOUS_WRITES_BEGUN)
+#define GRPC_STATS_INC_HPACK_RECV_INDEXED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_INDEXED)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_INCIDX_V() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_INCIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NOTIDX_V() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NOTIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX)
+#define GRPC_STATS_INC_HPACK_RECV_LITHDR_NVRIDX_V() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_LITHDR_NVRIDX_V)
+#define GRPC_STATS_INC_HPACK_RECV_UNCOMPRESSED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_UNCOMPRESSED)
+#define GRPC_STATS_INC_HPACK_RECV_HUFFMAN() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_HUFFMAN)
+#define GRPC_STATS_INC_HPACK_RECV_BINARY() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_BINARY)
+#define GRPC_STATS_INC_HPACK_RECV_BINARY_BASE64() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_RECV_BINARY_BASE64)
+#define GRPC_STATS_INC_HPACK_SEND_INDEXED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_INDEXED)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_INCIDX_V() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_INCIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NOTIDX_V() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NOTIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX)
+#define GRPC_STATS_INC_HPACK_SEND_LITHDR_NVRIDX_V() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_LITHDR_NVRIDX_V)
+#define GRPC_STATS_INC_HPACK_SEND_UNCOMPRESSED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_UNCOMPRESSED)
+#define GRPC_STATS_INC_HPACK_SEND_HUFFMAN() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_HUFFMAN)
+#define GRPC_STATS_INC_HPACK_SEND_BINARY() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_BINARY)
+#define GRPC_STATS_INC_HPACK_SEND_BINARY_BASE64() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_HPACK_SEND_BINARY_BASE64)
+#define GRPC_STATS_INC_COMBINER_LOCKS_INITIATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_INITIATED)
+#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_ITEMS)
+#define GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS() \
+  GRPC_STATS_INC_COUNTER(                                     \
+      GRPC_STATS_COUNTER_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS)
+#define GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_COMBINER_LOCKS_OFFLOADED)
+#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_INITIATED)
+#define GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS)
+#define GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_SET_NOTIFY_ON_CANCEL)
+#define GRPC_STATS_INC_CALL_COMBINER_CANCELLED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CALL_COMBINER_CANCELLED)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_SHORT_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_LONG_ITEMS)
+#define GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_SCHEDULED_TO_SELF)
+#define GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_WAKEUP_INITIATED)
+#define GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_QUEUE_DRAINED)
+#define GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_EXECUTOR_PUSH_RETRIES)
+#define GRPC_STATS_INC_SERVER_REQUESTED_CALLS() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_REQUESTED_CALLS)
+#define GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_SERVER_SLOWPATH_REQUESTS_QUEUED)
+#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_FAILURES)
+#define GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRYLOCK_SUCCESSES)
+#define GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES() \
+  GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES)
+#define GRPC_STATS_INC_CALL_INITIAL_SIZE(value) \
+  grpc_stats_inc_call_initial_size((int)(value))
+void grpc_stats_inc_call_initial_size(int x);
+#define GRPC_STATS_INC_POLL_EVENTS_RETURNED(value) \
+  grpc_stats_inc_poll_events_returned((int)(value))
+void grpc_stats_inc_poll_events_returned(int x);
+#define GRPC_STATS_INC_TCP_WRITE_SIZE(value) \
+  grpc_stats_inc_tcp_write_size((int)(value))
+void grpc_stats_inc_tcp_write_size(int x);
+#define GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(value) \
+  grpc_stats_inc_tcp_write_iov_size((int)(value))
+void grpc_stats_inc_tcp_write_iov_size(int x);
+#define GRPC_STATS_INC_TCP_READ_SIZE(value) \
+  grpc_stats_inc_tcp_read_size((int)(value))
+void grpc_stats_inc_tcp_read_size(int x);
+#define GRPC_STATS_INC_TCP_READ_OFFER(value) \
+  grpc_stats_inc_tcp_read_offer((int)(value))
+void grpc_stats_inc_tcp_read_offer(int x);
+#define GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(value) \
+  grpc_stats_inc_tcp_read_offer_iov_size((int)(value))
+void grpc_stats_inc_tcp_read_offer_iov_size(int x);
+#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_SIZE(value) \
+  grpc_stats_inc_http2_send_message_size((int)(value))
+void grpc_stats_inc_http2_send_message_size(int x);
+#define GRPC_STATS_INC_HTTP2_SEND_INITIAL_METADATA_PER_WRITE(value) \
+  grpc_stats_inc_http2_send_initial_metadata_per_write((int)(value))
+void grpc_stats_inc_http2_send_initial_metadata_per_write(int x);
+#define GRPC_STATS_INC_HTTP2_SEND_MESSAGE_PER_WRITE(value) \
+  grpc_stats_inc_http2_send_message_per_write((int)(value))
+void grpc_stats_inc_http2_send_message_per_write(int x);
+#define GRPC_STATS_INC_HTTP2_SEND_TRAILING_METADATA_PER_WRITE(value) \
+  grpc_stats_inc_http2_send_trailing_metadata_per_write((int)(value))
+void grpc_stats_inc_http2_send_trailing_metadata_per_write(int x);
+#define GRPC_STATS_INC_HTTP2_SEND_FLOWCTL_PER_WRITE(value) \
+  grpc_stats_inc_http2_send_flowctl_per_write((int)(value))
+void grpc_stats_inc_http2_send_flowctl_per_write(int x);
+#define GRPC_STATS_INC_SERVER_CQS_CHECKED(value) \
+  grpc_stats_inc_server_cqs_checked((int)(value))
+void grpc_stats_inc_server_cqs_checked(int x);
 extern const int grpc_stats_histo_buckets[13];
 extern const int grpc_stats_histo_start[13];
 extern const int* const grpc_stats_histo_bucket_boundaries[13];
-extern void (*const grpc_stats_inc_histogram[13])(grpc_exec_ctx* exec_ctx,
-                                                  int x);
-
-#ifdef __cplusplus
-}
-#endif
+extern void (*const grpc_stats_inc_histogram[13])(int x);
 
 #endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */
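
The header change above is mechanical but touches every call site: counter macros become zero-argument and histogram macros take just the sample. A hypothetical call site after the change (the include path is the one these macros are normally pulled in through; everything except the macro names is a placeholder):

    // Sketch of a call site after this change; previously each of these took
    // an explicit grpc_exec_ctx* as the first argument.
    #include "src/core/lib/debug/stats.h"  // assumed include that provides stats_data.h

    static void record_read(size_t bytes_read) {
      GRPC_STATS_INC_SYSCALL_READ();                  // was (exec_ctx)
      GRPC_STATS_INC_TCP_READ_SIZE((int)bytes_read);  // was (exec_ctx, bytes_read)
    }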
diff --git a/src/core/lib/debug/trace.h b/src/core/lib/debug/trace.h
index b58c16f..69ddd80 100644
--- a/src/core/lib/debug/trace.h
+++ b/src/core/lib/debug/trace.h
@@ -23,25 +23,15 @@
 #include <grpc/support/port_platform.h>
 #include <stdbool.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void grpc_tracer_init(const char* env_var_name);
 void grpc_tracer_shutdown(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #if defined(__has_feature)
 #if __has_feature(thread_sanitizer)
 #define GRPC_THREADSAFE_TRACER
 #endif
 #endif
 
-#ifdef __cplusplus
-
 namespace grpc_core {
 
 class TraceFlag;
@@ -110,6 +100,4 @@
 
 }  // namespace grpc_core
 
-#endif  // __cplusplus
-
 #endif /* GRPC_CORE_LIB_DEBUG_TRACE_H */
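
The extern "C" guards dropped here (and in the headers that follow) reflect that these translation units are now built as C++ only, so the declarations take C++ linkage. A standalone illustration of what that means for symbol names, with nothing gRPC-specific in it:

    // Standalone sketch (not gRPC code): the same signature under C vs C++
    // linkage yields different symbol names, which is why headers shared with
    // C callers needed extern "C" guards -- and why the guards can go once
    // every caller is compiled as C++.
    #include <cstdio>

    extern "C" void with_c_linkage() { std::puts("C linkage"); }  // symbol: with_c_linkage
    void with_cpp_linkage() { std::puts("C++ linkage"); }  // mangled, e.g. _Z16with_cpp_linkagev

    int main() {
      with_c_linkage();
      with_cpp_linkage();
      return 0;
    }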
diff --git a/src/core/lib/http/format_request.h b/src/core/lib/http/format_request.h
index 3205480..c191965 100644
--- a/src/core/lib/http/format_request.h
+++ b/src/core/lib/http/format_request.h
@@ -22,10 +22,6 @@
 #include <grpc/slice.h>
 #include "src/core/lib/http/httpcli.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 grpc_slice grpc_httpcli_format_get_request(const grpc_httpcli_request* request);
 grpc_slice grpc_httpcli_format_post_request(const grpc_httpcli_request* request,
                                             const char* body_bytes,
@@ -33,8 +29,4 @@
 grpc_slice grpc_httpcli_format_connect_request(
     const grpc_httpcli_request* request);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_HTTP_FORMAT_REQUEST_H */
diff --git a/src/core/lib/http/httpcli.cc b/src/core/lib/http/httpcli.cc
index 73b484b..ed874c4 100644
--- a/src/core/lib/http/httpcli.cc
+++ b/src/core/lib/http/httpcli.cc
@@ -63,13 +63,11 @@
 static grpc_httpcli_get_override g_get_override = nullptr;
 static grpc_httpcli_post_override g_post_override = nullptr;
 
-static void plaintext_handshake(grpc_exec_ctx* exec_ctx, void* arg,
-                                grpc_endpoint* endpoint, const char* host,
-                                grpc_millis deadline,
-                                void (*on_done)(grpc_exec_ctx* exec_ctx,
-                                                void* arg,
+static void plaintext_handshake(void* arg, grpc_endpoint* endpoint,
+                                const char* host, grpc_millis deadline,
+                                void (*on_done)(void* arg,
                                                 grpc_endpoint* endpoint)) {
-  on_done(exec_ctx, arg, endpoint);
+  on_done(arg, endpoint);
 }
 
 const grpc_httpcli_handshaker grpc_httpcli_plaintext = {"http",
@@ -79,34 +77,31 @@
   context->pollset_set = grpc_pollset_set_create();
 }
 
-void grpc_httpcli_context_destroy(grpc_exec_ctx* exec_ctx,
-                                  grpc_httpcli_context* context) {
-  grpc_pollset_set_destroy(exec_ctx, context->pollset_set);
+void grpc_httpcli_context_destroy(grpc_httpcli_context* context) {
+  grpc_pollset_set_destroy(context->pollset_set);
 }
 
-static void next_address(grpc_exec_ctx* exec_ctx, internal_request* req,
-                         grpc_error* due_to_error);
+static void next_address(internal_request* req, grpc_error* due_to_error);
 
-static void finish(grpc_exec_ctx* exec_ctx, internal_request* req,
-                   grpc_error* error) {
-  grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent,
+static void finish(internal_request* req, grpc_error* error) {
+  grpc_polling_entity_del_from_pollset_set(req->pollent,
                                            req->context->pollset_set);
-  GRPC_CLOSURE_SCHED(exec_ctx, req->on_done, error);
+  GRPC_CLOSURE_SCHED(req->on_done, error);
   grpc_http_parser_destroy(&req->parser);
   if (req->addresses != nullptr) {
     grpc_resolved_addresses_destroy(req->addresses);
   }
   if (req->ep != nullptr) {
-    grpc_endpoint_destroy(exec_ctx, req->ep);
+    grpc_endpoint_destroy(req->ep);
   }
-  grpc_slice_unref_internal(exec_ctx, req->request_text);
+  grpc_slice_unref_internal(req->request_text);
   gpr_free(req->host);
   gpr_free(req->ssl_host_override);
   grpc_iomgr_unregister_object(&req->iomgr_obj);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &req->incoming);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &req->outgoing);
+  grpc_slice_buffer_destroy_internal(&req->incoming);
+  grpc_slice_buffer_destroy_internal(&req->outgoing);
   GRPC_ERROR_UNREF(req->overall_error);
-  grpc_resource_quota_unref_internal(exec_ctx, req->resource_quota);
+  grpc_resource_quota_unref_internal(req->resource_quota);
   gpr_free(req);
 }
 
@@ -124,12 +119,11 @@
   gpr_free(addr_text);
 }
 
-static void do_read(grpc_exec_ctx* exec_ctx, internal_request* req) {
-  grpc_endpoint_read(exec_ctx, req->ep, &req->incoming, &req->on_read);
+static void do_read(internal_request* req) {
+  grpc_endpoint_read(req->ep, &req->incoming, &req->on_read);
 }
 
-static void on_read(grpc_exec_ctx* exec_ctx, void* user_data,
-                    grpc_error* error) {
+static void on_read(void* user_data, grpc_error* error) {
   internal_request* req = (internal_request*)user_data;
   size_t i;
 
@@ -139,77 +133,70 @@
       grpc_error* err = grpc_http_parser_parse(
           &req->parser, req->incoming.slices[i], nullptr);
       if (err != GRPC_ERROR_NONE) {
-        finish(exec_ctx, req, err);
+        finish(req, err);
         return;
       }
     }
   }
 
   if (error == GRPC_ERROR_NONE) {
-    do_read(exec_ctx, req);
+    do_read(req);
   } else if (!req->have_read_byte) {
-    next_address(exec_ctx, req, GRPC_ERROR_REF(error));
+    next_address(req, GRPC_ERROR_REF(error));
   } else {
-    finish(exec_ctx, req, grpc_http_parser_eof(&req->parser));
+    finish(req, grpc_http_parser_eof(&req->parser));
   }
 }
 
-static void on_written(grpc_exec_ctx* exec_ctx, internal_request* req) {
-  do_read(exec_ctx, req);
-}
+static void on_written(internal_request* req) { do_read(req); }
 
-static void done_write(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void done_write(void* arg, grpc_error* error) {
   internal_request* req = (internal_request*)arg;
   if (error == GRPC_ERROR_NONE) {
-    on_written(exec_ctx, req);
+    on_written(req);
   } else {
-    next_address(exec_ctx, req, GRPC_ERROR_REF(error));
+    next_address(req, GRPC_ERROR_REF(error));
   }
 }
 
-static void start_write(grpc_exec_ctx* exec_ctx, internal_request* req) {
+static void start_write(internal_request* req) {
   grpc_slice_ref_internal(req->request_text);
   grpc_slice_buffer_add(&req->outgoing, req->request_text);
-  grpc_endpoint_write(exec_ctx, req->ep, &req->outgoing, &req->done_write);
+  grpc_endpoint_write(req->ep, &req->outgoing, &req->done_write);
 }
 
-static void on_handshake_done(grpc_exec_ctx* exec_ctx, void* arg,
-                              grpc_endpoint* ep) {
+static void on_handshake_done(void* arg, grpc_endpoint* ep) {
   internal_request* req = (internal_request*)arg;
 
   if (!ep) {
-    next_address(
-        exec_ctx, req,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unexplained handshake failure"));
+    next_address(req, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                          "Unexplained handshake failure"));
     return;
   }
 
   req->ep = ep;
-  start_write(exec_ctx, req);
+  start_write(req);
 }
 
-static void on_connected(grpc_exec_ctx* exec_ctx, void* arg,
-                         grpc_error* error) {
+static void on_connected(void* arg, grpc_error* error) {
   internal_request* req = (internal_request*)arg;
 
   if (!req->ep) {
-    next_address(exec_ctx, req, GRPC_ERROR_REF(error));
+    next_address(req, GRPC_ERROR_REF(error));
     return;
   }
   req->handshaker->handshake(
-      exec_ctx, req, req->ep,
-      req->ssl_host_override ? req->ssl_host_override : req->host,
+      req, req->ep, req->ssl_host_override ? req->ssl_host_override : req->host,
       req->deadline, on_handshake_done);
 }
 
-static void next_address(grpc_exec_ctx* exec_ctx, internal_request* req,
-                         grpc_error* error) {
+static void next_address(internal_request* req, grpc_error* error) {
   grpc_resolved_address* addr;
   if (error != GRPC_ERROR_NONE) {
     append_error(req, error);
   }
   if (req->next_address == req->addresses->naddrs) {
-    finish(exec_ctx, req,
+    finish(req,
            GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                "Failed HTTP requests to all targets", &req->overall_error, 1));
     return;
@@ -221,23 +208,21 @@
       (char*)GRPC_ARG_RESOURCE_QUOTA, req->resource_quota,
       grpc_resource_quota_arg_vtable());
   grpc_channel_args args = {1, &arg};
-  grpc_tcp_client_connect(exec_ctx, &req->connected, &req->ep,
-                          req->context->pollset_set, &args, addr,
-                          req->deadline);
+  grpc_tcp_client_connect(&req->connected, &req->ep, req->context->pollset_set,
+                          &args, addr, req->deadline);
 }
 
-static void on_resolved(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_resolved(void* arg, grpc_error* error) {
   internal_request* req = (internal_request*)arg;
   if (error != GRPC_ERROR_NONE) {
-    finish(exec_ctx, req, GRPC_ERROR_REF(error));
+    finish(req, GRPC_ERROR_REF(error));
     return;
   }
   req->next_address = 0;
-  next_address(exec_ctx, req, GRPC_ERROR_NONE);
+  next_address(req, GRPC_ERROR_NONE);
 }
 
-static void internal_request_begin(grpc_exec_ctx* exec_ctx,
-                                   grpc_httpcli_context* context,
+static void internal_request_begin(grpc_httpcli_context* context,
                                    grpc_polling_entity* pollent,
                                    grpc_resource_quota* resource_quota,
                                    const grpc_httpcli_request* request,
@@ -267,33 +252,31 @@
   req->ssl_host_override = gpr_strdup(request->ssl_host_override);
 
   GPR_ASSERT(pollent);
-  grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent,
+  grpc_polling_entity_add_to_pollset_set(req->pollent,
                                          req->context->pollset_set);
   grpc_resolve_address(
-      exec_ctx, request->host, req->handshaker->default_port,
-      req->context->pollset_set,
+      request->host, req->handshaker->default_port, req->context->pollset_set,
       GRPC_CLOSURE_CREATE(on_resolved, req, grpc_schedule_on_exec_ctx),
       &req->addresses);
 }
 
-void grpc_httpcli_get(grpc_exec_ctx* exec_ctx, grpc_httpcli_context* context,
+void grpc_httpcli_get(grpc_httpcli_context* context,
                       grpc_polling_entity* pollent,
                       grpc_resource_quota* resource_quota,
                       const grpc_httpcli_request* request, grpc_millis deadline,
                       grpc_closure* on_done, grpc_httpcli_response* response) {
   char* name;
-  if (g_get_override &&
-      g_get_override(exec_ctx, request, deadline, on_done, response)) {
+  if (g_get_override && g_get_override(request, deadline, on_done, response)) {
     return;
   }
   gpr_asprintf(&name, "HTTP:GET:%s:%s", request->host, request->http.path);
-  internal_request_begin(exec_ctx, context, pollent, resource_quota, request,
-                         deadline, on_done, response, name,
+  internal_request_begin(context, pollent, resource_quota, request, deadline,
+                         on_done, response, name,
                          grpc_httpcli_format_get_request(request));
   gpr_free(name);
 }
 
-void grpc_httpcli_post(grpc_exec_ctx* exec_ctx, grpc_httpcli_context* context,
+void grpc_httpcli_post(grpc_httpcli_context* context,
                        grpc_polling_entity* pollent,
                        grpc_resource_quota* resource_quota,
                        const grpc_httpcli_request* request,
@@ -301,16 +284,14 @@
                        grpc_millis deadline, grpc_closure* on_done,
                        grpc_httpcli_response* response) {
   char* name;
-  if (g_post_override &&
-      g_post_override(exec_ctx, request, body_bytes, body_size, deadline,
-                      on_done, response)) {
+  if (g_post_override && g_post_override(request, body_bytes, body_size,
+                                         deadline, on_done, response)) {
     return;
   }
   gpr_asprintf(&name, "HTTP:POST:%s:%s", request->host, request->http.path);
   internal_request_begin(
-      exec_ctx, context, pollent, resource_quota, request, deadline, on_done,
-      response, name,
-      grpc_httpcli_format_post_request(request, body_bytes, body_size));
+      context, pollent, resource_quota, request, deadline, on_done, response,
+      name, grpc_httpcli_format_post_request(request, body_bytes, body_size));
   gpr_free(name);
 }
 
diff --git a/src/core/lib/http/httpcli.h b/src/core/lib/http/httpcli.h
index a341134..72d20cc 100644
--- a/src/core/lib/http/httpcli.h
+++ b/src/core/lib/http/httpcli.h
@@ -32,10 +32,6 @@
 /* User agent this library reports */
 #define GRPC_HTTPCLI_USER_AGENT "grpc-httpcli/0.0"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Tracks in-progress http requests
    TODO(ctiller): allow caching and capturing multiple requests for the
                   same content and combining them */
@@ -45,10 +41,9 @@
 
 typedef struct {
   const char* default_port;
-  void (*handshake)(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* endpoint,
-                    const char* host, grpc_millis deadline,
-                    void (*on_done)(grpc_exec_ctx* exec_ctx, void* arg,
-                                    grpc_endpoint* endpoint));
+  void (*handshake)(void* arg, grpc_endpoint* endpoint, const char* host,
+                    grpc_millis deadline,
+                    void (*on_done)(void* arg, grpc_endpoint* endpoint));
 } grpc_httpcli_handshaker;
 
 extern const grpc_httpcli_handshaker grpc_httpcli_plaintext;
@@ -72,8 +67,7 @@
 typedef struct grpc_http_response grpc_httpcli_response;
 
 void grpc_httpcli_context_init(grpc_httpcli_context* context);
-void grpc_httpcli_context_destroy(grpc_exec_ctx* exec_ctx,
-                                  grpc_httpcli_context* context);
+void grpc_httpcli_context_destroy(grpc_httpcli_context* context);
 
 /* Asynchronously perform a HTTP GET.
    'context' specifies the http context under which to do the get
@@ -84,7 +78,7 @@
      destroyed once the call returns
    'deadline' contains a deadline for the request (or gpr_inf_future)
    'on_response' is a callback to report results to */
-void grpc_httpcli_get(grpc_exec_ctx* exec_ctx, grpc_httpcli_context* context,
+void grpc_httpcli_get(grpc_httpcli_context* context,
                       grpc_polling_entity* pollent,
                       grpc_resource_quota* resource_quota,
                       const grpc_httpcli_request* request, grpc_millis deadline,
@@ -105,7 +99,7 @@
      lifetime of the request
    'on_response' is a callback to report results to
    Does not support ?var1=val1&var2=val2 in the path. */
-void grpc_httpcli_post(grpc_exec_ctx* exec_ctx, grpc_httpcli_context* context,
+void grpc_httpcli_post(grpc_httpcli_context* context,
                        grpc_polling_entity* pollent,
                        grpc_resource_quota* resource_quota,
                        const grpc_httpcli_request* request,
@@ -114,21 +108,18 @@
                        grpc_httpcli_response* response);
 
 /* override functions return 1 if they handled the request, 0 otherwise */
-typedef int (*grpc_httpcli_get_override)(grpc_exec_ctx* exec_ctx,
-                                         const grpc_httpcli_request* request,
+typedef int (*grpc_httpcli_get_override)(const grpc_httpcli_request* request,
                                          grpc_millis deadline,
                                          grpc_closure* on_complete,
                                          grpc_httpcli_response* response);
-typedef int (*grpc_httpcli_post_override)(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    const char* body_bytes, size_t body_size, grpc_millis deadline,
-    grpc_closure* on_complete, grpc_httpcli_response* response);
+typedef int (*grpc_httpcli_post_override)(const grpc_httpcli_request* request,
+                                          const char* body_bytes,
+                                          size_t body_size,
+                                          grpc_millis deadline,
+                                          grpc_closure* on_complete,
+                                          grpc_httpcli_response* response);
 
 void grpc_httpcli_set_override(grpc_httpcli_get_override get,
                                grpc_httpcli_post_override post);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_HTTP_HTTPCLI_H */
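Note: with the core tree now compiled as C++, the extern "C" guards become unnecessary and are dropped from this and the following headers. A hedged sketch of issuing a GET against the trimmed-down signature above; the host/path values and the static response object are illustrative only:

    static void on_response(void* arg, grpc_error* error) {
      // inspect ((grpc_httpcli_response*)arg)->status / body here
    }

    static void fetch(grpc_httpcli_context* ctx, grpc_polling_entity* pollent,
                      grpc_resource_quota* quota, grpc_millis deadline) {
      grpc_core::ExecCtx exec_ctx;
      grpc_httpcli_request req;
      memset(&req, 0, sizeof(req));
      req.host = (char*)"example.org";   // illustrative target
      req.http.path = (char*)"/";
      static grpc_httpcli_response rsp;  // must outlive the async call
      grpc_httpcli_get(ctx, pollent, quota, &req, deadline,
                       GRPC_CLOSURE_CREATE(on_response, &rsp,
                                           grpc_schedule_on_exec_ctx),
                       &rsp);
    }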
diff --git a/src/core/lib/http/httpcli_security_connector.cc b/src/core/lib/http/httpcli_security_connector.cc
index d25fba5..bfb536a 100644
--- a/src/core/lib/http/httpcli_security_connector.cc
+++ b/src/core/lib/http/httpcli_security_connector.cc
@@ -38,8 +38,7 @@
   char* secure_peer_name;
 } grpc_httpcli_ssl_channel_security_connector;
 
-static void httpcli_ssl_destroy(grpc_exec_ctx* exec_ctx,
-                                grpc_security_connector* sc) {
+static void httpcli_ssl_destroy(grpc_security_connector* sc) {
   grpc_httpcli_ssl_channel_security_connector* c =
       (grpc_httpcli_ssl_channel_security_connector*)sc;
   if (c->handshaker_factory != nullptr) {
@@ -50,8 +49,7 @@
   gpr_free(sc);
 }
 
-static void httpcli_ssl_add_handshakers(grpc_exec_ctx* exec_ctx,
-                                        grpc_channel_security_connector* sc,
+static void httpcli_ssl_add_handshakers(grpc_channel_security_connector* sc,
                                         grpc_handshake_manager* handshake_mgr) {
   grpc_httpcli_ssl_channel_security_connector* c =
       (grpc_httpcli_ssl_channel_security_connector*)sc;
@@ -65,13 +63,11 @@
     }
   }
   grpc_handshake_manager_add(
-      handshake_mgr,
-      grpc_security_handshaker_create(
-          exec_ctx, tsi_create_adapter_handshaker(handshaker), &sc->base));
+      handshake_mgr, grpc_security_handshaker_create(
+                         tsi_create_adapter_handshaker(handshaker), &sc->base));
 }
 
-static void httpcli_ssl_check_peer(grpc_exec_ctx* exec_ctx,
-                                   grpc_security_connector* sc, tsi_peer peer,
+static void httpcli_ssl_check_peer(grpc_security_connector* sc, tsi_peer peer,
                                    grpc_auth_context** auth_context,
                                    grpc_closure* on_peer_checked) {
   grpc_httpcli_ssl_channel_security_connector* c =
@@ -87,7 +83,7 @@
     error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
     gpr_free(msg);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
+  GRPC_CLOSURE_SCHED(on_peer_checked, error);
   tsi_peer_destruct(&peer);
 }
 
@@ -104,8 +100,8 @@
     httpcli_ssl_destroy, httpcli_ssl_check_peer, httpcli_ssl_cmp};
 
 static grpc_security_status httpcli_ssl_channel_security_connector_create(
-    grpc_exec_ctx* exec_ctx, const char* pem_root_certs,
-    const char* secure_peer_name, grpc_channel_security_connector** sc) {
+    const char* pem_root_certs, const char* secure_peer_name,
+    grpc_channel_security_connector** sc) {
   tsi_result result = TSI_OK;
   grpc_httpcli_ssl_channel_security_connector* c;
 
@@ -128,12 +124,12 @@
   if (result != TSI_OK) {
     gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
             tsi_result_to_string(result));
-    httpcli_ssl_destroy(exec_ctx, &c->base.base);
+    httpcli_ssl_destroy(&c->base.base);
     *sc = nullptr;
     return GRPC_SECURITY_ERROR;
   }
   // We don't actually need a channel credentials object in this case,
-  // but we set it to a non-NULL address so that we don't trigger
+  // but we set it to a non-nullptr address so that we don't trigger
   // assertions in grpc_channel_security_connector_cmp().
   c->base.channel_creds = (grpc_channel_credentials*)1;
   c->base.add_handshakers = httpcli_ssl_add_handshakers;
@@ -144,40 +140,37 @@
 /* handshaker */
 
 typedef struct {
-  void (*func)(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* endpoint);
+  void (*func)(void* arg, grpc_endpoint* endpoint);
   void* arg;
   grpc_handshake_manager* handshake_mgr;
 } on_done_closure;
 
-static void on_handshake_done(grpc_exec_ctx* exec_ctx, void* arg,
-                              grpc_error* error) {
+static void on_handshake_done(void* arg, grpc_error* error) {
   grpc_handshaker_args* args = (grpc_handshaker_args*)arg;
   on_done_closure* c = (on_done_closure*)args->user_data;
   if (error != GRPC_ERROR_NONE) {
     const char* msg = grpc_error_string(error);
     gpr_log(GPR_ERROR, "Secure transport setup failed: %s", msg);
 
-    c->func(exec_ctx, c->arg, nullptr);
+    c->func(c->arg, nullptr);
   } else {
-    grpc_channel_args_destroy(exec_ctx, args->args);
-    grpc_slice_buffer_destroy_internal(exec_ctx, args->read_buffer);
+    grpc_channel_args_destroy(args->args);
+    grpc_slice_buffer_destroy_internal(args->read_buffer);
     gpr_free(args->read_buffer);
-    c->func(exec_ctx, c->arg, args->endpoint);
+    c->func(c->arg, args->endpoint);
   }
-  grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
+  grpc_handshake_manager_destroy(c->handshake_mgr);
   gpr_free(c);
 }
 
-static void ssl_handshake(grpc_exec_ctx* exec_ctx, void* arg,
-                          grpc_endpoint* tcp, const char* host,
+static void ssl_handshake(void* arg, grpc_endpoint* tcp, const char* host,
                           grpc_millis deadline,
-                          void (*on_done)(grpc_exec_ctx* exec_ctx, void* arg,
-                                          grpc_endpoint* endpoint)) {
+                          void (*on_done)(void* arg, grpc_endpoint* endpoint)) {
   on_done_closure* c = (on_done_closure*)gpr_malloc(sizeof(*c));
   const char* pem_root_certs = grpc_get_default_ssl_roots();
   if (pem_root_certs == nullptr) {
     gpr_log(GPR_ERROR, "Could not get default pem root certs.");
-    on_done(exec_ctx, arg, nullptr);
+    on_done(arg, nullptr);
     gpr_free(c);
     return;
   }
@@ -185,15 +178,16 @@
   c->arg = arg;
   grpc_channel_security_connector* sc = nullptr;
   GPR_ASSERT(httpcli_ssl_channel_security_connector_create(
-                 exec_ctx, pem_root_certs, host, &sc) == GRPC_SECURITY_OK);
+                 pem_root_certs, host, &sc) == GRPC_SECURITY_OK);
   grpc_arg channel_arg = grpc_security_connector_to_arg(&sc->base);
   grpc_channel_args args = {1, &channel_arg};
   c->handshake_mgr = grpc_handshake_manager_create();
-  grpc_handshakers_add(exec_ctx, HANDSHAKER_CLIENT, &args, c->handshake_mgr);
+  grpc_handshakers_add(HANDSHAKER_CLIENT, &args, c->handshake_mgr);
   grpc_handshake_manager_do_handshake(
-      exec_ctx, c->handshake_mgr, tcp, nullptr /* channel_args */, deadline,
-      nullptr /* acceptor */, on_handshake_done, c /* user_data */);
-  GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &sc->base, "httpcli");
+      c->handshake_mgr, nullptr /* interested_parties */, tcp,
+      nullptr /* channel_args */, deadline, nullptr /* acceptor */,
+      on_handshake_done, c /* user_data */);
+  GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "httpcli");
 }
 
 const grpc_httpcli_handshaker grpc_httpcli_ssl = {"https", ssl_handshake};
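Note: the grpc_httpcli_handshaker callback shape declared in httpcli.h loses its grpc_exec_ctx* parameters, which is what the ssl_handshake rewrite above conforms to. A hedged sketch of a trivial handshaker matching the new shape (the real grpc_httpcli_plaintext lives elsewhere; this one is purely illustrative):

    static void plaintext_handshake(void* arg, grpc_endpoint* endpoint,
                                    const char* host, grpc_millis deadline,
                                    void (*on_done)(void* arg,
                                                    grpc_endpoint* endpoint)) {
      (void)host;
      (void)deadline;
      on_done(arg, endpoint);  // nothing to negotiate; hand the endpoint back
    }

    static const grpc_httpcli_handshaker example_plaintext = {
        "http", plaintext_handshake};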
diff --git a/src/core/lib/http/parser.h b/src/core/lib/http/parser.h
index 391bd35..5fef448 100644
--- a/src/core/lib/http/parser.h
+++ b/src/core/lib/http/parser.h
@@ -27,10 +27,6 @@
 /* Maximum length of a header string of the form 'Key: Value\r\n' */
 #define GRPC_HTTP_PARSER_MAX_HEADER_LENGTH 4096
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* A single header to be passed in a request */
 typedef struct grpc_http_header {
   char* key;
@@ -113,8 +109,4 @@
 
 extern grpc_core::TraceFlag grpc_http1_trace;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_HTTP_PARSER_H */
diff --git a/src/core/lib/iomgr/block_annotate.h b/src/core/lib/iomgr/block_annotate.h
index fcbfe9e..a57873a 100644
--- a/src/core/lib/iomgr/block_annotate.h
+++ b/src/core/lib/iomgr/block_annotate.h
@@ -19,17 +19,9 @@
 #ifndef GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H
 #define GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void gpr_thd_start_blocking_region();
 void gpr_thd_end_blocking_region();
 
-#ifdef __cplusplus
-}
-#endif
-
 /* These annotations identify the beginning and end of regions where
    the code may block for reasons other than synchronization functions.
    These include poll, epoll, and getaddrinfo. */
@@ -39,26 +31,27 @@
   do {                                        \
     gpr_thd_start_blocking_region();          \
   } while (0)
+#define GRPC_SCHEDULING_END_BLOCKING_REGION     \
+  do {                                          \
+    gpr_thd_end_blocking_region();              \
+    grpc_core::ExecCtx::Get()->InvalidateNow(); \
+  } while (0)
 #define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \
   do {                                                  \
     gpr_thd_end_blocking_region();                      \
   } while (0)
-#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \
-  do {                                                        \
-    gpr_thd_end_blocking_region();                            \
-    grpc_exec_ctx_invalidate_now((ec));                       \
-  } while (0)
+
 #else
 #define GRPC_SCHEDULING_START_BLOCKING_REGION \
   do {                                        \
   } while (0)
+#define GRPC_SCHEDULING_END_BLOCKING_REGION     \
+  do {                                          \
+    grpc_core::ExecCtx::Get()->InvalidateNow(); \
+  } while (0)
 #define GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX \
   do {                                                  \
   } while (0)
-#define GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(ec) \
-  do {                                                        \
-    grpc_exec_ctx_invalidate_now((ec));                       \
-  } while (0)
 #endif
 
 #endif /* GRPC_CORE_LIB_IOMGR_BLOCK_ANNOTATE_H */
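Note: the _WITH_EXEC_CTX variant of the end-of-blocking-region macro goes away; the plain GRPC_SCHEDULING_END_BLOCKING_REGION now invalidates the cached clock on the thread-local ExecCtx itself. A hedged usage sketch, with blocking_syscall() standing in for poll/epoll/getaddrinfo:

    void wait_for_event() {
      GRPC_SCHEDULING_START_BLOCKING_REGION;
      blocking_syscall();                   // hypothetical blocking call
      GRPC_SCHEDULING_END_BLOCKING_REGION;  // also refreshes ExecCtx::Get()'s now
    }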
diff --git a/src/core/lib/iomgr/call_combiner.cc b/src/core/lib/iomgr/call_combiner.cc
index b5910b4..a9f48fb 100644
--- a/src/core/lib/iomgr/call_combiner.cc
+++ b/src/core/lib/iomgr/call_combiner.cc
@@ -56,8 +56,7 @@
 #define DEBUG_FMT_ARGS
 #endif
 
-void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
-                              grpc_call_combiner* call_combiner,
+void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
                               grpc_closure* closure,
                               grpc_error* error DEBUG_ARGS,
                               const char* reason) {
@@ -75,15 +74,16 @@
     gpr_log(GPR_DEBUG, "  size: %" PRIdPTR " -> %" PRIdPTR, prev_size,
             prev_size + 1);
   }
-  GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
+  GRPC_STATS_INC_CALL_COMBINER_LOCKS_SCHEDULED_ITEMS();
   if (prev_size == 0) {
-    GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED(exec_ctx);
+    GRPC_STATS_INC_CALL_COMBINER_LOCKS_INITIATED();
+
     GPR_TIMER_MARK("call_combiner_initiate", 0);
     if (grpc_call_combiner_trace.enabled()) {
       gpr_log(GPR_DEBUG, "  EXECUTING IMMEDIATELY");
     }
     // Queue was empty, so execute this closure immediately.
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
+    GRPC_CLOSURE_SCHED(closure, error);
   } else {
     if (grpc_call_combiner_trace.enabled()) {
       gpr_log(GPR_INFO, "  QUEUING");
@@ -95,8 +95,7 @@
   GPR_TIMER_END("call_combiner_start", 0);
 }
 
-void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
-                             grpc_call_combiner* call_combiner DEBUG_ARGS,
+void grpc_call_combiner_stop(grpc_call_combiner* call_combiner DEBUG_ARGS,
                              const char* reason) {
   GPR_TIMER_BEGIN("call_combiner_stop", 0);
   if (grpc_call_combiner_trace.enabled()) {
@@ -131,7 +130,7 @@
         gpr_log(GPR_DEBUG, "  EXECUTING FROM QUEUE: closure=%p error=%s",
                 closure, grpc_error_string(closure->error_data.error));
       }
-      GRPC_CLOSURE_SCHED(exec_ctx, closure, closure->error_data.error);
+      GRPC_CLOSURE_SCHED(closure, closure->error_data.error);
       break;
     }
   } else if (grpc_call_combiner_trace.enabled()) {
@@ -140,10 +139,9 @@
   GPR_TIMER_END("call_combiner_stop", 0);
 }
 
-void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
-                                             grpc_call_combiner* call_combiner,
+void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
                                              grpc_closure* closure) {
-  GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL(exec_ctx);
+  GRPC_STATS_INC_CALL_COMBINER_SET_NOTIFY_ON_CANCEL();
   while (true) {
     // Decode original state.
     gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
@@ -157,7 +155,7 @@
                 "for pre-existing cancellation",
                 call_combiner, closure);
       }
-      GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_REF(original_error));
+      GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_REF(original_error));
       break;
     } else {
       if (gpr_atm_full_cas(&call_combiner->cancel_state, original_state,
@@ -176,7 +174,7 @@
                     "call_combiner=%p: scheduling old cancel callback=%p",
                     call_combiner, closure);
           }
-          GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+          GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
         }
         break;
       }
@@ -185,10 +183,9 @@
   }
 }
 
-void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
-                               grpc_call_combiner* call_combiner,
+void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
                                grpc_error* error) {
-  GRPC_STATS_INC_CALL_COMBINER_CANCELLED(exec_ctx);
+  GRPC_STATS_INC_CALL_COMBINER_CANCELLED();
   while (true) {
     gpr_atm original_state = gpr_atm_acq_load(&call_combiner->cancel_state);
     grpc_error* original_error = decode_cancel_state_error(original_state);
@@ -205,7 +202,7 @@
                   "call_combiner=%p: scheduling notify_on_cancel callback=%p",
                   call_combiner, notify_on_cancel);
         }
-        GRPC_CLOSURE_SCHED(exec_ctx, notify_on_cancel, GRPC_ERROR_REF(error));
+        GRPC_CLOSURE_SCHED(notify_on_cancel, GRPC_ERROR_REF(error));
       }
       break;
     }
diff --git a/src/core/lib/iomgr/call_combiner.h b/src/core/lib/iomgr/call_combiner.h
index 77420fa..9f7e6ce 100644
--- a/src/core/lib/iomgr/call_combiner.h
+++ b/src/core/lib/iomgr/call_combiner.h
@@ -27,10 +27,6 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/support/mpscq.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // A simple, lock-free mechanism for serializing activity related to a
 // single call.  This is similar to a combiner but is more lightweight.
 //
@@ -57,37 +53,29 @@
 void grpc_call_combiner_destroy(grpc_call_combiner* call_combiner);
 
 #ifndef NDEBUG
-#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error,   \
-                                 reason)                                    \
-  grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
-                           __FILE__, __LINE__, (reason))
-#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason)           \
-  grpc_call_combiner_stop((exec_ctx), (call_combiner), __FILE__, __LINE__, \
-                          (reason))
+#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason)   \
+  grpc_call_combiner_start((call_combiner), (closure), (error), __FILE__, \
+                           __LINE__, (reason))
+#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \
+  grpc_call_combiner_stop((call_combiner), __FILE__, __LINE__, (reason))
 /// Starts processing \a closure on \a call_combiner.
-void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
-                              grpc_call_combiner* call_combiner,
+void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
                               grpc_closure* closure, grpc_error* error,
                               const char* file, int line, const char* reason);
 /// Yields the call combiner to the next closure in the queue, if any.
-void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
-                             grpc_call_combiner* call_combiner,
+void grpc_call_combiner_stop(grpc_call_combiner* call_combiner,
                              const char* file, int line, const char* reason);
 #else
-#define GRPC_CALL_COMBINER_START(exec_ctx, call_combiner, closure, error,   \
-                                 reason)                                    \
-  grpc_call_combiner_start((exec_ctx), (call_combiner), (closure), (error), \
-                           (reason))
-#define GRPC_CALL_COMBINER_STOP(exec_ctx, call_combiner, reason) \
-  grpc_call_combiner_stop((exec_ctx), (call_combiner), (reason))
+#define GRPC_CALL_COMBINER_START(call_combiner, closure, error, reason) \
+  grpc_call_combiner_start((call_combiner), (closure), (error), (reason))
+#define GRPC_CALL_COMBINER_STOP(call_combiner, reason) \
+  grpc_call_combiner_stop((call_combiner), (reason))
 /// Starts processing \a closure on \a call_combiner.
-void grpc_call_combiner_start(grpc_exec_ctx* exec_ctx,
-                              grpc_call_combiner* call_combiner,
+void grpc_call_combiner_start(grpc_call_combiner* call_combiner,
                               grpc_closure* closure, grpc_error* error,
                               const char* reason);
 /// Yields the call combiner to the next closure in the queue, if any.
-void grpc_call_combiner_stop(grpc_exec_ctx* exec_ctx,
-                             grpc_call_combiner* call_combiner,
+void grpc_call_combiner_stop(grpc_call_combiner* call_combiner,
                              const char* reason);
 #endif
 
@@ -113,17 +101,11 @@
 /// cancellation; this effectively unregisters the previously set closure.
 /// However, most filters will not need to explicitly unregister their
 /// callbacks, as this is done automatically when the call is destroyed.
-void grpc_call_combiner_set_notify_on_cancel(grpc_exec_ctx* exec_ctx,
-                                             grpc_call_combiner* call_combiner,
+void grpc_call_combiner_set_notify_on_cancel(grpc_call_combiner* call_combiner,
                                              grpc_closure* closure);
 
 /// Indicates that the call has been cancelled.
-void grpc_call_combiner_cancel(grpc_exec_ctx* exec_ctx,
-                               grpc_call_combiner* call_combiner,
+void grpc_call_combiner_cancel(grpc_call_combiner* call_combiner,
                                grpc_error* error);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_CALL_COMBINER_H */
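Note: call sites of the call combiner drop the exec_ctx argument from both the debug and release macro forms. A hedged sketch of the new calling shape; the closure wiring is illustrative:

    static void do_op(void* arg, grpc_error* error) {
      grpc_call_combiner* call_combiner = (grpc_call_combiner*)arg;
      // ... perform the serialized work for this call ...
      GRPC_CALL_COMBINER_STOP(call_combiner, "op done");  // yield to next closure
    }

    static void start_op(grpc_call_combiner* call_combiner,
                         grpc_closure* closure) {
      GRPC_CALL_COMBINER_START(call_combiner, closure, GRPC_ERROR_NONE,
                               "start op");
    }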
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 46793dd..88af760 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -47,18 +47,15 @@
  *              describing what went wrong.
  *              Error contract: it is not the cb's job to unref this error;
  *              the closure scheduler will do that after the cb returns */
-typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_error* error);
+typedef void (*grpc_iomgr_cb_func)(void* arg, grpc_error* error);
 
 typedef struct grpc_closure_scheduler grpc_closure_scheduler;
 
 typedef struct grpc_closure_scheduler_vtable {
   /* NOTE: for all these functions, closure->scheduler == the scheduler that was
            used to find this vtable */
-  void (*run)(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-              grpc_error* error);
-  void (*sched)(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                grpc_error* error);
+  void (*run)(grpc_closure* closure, grpc_error* error);
+  void (*sched)(grpc_closure* closure, grpc_error* error);
   const char* name;
 } grpc_closure_scheduler_vtable;
 
@@ -146,13 +143,12 @@
   grpc_closure wrapper;
 } wrapped_closure;
 
-inline void closure_wrapper(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+inline void closure_wrapper(void* arg, grpc_error* error) {
   wrapped_closure* wc = (wrapped_closure*)arg;
   grpc_iomgr_cb_func cb = wc->cb;
   void* cb_arg = wc->cb_arg;
   gpr_free(wc);
-  cb(exec_ctx, cb_arg, error);
+  cb(cb_arg, error);
 }
 
 }  // namespace closure_impl
@@ -247,12 +243,10 @@
 }
 
 #ifndef NDEBUG
-inline void grpc_closure_run(const char* file, int line,
-                             grpc_exec_ctx* exec_ctx, grpc_closure* c,
+inline void grpc_closure_run(const char* file, int line, grpc_closure* c,
                              grpc_error* error) {
 #else
-inline void grpc_closure_run(grpc_exec_ctx* exec_ctx, grpc_closure* c,
-                             grpc_error* error) {
+inline void grpc_closure_run(grpc_closure* c, grpc_error* error) {
 #endif
   GPR_TIMER_BEGIN("grpc_closure_run", 0);
   if (c != nullptr) {
@@ -262,7 +256,7 @@
     c->run = true;
 #endif
     assert(c->cb);
-    c->scheduler->vtable->run(exec_ctx, c, error);
+    c->scheduler->vtable->run(c, error);
   } else {
     GRPC_ERROR_UNREF(error);
   }
@@ -273,20 +267,17 @@
  *  Note that calling this at the end of a closure callback function itself is
  *  by definition safe. */
 #ifndef NDEBUG
-#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
-  grpc_closure_run(__FILE__, __LINE__, exec_ctx, closure, error)
+#define GRPC_CLOSURE_RUN(closure, error) \
+  grpc_closure_run(__FILE__, __LINE__, closure, error)
 #else
-#define GRPC_CLOSURE_RUN(exec_ctx, closure, error) \
-  grpc_closure_run(exec_ctx, closure, error)
+#define GRPC_CLOSURE_RUN(closure, error) grpc_closure_run(closure, error)
 #endif
 
 #ifndef NDEBUG
-inline void grpc_closure_sched(const char* file, int line,
-                               grpc_exec_ctx* exec_ctx, grpc_closure* c,
+inline void grpc_closure_sched(const char* file, int line, grpc_closure* c,
                                grpc_error* error) {
 #else
-inline void grpc_closure_sched(grpc_exec_ctx* exec_ctx, grpc_closure* c,
-                               grpc_error* error) {
+inline void grpc_closure_sched(grpc_closure* c, grpc_error* error) {
 #endif
   GPR_TIMER_BEGIN("grpc_closure_sched", 0);
   if (c != nullptr) {
@@ -305,7 +296,7 @@
     c->run = false;
 #endif
     assert(c->cb);
-    c->scheduler->vtable->sched(exec_ctx, c, error);
+    c->scheduler->vtable->sched(c, error);
   } else {
     GRPC_ERROR_UNREF(error);
   }
@@ -314,20 +305,17 @@
 
 /** Schedule a closure to be run. Does not need to be run from a safe point. */
 #ifndef NDEBUG
-#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
-  grpc_closure_sched(__FILE__, __LINE__, exec_ctx, closure, error)
+#define GRPC_CLOSURE_SCHED(closure, error) \
+  grpc_closure_sched(__FILE__, __LINE__, closure, error)
 #else
-#define GRPC_CLOSURE_SCHED(exec_ctx, closure, error) \
-  grpc_closure_sched(exec_ctx, closure, error)
+#define GRPC_CLOSURE_SCHED(closure, error) grpc_closure_sched(closure, error)
 #endif
 
 #ifndef NDEBUG
 inline void grpc_closure_list_sched(const char* file, int line,
-                                    grpc_exec_ctx* exec_ctx,
                                     grpc_closure_list* list) {
 #else
-inline void grpc_closure_list_sched(grpc_exec_ctx* exec_ctx,
-                                    grpc_closure_list* list) {
+inline void grpc_closure_list_sched(grpc_closure_list* list) {
 #endif
   grpc_closure* c = list->head;
   while (c != nullptr) {
@@ -347,7 +335,7 @@
     c->run = false;
 #endif
     assert(c->cb);
-    c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error);
+    c->scheduler->vtable->sched(c, c->error_data.error);
     c = next;
   }
   list->head = list->tail = nullptr;
@@ -356,11 +344,11 @@
 /** Schedule all closures in a list to be run. Does not need to be run from a
  * safe point. */
 #ifndef NDEBUG
-#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
-  grpc_closure_list_sched(__FILE__, __LINE__, exec_ctx, closure_list)
+#define GRPC_CLOSURE_LIST_SCHED(closure_list) \
+  grpc_closure_list_sched(__FILE__, __LINE__, closure_list)
 #else
-#define GRPC_CLOSURE_LIST_SCHED(exec_ctx, closure_list) \
-  grpc_closure_list_sched(exec_ctx, closure_list)
+#define GRPC_CLOSURE_LIST_SCHED(closure_list) \
+  grpc_closure_list_sched(closure_list)
 #endif
 
 #endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
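Note: grpc_iomgr_cb_func shrinks to (void* arg, grpc_error* error), and the RUN/SCHED/LIST_SCHED macros lose their exec_ctx parameter accordingly. A hedged sketch of scheduling a closure under the new shape:

    static void on_done(void* arg, grpc_error* error) {
      gpr_log(GPR_DEBUG, "done: %s", grpc_error_string(error));
    }

    static void kick(void* state) {
      grpc_closure* c =
          GRPC_CLOSURE_CREATE(on_done, state, grpc_schedule_on_exec_ctx);
      GRPC_CLOSURE_SCHED(c, GRPC_ERROR_NONE);  // needs a live grpc_core::ExecCtx
    }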
diff --git a/src/core/lib/iomgr/combiner.cc b/src/core/lib/iomgr/combiner.cc
index 15c009d..e4d7a6a 100644
--- a/src/core/lib/iomgr/combiner.cc
+++ b/src/core/lib/iomgr/combiner.cc
@@ -61,17 +61,15 @@
   gpr_refcount refs;
 };
 
-static void combiner_exec(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                          grpc_error* error);
-static void combiner_finally_exec(grpc_exec_ctx* exec_ctx,
-                                  grpc_closure* closure, grpc_error* error);
+static void combiner_exec(grpc_closure* closure, grpc_error* error);
+static void combiner_finally_exec(grpc_closure* closure, grpc_error* error);
 
 static const grpc_closure_scheduler_vtable scheduler = {
     combiner_exec, combiner_exec, "combiner:immediately"};
 static const grpc_closure_scheduler_vtable finally_scheduler = {
     combiner_finally_exec, combiner_finally_exec, "combiner:finally"};
 
-static void offload(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error);
+static void offload(void* arg, grpc_error* error);
 
 grpc_combiner* grpc_combiner_create(void) {
   grpc_combiner* lock = (grpc_combiner*)gpr_zalloc(sizeof(*lock));
@@ -87,19 +85,19 @@
   return lock;
 }
 
-static void really_destroy(grpc_exec_ctx* exec_ctx, grpc_combiner* lock) {
+static void really_destroy(grpc_combiner* lock) {
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
   GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
   gpr_mpscq_destroy(&lock->queue);
   gpr_free(lock);
 }
 
-static void start_destroy(grpc_exec_ctx* exec_ctx, grpc_combiner* lock) {
+static void start_destroy(grpc_combiner* lock) {
   gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
   GRPC_COMBINER_TRACE(gpr_log(
       GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
   if (old_state == 1) {
-    really_destroy(exec_ctx, lock);
+    really_destroy(lock);
   }
 }
 
@@ -115,11 +113,10 @@
 #define GRPC_COMBINER_DEBUG_SPAM(op, delta)
 #endif
 
-void grpc_combiner_unref(grpc_exec_ctx* exec_ctx,
-                         grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
+void grpc_combiner_unref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS) {
   GRPC_COMBINER_DEBUG_SPAM("UNREF", -1);
   if (gpr_unref(&lock->refs)) {
-    start_destroy(exec_ctx, lock);
+    start_destroy(lock);
   }
 }
 
@@ -129,23 +126,25 @@
   return lock;
 }
 
-static void push_last_on_exec_ctx(grpc_exec_ctx* exec_ctx,
-                                  grpc_combiner* lock) {
+static void push_last_on_exec_ctx(grpc_combiner* lock) {
   lock->next_combiner_on_this_exec_ctx = nullptr;
-  if (exec_ctx->active_combiner == nullptr) {
-    exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
+  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
+    grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
+        grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
   } else {
-    exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
-    exec_ctx->last_combiner = lock;
+    grpc_core::ExecCtx::Get()
+        ->combiner_data()
+        ->last_combiner->next_combiner_on_this_exec_ctx = lock;
+    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
   }
 }
 
-static void push_first_on_exec_ctx(grpc_exec_ctx* exec_ctx,
-                                   grpc_combiner* lock) {
-  lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner;
-  exec_ctx->active_combiner = lock;
+static void push_first_on_exec_ctx(grpc_combiner* lock) {
+  lock->next_combiner_on_this_exec_ctx =
+      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
+  grpc_core::ExecCtx::Get()->combiner_data()->active_combiner = lock;
   if (lock->next_combiner_on_this_exec_ctx == nullptr) {
-    exec_ctx->last_combiner = lock;
+    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = lock;
   }
 }
 
@@ -153,9 +152,8 @@
   ((grpc_combiner*)(((char*)((closure)->scheduler)) -            \
                     offsetof(grpc_combiner, scheduler_name)))
 
-static void combiner_exec(grpc_exec_ctx* exec_ctx, grpc_closure* cl,
-                          grpc_error* error) {
-  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS(exec_ctx);
+static void combiner_exec(grpc_closure* cl, grpc_error* error) {
+  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_ITEMS();
   GPR_TIMER_BEGIN("combiner.execute", 0);
   grpc_combiner* lock = COMBINER_FROM_CLOSURE_SCHEDULER(cl, scheduler);
   gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
@@ -163,19 +161,19 @@
                               "C:%p grpc_combiner_execute c=%p last=%" PRIdPTR,
                               lock, cl, last));
   if (last == 1) {
-    GRPC_STATS_INC_COMBINER_LOCKS_INITIATED(exec_ctx);
+    GRPC_STATS_INC_COMBINER_LOCKS_INITIATED();
     GPR_TIMER_MARK("combiner.initiated", 0);
     gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null,
-                             (gpr_atm)exec_ctx);
+                             (gpr_atm)grpc_core::ExecCtx::Get());
     // first element on this list: add it to the list of combiner locks
     // executing within this exec_ctx
-    push_last_on_exec_ctx(exec_ctx, lock);
+    push_last_on_exec_ctx(lock);
   } else {
     // there may be a race with setting here: if that happens, we may delay
     // offload for one or two actions, and that's fine
     gpr_atm initiator =
         gpr_atm_no_barrier_load(&lock->initiating_exec_ctx_or_null);
-    if (initiator != 0 && initiator != (gpr_atm)exec_ctx) {
+    if (initiator != 0 && initiator != (gpr_atm)grpc_core::ExecCtx::Get()) {
       gpr_atm_no_barrier_store(&lock->initiating_exec_ctx_or_null, 0);
     }
   }
@@ -186,29 +184,32 @@
   GPR_TIMER_END("combiner.execute", 0);
 }
 
-static void move_next(grpc_exec_ctx* exec_ctx) {
-  exec_ctx->active_combiner =
-      exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
-  if (exec_ctx->active_combiner == nullptr) {
-    exec_ctx->last_combiner = nullptr;
+static void move_next() {
+  grpc_core::ExecCtx::Get()->combiner_data()->active_combiner =
+      grpc_core::ExecCtx::Get()
+          ->combiner_data()
+          ->active_combiner->next_combiner_on_this_exec_ctx;
+  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner == nullptr) {
+    grpc_core::ExecCtx::Get()->combiner_data()->last_combiner = nullptr;
   }
 }
 
-static void offload(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void offload(void* arg, grpc_error* error) {
   grpc_combiner* lock = (grpc_combiner*)arg;
-  push_last_on_exec_ctx(exec_ctx, lock);
+  push_last_on_exec_ctx(lock);
 }
 
-static void queue_offload(grpc_exec_ctx* exec_ctx, grpc_combiner* lock) {
-  GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED(exec_ctx);
-  move_next(exec_ctx);
+static void queue_offload(grpc_combiner* lock) {
+  GRPC_STATS_INC_COMBINER_LOCKS_OFFLOADED();
+  move_next();
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload", lock));
-  GRPC_CLOSURE_SCHED(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(&lock->offload, GRPC_ERROR_NONE);
 }
 
-bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx* exec_ctx) {
+bool grpc_combiner_continue_exec_ctx() {
   GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
-  grpc_combiner* lock = exec_ctx->active_combiner;
+  grpc_combiner* lock =
+      grpc_core::ExecCtx::Get()->combiner_data()->active_combiner;
   if (lock == nullptr) {
     GPR_TIMER_END("combiner.continue_exec_ctx", 0);
     return false;
@@ -223,15 +224,15 @@
                               "exec_ctx_ready_to_finish=%d "
                               "time_to_execute_final_list=%d",
                               lock, contended,
-                              grpc_exec_ctx_ready_to_finish(exec_ctx),
+                              grpc_core::ExecCtx::Get()->IsReadyToFinish(),
                               lock->time_to_execute_final_list));
 
-  if (contended && grpc_exec_ctx_ready_to_finish(exec_ctx) &&
+  if (contended && grpc_core::ExecCtx::Get()->IsReadyToFinish() &&
       grpc_executor_is_threaded()) {
     GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
     // this execution context wants to move on: schedule remaining work to be
     // picked up on the executor
-    queue_offload(exec_ctx, lock);
+    queue_offload(lock);
     GPR_TIMER_END("combiner.continue_exec_ctx", 0);
     return true;
   }
@@ -247,7 +248,7 @@
       // queue is in an inconsistent state: use this as a cue that we should
       // go off and do something else for a while (and come back later)
       GPR_TIMER_MARK("delay_busy", 0);
-      queue_offload(exec_ctx, lock);
+      queue_offload(lock);
       GPR_TIMER_END("combiner.continue_exec_ctx", 0);
       return true;
     }
@@ -257,7 +258,7 @@
 #ifndef NDEBUG
     cl->scheduled = false;
 #endif
-    cl->cb(exec_ctx, cl->cb_arg, cl_err);
+    cl->cb(cl->cb_arg, cl_err);
     GRPC_ERROR_UNREF(cl_err);
     GPR_TIMER_END("combiner.exec1", 0);
   } else {
@@ -274,7 +275,7 @@
 #ifndef NDEBUG
       c->scheduled = false;
 #endif
-      c->cb(exec_ctx, c->cb_arg, error);
+      c->cb(c->cb_arg, error);
       GRPC_ERROR_UNREF(error);
       c = next;
       GPR_TIMER_END("combiner.exec_1final", 0);
@@ -282,7 +283,7 @@
   }
 
   GPR_TIMER_MARK("unref", 0);
-  move_next(exec_ctx);
+  move_next();
   lock->time_to_execute_final_list = false;
   gpr_atm old_state =
       gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
@@ -311,7 +312,7 @@
       return true;
     case OLD_STATE_WAS(true, 1):
       // and one count, one orphaned --> unlocked and orphaned
-      really_destroy(exec_ctx, lock);
+      really_destroy(lock);
       GPR_TIMER_END("combiner.continue_exec_ctx", 0);
       return true;
     case OLD_STATE_WAS(false, 0):
@@ -321,27 +322,24 @@
       GPR_TIMER_END("combiner.continue_exec_ctx", 0);
       GPR_UNREACHABLE_CODE(return true);
   }
-  push_first_on_exec_ctx(exec_ctx, lock);
+  push_first_on_exec_ctx(lock);
   GPR_TIMER_END("combiner.continue_exec_ctx", 0);
   return true;
 }
 
-static void enqueue_finally(grpc_exec_ctx* exec_ctx, void* closure,
-                            grpc_error* error);
+static void enqueue_finally(void* closure, grpc_error* error);
 
-static void combiner_finally_exec(grpc_exec_ctx* exec_ctx,
-                                  grpc_closure* closure, grpc_error* error) {
-  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS(exec_ctx);
+static void combiner_finally_exec(grpc_closure* closure, grpc_error* error) {
+  GRPC_STATS_INC_COMBINER_LOCKS_SCHEDULED_FINAL_ITEMS();
   grpc_combiner* lock =
       COMBINER_FROM_CLOSURE_SCHEDULER(closure, finally_scheduler);
-  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG,
-                              "C:%p grpc_combiner_execute_finally c=%p; ac=%p",
-                              lock, closure, exec_ctx->active_combiner));
+  GRPC_COMBINER_TRACE(gpr_log(
+      GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p", lock,
+      closure, grpc_core::ExecCtx::Get()->combiner_data()->active_combiner));
   GPR_TIMER_BEGIN("combiner.execute_finally", 0);
-  if (exec_ctx->active_combiner != lock) {
+  if (grpc_core::ExecCtx::Get()->combiner_data()->active_combiner != lock) {
     GPR_TIMER_MARK("slowpath", 0);
-    GRPC_CLOSURE_SCHED(exec_ctx,
-                       GRPC_CLOSURE_CREATE(enqueue_finally, closure,
+    GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(enqueue_finally, closure,
                                            grpc_combiner_scheduler(lock)),
                        error);
     GPR_TIMER_END("combiner.execute_finally", 0);
@@ -355,10 +353,8 @@
   GPR_TIMER_END("combiner.execute_finally", 0);
 }
 
-static void enqueue_finally(grpc_exec_ctx* exec_ctx, void* closure,
-                            grpc_error* error) {
-  combiner_finally_exec(exec_ctx, (grpc_closure*)closure,
-                        GRPC_ERROR_REF(error));
+static void enqueue_finally(void* closure, grpc_error* error) {
+  combiner_finally_exec((grpc_closure*)closure, GRPC_ERROR_REF(error));
 }
 
 grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* combiner) {
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index e99b063..46b9ac5 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -26,10 +26,6 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/support/mpscq.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // Provides serialized access to some resource.
 // Each action queued on a combiner is executed serially in a borrowed thread.
 // The actual thread executing actions may change over time (but there will only
@@ -44,31 +40,25 @@
   , const char *file, int line, const char *reason
 #define GRPC_COMBINER_REF(combiner, reason) \
   grpc_combiner_ref((combiner), __FILE__, __LINE__, (reason))
-#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \
-  grpc_combiner_unref((exec_ctx), (combiner), __FILE__, __LINE__, (reason))
+#define GRPC_COMBINER_UNREF(combiner, reason) \
+  grpc_combiner_unref((combiner), __FILE__, __LINE__, (reason))
 #else
 #define GRPC_COMBINER_DEBUG_ARGS
 #define GRPC_COMBINER_REF(combiner, reason) grpc_combiner_ref((combiner))
-#define GRPC_COMBINER_UNREF(exec_ctx, combiner, reason) \
-  grpc_combiner_unref((exec_ctx), (combiner))
+#define GRPC_COMBINER_UNREF(combiner, reason) grpc_combiner_unref((combiner))
 #endif
 
 // Ref/unref the lock, for when we're sharing the lock ownership
 // Prefer to use the macros above
 grpc_combiner* grpc_combiner_ref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
-void grpc_combiner_unref(grpc_exec_ctx* exec_ctx,
-                         grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
+void grpc_combiner_unref(grpc_combiner* lock GRPC_COMBINER_DEBUG_ARGS);
 // Fetch a scheduler to schedule closures against
 grpc_closure_scheduler* grpc_combiner_scheduler(grpc_combiner* lock);
 // Scheduler to execute \a action within the lock just prior to unlocking.
 grpc_closure_scheduler* grpc_combiner_finally_scheduler(grpc_combiner* lock);
 
-bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx* exec_ctx);
+bool grpc_combiner_continue_exec_ctx();
 
 extern grpc_core::TraceFlag grpc_combiner_trace;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_COMBINER_H */
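Note: combiner ref/unref macros and grpc_combiner_continue_exec_ctx() likewise stop taking an exec_ctx. A hedged sketch of queueing work onto a combiner with the updated API; serialized_work() is illustrative:

    static void serialized_work(void* arg, grpc_error* error) {
      // runs serially with respect to other closures queued on the same lock
    }

    static void run_under_combiner() {
      grpc_core::ExecCtx exec_ctx;
      grpc_combiner* lock = grpc_combiner_create();
      GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(serialized_work, nullptr,
                                             grpc_combiner_scheduler(lock)),
                         GRPC_ERROR_NONE);
      GRPC_COMBINER_UNREF(lock, "done with lock");  // no exec_ctx argument now
    }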
diff --git a/src/core/lib/iomgr/endpoint.cc b/src/core/lib/iomgr/endpoint.cc
index 5eab1d3..9d4b102 100644
--- a/src/core/lib/iomgr/endpoint.cc
+++ b/src/core/lib/iomgr/endpoint.cc
@@ -18,41 +18,35 @@
 
 #include "src/core/lib/iomgr/endpoint.h"
 
-void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                        grpc_slice_buffer* slices, grpc_closure* cb) {
-  ep->vtable->read(exec_ctx, ep, slices, cb);
+void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                        grpc_closure* cb) {
+  ep->vtable->read(ep, slices, cb);
 }
 
-void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                         grpc_slice_buffer* slices, grpc_closure* cb) {
-  ep->vtable->write(exec_ctx, ep, slices, cb);
+void grpc_endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                         grpc_closure* cb) {
+  ep->vtable->write(ep, slices, cb);
 }
 
-void grpc_endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                                  grpc_pollset* pollset) {
-  ep->vtable->add_to_pollset(exec_ctx, ep, pollset);
+void grpc_endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
+  ep->vtable->add_to_pollset(ep, pollset);
 }
 
-void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_endpoint* ep,
+void grpc_endpoint_add_to_pollset_set(grpc_endpoint* ep,
                                       grpc_pollset_set* pollset_set) {
-  ep->vtable->add_to_pollset_set(exec_ctx, ep, pollset_set);
+  ep->vtable->add_to_pollset_set(ep, pollset_set);
 }
 
-void grpc_endpoint_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                           grpc_endpoint* ep,
+void grpc_endpoint_delete_from_pollset_set(grpc_endpoint* ep,
                                            grpc_pollset_set* pollset_set) {
-  ep->vtable->delete_from_pollset_set(exec_ctx, ep, pollset_set);
+  ep->vtable->delete_from_pollset_set(ep, pollset_set);
 }
 
-void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                            grpc_error* why) {
-  ep->vtable->shutdown(exec_ctx, ep, why);
+void grpc_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
+  ep->vtable->shutdown(ep, why);
 }
 
-void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
-  ep->vtable->destroy(exec_ctx, ep);
-}
+void grpc_endpoint_destroy(grpc_endpoint* ep) { ep->vtable->destroy(ep); }
 
 char* grpc_endpoint_get_peer(grpc_endpoint* ep) {
   return ep->vtable->get_peer(ep);
diff --git a/src/core/lib/iomgr/endpoint.h b/src/core/lib/iomgr/endpoint.h
index 1b0a9e7..cd53099 100644
--- a/src/core/lib/iomgr/endpoint.h
+++ b/src/core/lib/iomgr/endpoint.h
@@ -26,10 +26,6 @@
 #include "src/core/lib/iomgr/pollset_set.h"
 #include "src/core/lib/iomgr/resource_quota.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* An endpoint caps a streaming channel between two communicating processes.
    Examples may be: a tcp socket, <stdin+stdout>, or some shared memory. */
 
@@ -37,18 +33,13 @@
 typedef struct grpc_endpoint_vtable grpc_endpoint_vtable;
 
 struct grpc_endpoint_vtable {
-  void (*read)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-               grpc_slice_buffer* slices, grpc_closure* cb);
-  void (*write)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                grpc_slice_buffer* slices, grpc_closure* cb);
-  void (*add_to_pollset)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                         grpc_pollset* pollset);
-  void (*add_to_pollset_set)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                             grpc_pollset_set* pollset);
-  void (*delete_from_pollset_set)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                                  grpc_pollset_set* pollset);
-  void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep, grpc_error* why);
-  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep);
+  void (*read)(grpc_endpoint* ep, grpc_slice_buffer* slices, grpc_closure* cb);
+  void (*write)(grpc_endpoint* ep, grpc_slice_buffer* slices, grpc_closure* cb);
+  void (*add_to_pollset)(grpc_endpoint* ep, grpc_pollset* pollset);
+  void (*add_to_pollset_set)(grpc_endpoint* ep, grpc_pollset_set* pollset);
+  void (*delete_from_pollset_set)(grpc_endpoint* ep, grpc_pollset_set* pollset);
+  void (*shutdown)(grpc_endpoint* ep, grpc_error* why);
+  void (*destroy)(grpc_endpoint* ep);
   grpc_resource_user* (*get_resource_user)(grpc_endpoint* ep);
   char* (*get_peer)(grpc_endpoint* ep);
   int (*get_fd)(grpc_endpoint* ep);
@@ -59,8 +50,8 @@
    indicates the endpoint is closed.
    Valid slices may be placed into \a slices even when the callback is
    invoked with error != GRPC_ERROR_NONE. */
-void grpc_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                        grpc_slice_buffer* slices, grpc_closure* cb);
+void grpc_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                        grpc_closure* cb);
 
 char* grpc_endpoint_get_peer(grpc_endpoint* ep);
 
@@ -78,26 +69,22 @@
    No guarantee is made to the content of slices after a write EXCEPT that
    it is a valid slice buffer.
    */
-void grpc_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                         grpc_slice_buffer* slices, grpc_closure* cb);
+void grpc_endpoint_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                         grpc_closure* cb);
 
 /* Causes any pending and future read/write callbacks to run immediately with
    success==0 */
-void grpc_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                            grpc_error* why);
-void grpc_endpoint_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep);
+void grpc_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why);
+void grpc_endpoint_destroy(grpc_endpoint* ep);
 
 /* Add an endpoint to a pollset or pollset_set, so that when the pollset is
    polled, events from this endpoint are considered */
-void grpc_endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                                  grpc_pollset* pollset);
-void grpc_endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_endpoint* ep,
+void grpc_endpoint_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset);
+void grpc_endpoint_add_to_pollset_set(grpc_endpoint* ep,
                                       grpc_pollset_set* pollset_set);
 
 /* Delete an endpoint from a pollset_set */
-void grpc_endpoint_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                           grpc_endpoint* ep,
+void grpc_endpoint_delete_from_pollset_set(grpc_endpoint* ep,
                                            grpc_pollset_set* pollset_set);
 
 grpc_resource_user* grpc_endpoint_get_resource_user(grpc_endpoint* endpoint);
@@ -106,8 +93,4 @@
   const grpc_endpoint_vtable* vtable;
 };
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_H */
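Note: the endpoint vtable and its wrappers shed the exec_ctx parameter as well. A hedged sketch of kicking off an asynchronous read against the new signatures; the buffer handling is illustrative:

    static void on_read(void* arg, grpc_error* error) {
      grpc_slice_buffer* incoming = (grpc_slice_buffer*)arg;
      // consume 'incoming' here when error == GRPC_ERROR_NONE
    }

    static void start_read(grpc_endpoint* ep, grpc_slice_buffer* incoming) {
      grpc_endpoint_read(ep, incoming,
                         GRPC_CLOSURE_CREATE(on_read, incoming,
                                             grpc_schedule_on_exec_ctx));
    }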
diff --git a/src/core/lib/iomgr/endpoint_pair.h b/src/core/lib/iomgr/endpoint_pair.h
index 219eea8..506ffc8 100644
--- a/src/core/lib/iomgr/endpoint_pair.h
+++ b/src/core/lib/iomgr/endpoint_pair.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/iomgr/endpoint.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   grpc_endpoint* client;
   grpc_endpoint* server;
@@ -33,8 +29,4 @@
 grpc_endpoint_pair grpc_iomgr_create_endpoint_pair(const char* name,
                                                    grpc_channel_args* args);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_ENDPOINT_PAIR_H */
diff --git a/src/core/lib/iomgr/endpoint_pair_posix.cc b/src/core/lib/iomgr/endpoint_pair_posix.cc
index f5f59f9..0b4aefd 100644
--- a/src/core/lib/iomgr/endpoint_pair_posix.cc
+++ b/src/core/lib/iomgr/endpoint_pair_posix.cc
@@ -54,18 +54,17 @@
   char* final_name;
   create_sockets(sv);
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_asprintf(&final_name, "%s:client", name);
-  p.client = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[1], final_name), args,
+  p.client = grpc_tcp_create(grpc_fd_create(sv[1], final_name), args,
                              "socketpair-server");
   gpr_free(final_name);
   gpr_asprintf(&final_name, "%s:server", name);
-  p.server = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[0], final_name), args,
+  p.server = grpc_tcp_create(grpc_fd_create(sv[0], final_name), args,
                              "socketpair-client");
   gpr_free(final_name);
 
-  grpc_exec_ctx_finish(&exec_ctx);
   return p;
 }
 
diff --git a/src/core/lib/iomgr/endpoint_pair_windows.cc b/src/core/lib/iomgr/endpoint_pair_windows.cc
index afa995a..cc07ac0 100644
--- a/src/core/lib/iomgr/endpoint_pair_windows.cc
+++ b/src/core/lib/iomgr/endpoint_pair_windows.cc
@@ -72,14 +72,12 @@
   SOCKET sv[2];
   grpc_endpoint_pair p;
   create_sockets(sv);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  p.client = grpc_tcp_create(&exec_ctx,
-                             grpc_winsocket_create(sv[1], "endpoint:client"),
+  grpc_core::ExecCtx exec_ctx;
+  p.client = grpc_tcp_create(grpc_winsocket_create(sv[1], "endpoint:client"),
                              channel_args, "endpoint:server");
-  p.server = grpc_tcp_create(&exec_ctx,
-                             grpc_winsocket_create(sv[0], "endpoint:server"),
+  p.server = grpc_tcp_create(grpc_winsocket_create(sv[0], "endpoint:server"),
                              channel_args, "endpoint:client");
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return p;
 }
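Note: the two endpoint_pair hunks above show the RAII pattern that replaces the old init/finish pair: constructing grpc_core::ExecCtx on the stack stands in for GRPC_EXEC_CTX_INIT, and its destructor takes over the flush that grpc_exec_ctx_finish used to perform. A minimal hedged sketch:

    void with_exec_ctx() {
      grpc_core::ExecCtx exec_ctx;  // registers itself for this thread
      // ... call iomgr APIs that used to take a grpc_exec_ctx*, e.g.
      //     grpc_endpoint_shutdown(ep, GRPC_ERROR_CREATE_FROM_STATIC_STRING("bye"));
    }  // destructor flushes any closures scheduled during the scope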
 
diff --git a/src/core/lib/iomgr/error.cc b/src/core/lib/iomgr/error.cc
index e6d640c..67c3caf 100644
--- a/src/core/lib/iomgr/error.cc
+++ b/src/core/lib/iomgr/error.cc
@@ -156,11 +156,7 @@
   }
 }
 
-static void unref_slice(grpc_slice slice) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice_unref_internal(&exec_ctx, slice);
-  grpc_exec_ctx_finish(&exec_ctx);
-}
+static void unref_slice(grpc_slice slice) { grpc_slice_unref_internal(slice); }
 
 static void unref_strs(grpc_error* err) {
   for (size_t which = 0; which < GRPC_ERROR_STR_MAX; ++which) {
@@ -753,7 +749,7 @@
 
   if (!gpr_atm_rel_cas(&err->atomics.error_string, 0, (gpr_atm)out)) {
     gpr_free(out);
-    out = (char*)gpr_atm_no_barrier_load(&err->atomics.error_string);
+    out = (char*)gpr_atm_acq_load(&err->atomics.error_string);
   }
 
   GPR_TIMER_END("grpc_error_string", 0);
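Note: the switch from gpr_atm_no_barrier_load to gpr_atm_acq_load above is a correctness fix, not part of the exec_ctx cleanup: the winning thread publishes the rendered string with a release CAS, so a losing thread must re-read the slot with acquire semantics before dereferencing it. The general publish-once pattern, sketched with hypothetical names:

    char* get_or_publish(gpr_atm* slot, char* candidate) {
      if (!gpr_atm_rel_cas(slot, 0, (gpr_atm)candidate)) {
        gpr_free(candidate);                   // lost the race; discard our copy
        return (char*)gpr_atm_acq_load(slot);  // acquire pairs with the rel CAS
      }
      return candidate;
    }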
diff --git a/src/core/lib/iomgr/error.h b/src/core/lib/iomgr/error.h
index d10bf0b..8c72a43 100644
--- a/src/core/lib/iomgr/error.h
+++ b/src/core/lib/iomgr/error.h
@@ -29,10 +29,6 @@
 
 #include "src/core/lib/debug/trace.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /// Opaque representation of an error.
 /// See https://github.com/grpc/grpc/blob/master/doc/core/grpc-error.md for a
 /// full write up of this object.
@@ -169,6 +165,8 @@
 grpc_error* grpc_error_set_int(grpc_error* src, grpc_error_ints which,
                                intptr_t value) GRPC_MUST_USE_RESULT;
 bool grpc_error_get_int(grpc_error* error, grpc_error_ints which, intptr_t* p);
+/// This call takes ownership of the slice; the error is responsible for
+/// eventually unref-ing it.
 grpc_error* grpc_error_set_str(grpc_error* src, grpc_error_strs which,
                                grpc_slice str) GRPC_MUST_USE_RESULT;
 /// Returns false if the specified string is not set.
@@ -178,7 +176,8 @@
 
 /// Add a child error: an error that is believed to have contributed to this
 /// error occurring. Allows root causing high level errors from lower level
-/// errors that contributed to them.
+/// errors that contributed to them. The src error takes ownership of the
+/// child error.
 grpc_error* grpc_error_add_child(grpc_error* src,
                                  grpc_error* child) GRPC_MUST_USE_RESULT;
 grpc_error* grpc_os_error(const char* file, int line, int err,
@@ -203,8 +202,4 @@
 #define GRPC_LOG_IF_ERROR(what, error) \
   grpc_log_if_error((what), (error), __FILE__, __LINE__)
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_ERROR_H */
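Note: the new comments added above pin down ownership: both the slice handed to grpc_error_set_str and the child handed to grpc_error_add_child are owned by the returned error, so callers must not unref them separately. A hedged sketch of the intended usage:

    grpc_error* annotate(grpc_error* base, grpc_error* cause) {
      grpc_error* e = grpc_error_set_str(
          base, GRPC_ERROR_STR_DESCRIPTION,
          grpc_slice_from_static_string("while doing something"));
      return grpc_error_add_child(e, cause);  // 'cause' is now owned by 'e'
    }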
diff --git a/src/core/lib/iomgr/error_internal.h b/src/core/lib/iomgr/error_internal.h
index d5ccbae..6cb09c2 100644
--- a/src/core/lib/iomgr/error_internal.h
+++ b/src/core/lib/iomgr/error_internal.h
@@ -25,10 +25,6 @@
 #include <grpc/support/sync.h>
 #include "src/core/lib/iomgr/error.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_linked_error grpc_linked_error;
 
 struct grpc_linked_error {
@@ -62,8 +58,4 @@
 
 bool grpc_error_is_special(struct grpc_error* err);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_ERROR_INTERNAL_H */
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 2467b9a..1ab7e51 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -299,31 +299,29 @@
 /* if 'releasing_fd' is true, it means that we are going to detach the internal
  * fd from grpc_fd structure (i.e which means we should not be calling
  * shutdown() syscall on that fd) */
-static void fd_shutdown_internal(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                                 grpc_error* why, bool releasing_fd) {
-  if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
+static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why,
+                                 bool releasing_fd) {
+  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
     if (!releasing_fd) {
       shutdown(fd->fd, SHUT_RDWR);
     }
-    fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
+    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
   }
   GRPC_ERROR_UNREF(why);
 }
 
 /* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
-  fd_shutdown_internal(exec_ctx, fd, why, false);
+static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
+  fd_shutdown_internal(fd, why, false);
 }
 
-static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                      grpc_closure* on_done, int* release_fd,
+static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                       bool already_closed, const char* reason) {
   grpc_error* error = GRPC_ERROR_NONE;
   bool is_release_fd = (release_fd != nullptr);
 
   if (!fd->read_closure->IsShutdown()) {
-    fd_shutdown_internal(exec_ctx, fd,
-                         GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
+    fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                          is_release_fd);
   }
 
@@ -335,7 +333,7 @@
     close(fd->fd);
   }
 
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error));
 
   grpc_iomgr_unregister_object(&fd->iomgr_object);
   fd->read_closure->DestroyEvent();
@@ -347,8 +345,7 @@
   gpr_mu_unlock(&fd_freelist_mu);
 }
 
-static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
-                                                  grpc_fd* fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
   gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
   return (grpc_pollset*)notifier;
 }
@@ -357,26 +354,21 @@
   return fd->read_closure->IsShutdown();
 }
 
-static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                              grpc_closure* closure) {
-  fd->read_closure->NotifyOn(exec_ctx, closure);
+static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
+  fd->read_closure->NotifyOn(closure);
 }
 
-static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               grpc_closure* closure) {
-  fd->write_closure->NotifyOn(exec_ctx, closure);
+static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
+  fd->write_closure->NotifyOn(closure);
 }
 
-static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               grpc_pollset* notifier) {
-  fd->read_closure->SetReady(exec_ctx);
+static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
+  fd->read_closure->SetReady();
   /* Use release store to match with acquire load in fd_get_read_notifier */
   gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
 }
 
-static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
-  fd->write_closure->SetReady(exec_ctx);
-}
+static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
 
 /*******************************************************************************
  * Pollset Definitions
@@ -479,7 +471,7 @@
   pollset->next = pollset->prev = nullptr;
 }
 
-static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
+static void pollset_destroy(grpc_pollset* pollset) {
   gpr_mu_lock(&pollset->mu);
   if (!pollset->seen_inactive) {
     pollset_neighborhood* neighborhood = pollset->neighborhood;
@@ -507,27 +499,26 @@
   gpr_mu_destroy(&pollset->mu);
 }
 
-static grpc_error* pollset_kick_all(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset* pollset) {
+static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
   GPR_TIMER_BEGIN("pollset_kick_all", 0);
   grpc_error* error = GRPC_ERROR_NONE;
   if (pollset->root_worker != nullptr) {
     grpc_pollset_worker* worker = pollset->root_worker;
     do {
-      GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+      GRPC_STATS_INC_POLLSET_KICK();
       switch (worker->state) {
         case KICKED:
-          GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+          GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
           break;
         case UNKICKED:
           SET_KICK_STATE(worker, KICKED);
           if (worker->initialized_cv) {
-            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
             gpr_cv_signal(&worker->cv);
           }
           break;
         case DESIGNATED_POLLER:
-          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
           SET_KICK_STATE(worker, KICKED);
           append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                        "pollset_kick_all");
@@ -543,32 +534,29 @@
   return error;
 }
 
-static void pollset_maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
-                                          grpc_pollset* pollset) {
+static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
   if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
       pollset->begin_refs == 0) {
     GPR_TIMER_MARK("pollset_finish_shutdown", 0);
-    GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
     pollset->shutdown_closure = nullptr;
   }
 }
 
-static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                             grpc_closure* closure) {
+static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   GPR_TIMER_BEGIN("pollset_shutdown", 0);
   GPR_ASSERT(pollset->shutdown_closure == nullptr);
   GPR_ASSERT(!pollset->shutting_down);
   pollset->shutdown_closure = closure;
   pollset->shutting_down = true;
-  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
-  pollset_maybe_finish_shutdown(exec_ctx, pollset);
+  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
+  pollset_maybe_finish_shutdown(pollset);
   GPR_TIMER_END("pollset_shutdown", 0);
 }
 
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
-                                           grpc_millis millis) {
+static int poll_deadline_to_millis_timeout(grpc_millis millis) {
   if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
-  grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
   if (delta > INT_MAX) {
     return INT_MAX;
   } else if (delta < 0) {
@@ -586,8 +574,7 @@
   NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
    called by g_active_poller thread. So there is no need for synchronization
    when accessing fields in g_epoll_set */
-static grpc_error* process_epoll_events(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset* pollset) {
+static grpc_error* process_epoll_events(grpc_pollset* pollset) {
   static const char* err_desc = "process_events";
   grpc_error* error = GRPC_ERROR_NONE;
 
@@ -611,11 +598,11 @@
       bool write_ev = (ev->events & EPOLLOUT) != 0;
 
       if (read_ev || cancel) {
-        fd_become_readable(exec_ctx, fd, pollset);
+        fd_become_readable(fd, pollset);
       }
 
       if (write_ev || cancel) {
-        fd_become_writable(exec_ctx, fd);
+        fd_become_writable(fd);
       }
     }
   }
@@ -631,27 +618,26 @@
    NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
    (i.e the designated poller thread) will be calling this function. So there is
   no need for any synchronization when accessing fields in g_epoll_set */
-static grpc_error* do_epoll_wait(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
-                                 grpc_millis deadline) {
+static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
   GPR_TIMER_BEGIN("do_epoll_wait", 0);
 
   int r;
-  int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
+  int timeout = poll_deadline_to_millis_timeout(deadline);
   if (timeout != 0) {
     GRPC_SCHEDULING_START_BLOCKING_REGION;
   }
   do {
-    GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
+    GRPC_STATS_INC_SYSCALL_POLL();
     r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
                    timeout);
   } while (r < 0 && errno == EINTR);
   if (timeout != 0) {
-    GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
+    GRPC_SCHEDULING_END_BLOCKING_REGION;
   }
 
   if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
 
-  GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
+  GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
 
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
@@ -664,8 +650,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static bool begin_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                         grpc_pollset_worker* worker,
+static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                          grpc_pollset_worker** worker_hdl,
                          grpc_millis deadline) {
   GPR_TIMER_BEGIN("begin_worker", 0);
@@ -753,14 +738,14 @@
       }
 
       if (gpr_cv_wait(&worker->cv, &pollset->mu,
-                      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)) &&
+                      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
           worker->state == UNKICKED) {
         /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
            received a kick */
         SET_KICK_STATE(worker, KICKED);
       }
     }
-    grpc_exec_ctx_invalidate_now(exec_ctx);
+    grpc_core::ExecCtx::Get()->InvalidateNow();
   }
 
   if (grpc_polling_trace.enabled()) {
@@ -791,7 +776,7 @@
 }
 
 static bool check_neighborhood_for_available_poller(
-    grpc_exec_ctx* exec_ctx, pollset_neighborhood* neighborhood) {
+    pollset_neighborhood* neighborhood) {
   GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
   bool found_worker = false;
   do {
@@ -815,7 +800,7 @@
               SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
               if (inspect_worker->initialized_cv) {
                 GPR_TIMER_MARK("signal worker", 0);
-                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
                 gpr_cv_signal(&inspect_worker->cv);
               }
             } else {
@@ -855,8 +840,7 @@
   return found_worker;
 }
 
-static void end_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                       grpc_pollset_worker* worker,
+static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                        grpc_pollset_worker** worker_hdl) {
   GPR_TIMER_BEGIN("end_worker", 0);
   if (grpc_polling_trace.enabled()) {
@@ -866,7 +850,7 @@
   /* Make sure we appear kicked */
   SET_KICK_STATE(worker, KICKED);
   grpc_closure_list_move(&worker->schedule_on_end_work,
-                         &exec_ctx->closure_list);
+                         grpc_core::ExecCtx::Get()->closure_list());
   if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
     if (worker->next != worker && worker->next->state == UNKICKED) {
       if (grpc_polling_trace.enabled()) {
@@ -875,11 +859,11 @@
       GPR_ASSERT(worker->next->initialized_cv);
       gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
       SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
-      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
       gpr_cv_signal(&worker->next->cv);
-      if (grpc_exec_ctx_has_work(exec_ctx)) {
+      if (grpc_core::ExecCtx::Get()->HasWork()) {
         gpr_mu_unlock(&pollset->mu);
-        grpc_exec_ctx_flush(exec_ctx);
+        grpc_core::ExecCtx::Get()->Flush();
         gpr_mu_lock(&pollset->mu);
       }
     } else {
@@ -894,8 +878,7 @@
             &g_neighborhoods[(poller_neighborhood_idx + i) %
                              g_num_neighborhoods];
         if (gpr_mu_trylock(&neighborhood->mu)) {
-          found_worker =
-              check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+          found_worker = check_neighborhood_for_available_poller(neighborhood);
           gpr_mu_unlock(&neighborhood->mu);
           scan_state[i] = true;
         } else {
@@ -908,16 +891,15 @@
             &g_neighborhoods[(poller_neighborhood_idx + i) %
                              g_num_neighborhoods];
         gpr_mu_lock(&neighborhood->mu);
-        found_worker =
-            check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+        found_worker = check_neighborhood_for_available_poller(neighborhood);
         gpr_mu_unlock(&neighborhood->mu);
       }
-      grpc_exec_ctx_flush(exec_ctx);
+      grpc_core::ExecCtx::Get()->Flush();
       gpr_mu_lock(&pollset->mu);
     }
-  } else if (grpc_exec_ctx_has_work(exec_ctx)) {
+  } else if (grpc_core::ExecCtx::Get()->HasWork()) {
     gpr_mu_unlock(&pollset->mu);
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(&pollset->mu);
   }
   if (worker->initialized_cv) {
@@ -927,7 +909,7 @@
     gpr_log(GPR_DEBUG, " .. remove worker");
   }
   if (EMPTIED == worker_remove(pollset, worker)) {
-    pollset_maybe_finish_shutdown(exec_ctx, pollset);
+    pollset_maybe_finish_shutdown(pollset);
   }
   GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
   GPR_TIMER_END("end_worker", 0);
@@ -937,7 +919,7 @@
    The function pollset_work() may temporarily release the lock (pollset->po.mu)
    during the course of its execution but it will always re-acquire the lock and
    ensure that it is held by the time the function returns */
-static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
+static grpc_error* pollset_work(grpc_pollset* ps,
                                 grpc_pollset_worker** worker_hdl,
                                 grpc_millis deadline) {
   grpc_pollset_worker worker;
@@ -950,7 +932,7 @@
     return GRPC_ERROR_NONE;
   }
 
-  if (begin_worker(exec_ctx, ps, &worker, worker_hdl, deadline)) {
+  if (begin_worker(ps, &worker, worker_hdl, deadline)) {
     gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
     gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
     GPR_ASSERT(!ps->shutting_down);
@@ -968,14 +950,14 @@
 
        process_epoll_events() returns very quickly: It just queues the work on
       exec_ctx but does not execute it (the actual execution or more
-       accurately grpc_exec_ctx_flush() happens in end_worker() AFTER selecting
-       a designated poller). So we are not waiting long periods without a
-       designated poller */
+       accurately grpc_core::ExecCtx::Get()->Flush() happens in end_worker()
+       AFTER selecting a designated poller). So we are not waiting long periods
+       without a designated poller */
     if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
         gpr_atm_acq_load(&g_epoll_set.num_events)) {
-      append_error(&error, do_epoll_wait(exec_ctx, ps, deadline), err_desc);
+      append_error(&error, do_epoll_wait(ps, deadline), err_desc);
     }
-    append_error(&error, process_epoll_events(exec_ctx, ps), err_desc);
+    append_error(&error, process_epoll_events(ps), err_desc);
 
     gpr_mu_lock(&ps->mu); /* lock */
 
@@ -983,17 +965,17 @@
   } else {
     gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
   }
-  end_worker(exec_ctx, ps, &worker, worker_hdl);
+  end_worker(ps, &worker, worker_hdl);
 
   gpr_tls_set(&g_current_thread_pollset, 0);
   GPR_TIMER_END("pollset_work", 0);
   return error;
 }
 
-static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+static grpc_error* pollset_kick(grpc_pollset* pollset,
                                 grpc_pollset_worker* specific_worker) {
   GPR_TIMER_BEGIN("pollset_kick", 0);
-  GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+  GRPC_STATS_INC_POLLSET_KICK();
   grpc_error* ret_err = GRPC_ERROR_NONE;
   if (grpc_polling_trace.enabled()) {
     gpr_strvec log;
@@ -1026,7 +1008,7 @@
     if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
       grpc_pollset_worker* root_worker = pollset->root_worker;
       if (root_worker == nullptr) {
-        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
         pollset->kicked_without_poller = true;
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. kicked_without_poller");
@@ -1035,14 +1017,14 @@
       }
       grpc_pollset_worker* next_worker = root_worker->next;
       if (root_worker->state == KICKED) {
-        GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
         }
         SET_KICK_STATE(root_worker, KICKED);
         goto done;
       } else if (next_worker->state == KICKED) {
-        GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
         }
@@ -1053,7 +1035,7 @@
                                      // there is no next worker
                  root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
                                     &g_active_poller)) {
-        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
         }
@@ -1061,7 +1043,7 @@
         ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
         goto done;
       } else if (next_worker->state == UNKICKED) {
-        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
         }
@@ -1079,12 +1061,12 @@
           }
           SET_KICK_STATE(root_worker, KICKED);
           if (root_worker->initialized_cv) {
-            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
             gpr_cv_signal(&root_worker->cv);
           }
           goto done;
         } else {
-          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
           if (grpc_polling_trace.enabled()) {
             gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
                     root_worker);
@@ -1094,13 +1076,13 @@
           goto done;
         }
       } else {
-        GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
         GPR_ASSERT(next_worker->state == KICKED);
         SET_KICK_STATE(next_worker, KICKED);
         goto done;
       }
     } else {
-      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
+      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
       if (grpc_polling_trace.enabled()) {
         gpr_log(GPR_ERROR, " .. kicked while waking up");
       }
@@ -1117,7 +1099,7 @@
     goto done;
   } else if (gpr_tls_get(&g_current_thread_worker) ==
              (intptr_t)specific_worker) {
-    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
     }
@@ -1125,7 +1107,7 @@
     goto done;
   } else if (specific_worker ==
              (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
-    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_ERROR, " .. kick active poller");
     }
@@ -1133,7 +1115,7 @@
     ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
     goto done;
   } else if (specific_worker->initialized_cv) {
-    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_ERROR, " .. kick waiting worker");
     }
@@ -1141,7 +1123,7 @@
     gpr_cv_signal(&specific_worker->cv);
     goto done;
   } else {
-    GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_ERROR, " .. kick non-waiting worker");
     }
@@ -1153,8 +1135,7 @@
   return ret_err;
 }
 
-static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_fd* fd) {}
+static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
 
 /*******************************************************************************
  * Pollset-set Definitions
@@ -1164,27 +1145,20 @@
   return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
 }
 
-static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
-                                grpc_pollset_set* pss) {}
+static void pollset_set_destroy(grpc_pollset_set* pss) {}
 
-static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
-                               grpc_fd* fd) {}
+static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
 
-static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
-                               grpc_fd* fd) {}
+static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
 
-static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pss, grpc_pollset* ps) {}
+static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
 
-static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pss, grpc_pollset* ps) {}
+static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
 
-static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* bag,
+static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
                                         grpc_pollset_set* item) {}
 
-static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* bag,
+static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                         grpc_pollset_set* item) {}
 
 /*******************************************************************************
@@ -1258,7 +1232,7 @@
 /* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
  * NULL */
 const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
-  return NULL;
+  return nullptr;
 }
 #endif /* defined(GRPC_POSIX_SOCKET) */
 #endif /* !defined(GRPC_LINUX_EPOLL) */
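
Throughout this file the explicit grpc_exec_ctx* parameter is dropped and callees reach the ambient execution context through grpc_core::ExecCtx::Get() instead. A self-contained sketch of that refactoring pattern in plain C++; ExecCtx here is a toy stand-in for the real grpc_core::ExecCtx, not its actual interface:

#include <cstdio>
#include <functional>
#include <vector>

class ExecCtx {
 public:
  ExecCtx() { current_ = this; }
  ~ExecCtx() {
    Flush();
    current_ = nullptr;
  }
  static ExecCtx* Get() { return current_; }  // ambient, per-thread context
  void Run(std::function<void()> fn) { pending_.push_back(std::move(fn)); }
  void Flush() {  // drain queued closures
    while (!pending_.empty()) {
      std::function<void()> fn = std::move(pending_.back());
      pending_.pop_back();
      fn();
    }
  }

 private:
  static thread_local ExecCtx* current_;
  std::vector<std::function<void()>> pending_;
};
thread_local ExecCtx* ExecCtx::current_ = nullptr;

// Before: the caller threaded an exec_ctx parameter into every helper.
// After: the helper reaches the ambient context itself.
static void fd_become_writable_demo() {
  ExecCtx::Get()->Run([] { std::printf("write closure ran\n"); });
}

int main() {
  ExecCtx exec_ctx;           // established once near the call root
  fd_become_writable_demo();  // no context parameter anywhere in between
}                             // destructor flushes pending closures
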
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.h b/src/core/lib/iomgr/ev_epoll1_linux.h
index 3e66747..9a1b96b 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.h
+++ b/src/core/lib/iomgr/ev_epoll1_linux.h
@@ -22,16 +22,8 @@
 #include "src/core/lib/iomgr/ev_posix.h"
 #include "src/core/lib/iomgr/port.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // a polling engine that utilizes a singleton epoll set and turnstile polling
 
 const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLL1_LINUX_H */
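
The grpc_init_epoll1_linux stub above returns nullptr when epoll is unavailable. A sketch of how a caller might probe such factories and fall back to the first engine that is actually available; the vtable struct and factory names below are stand-ins, not the real ev_posix wiring:

#include <cstdio>

struct engine_vtable {
  const char* name;
};

static const engine_vtable* init_epoll1(bool /*explicit_request*/) {
#ifdef __linux__
  static const engine_vtable v{"epoll1"};
  return &v;
#else
  return nullptr;  // mirrors the stub that returns nullptr when epoll is absent
#endif
}

static const engine_vtable* init_poll(bool /*explicit_request*/) {
  static const engine_vtable v{"poll"};
  return &v;  // assumed always-available fallback for this sketch
}

int main() {
  const engine_vtable* (*candidates[])(bool) = {init_epoll1, init_poll};
  const engine_vtable* chosen = nullptr;
  for (auto factory : candidates) {
    if ((chosen = factory(false)) != nullptr) break;  // first non-null wins
  }
  std::printf("selected engine: %s\n", chosen->name);
}
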
diff --git a/src/core/lib/iomgr/ev_epollex_linux.cc b/src/core/lib/iomgr/ev_epollex_linux.cc
index 931f3fc..5f5f45a 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.cc
+++ b/src/core/lib/iomgr/ev_epollex_linux.cc
@@ -257,8 +257,7 @@
 
 #ifndef NDEBUG
 #define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
-#define UNREF_BY(ec, fd, n, reason) \
-  unref_by(ec, fd, n, reason, __FILE__, __LINE__)
+#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
 static void ref_by(grpc_fd* fd, int n, const char* reason, const char* file,
                    int line) {
   if (grpc_trace_fd_refcount.enabled()) {
@@ -269,13 +268,13 @@
   }
 #else
 #define REF_BY(fd, n, reason) ref_by(fd, n)
-#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
+#define UNREF_BY(fd, n, reason) unref_by(fd, n)
 static void ref_by(grpc_fd* fd, int n) {
 #endif
   GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
 }
 
-static void fd_destroy(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void fd_destroy(void* arg, grpc_error* error) {
   grpc_fd* fd = (grpc_fd*)arg;
   /* Add the fd to the freelist */
   grpc_iomgr_unregister_object(&fd->iomgr_object);
@@ -293,8 +292,8 @@
 }
 
 #ifndef NDEBUG
-static void unref_by(grpc_exec_ctx* exec_ctx, grpc_fd* fd, int n,
-                     const char* reason, const char* file, int line) {
+static void unref_by(grpc_fd* fd, int n, const char* reason, const char* file,
+                     int line) {
   if (grpc_trace_fd_refcount.enabled()) {
     gpr_log(GPR_DEBUG,
             "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
@@ -302,12 +301,11 @@
             gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
   }
 #else
-static void unref_by(grpc_exec_ctx* exec_ctx, grpc_fd* fd, int n) {
+static void unref_by(grpc_fd* fd, int n) {
 #endif
   gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
   if (old == n) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_CREATE(fd_destroy, fd, grpc_schedule_on_exec_ctx),
         GRPC_ERROR_NONE);
   } else {
@@ -373,8 +371,7 @@
   return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
 }
 
-static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                      grpc_closure* on_done, int* release_fd,
+static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                       bool already_closed, const char* reason) {
   bool is_fd_closed = already_closed;
 
@@ -399,15 +396,14 @@
      to be alive (and not added to freelist) until the end of this function */
   REF_BY(fd, 1, reason);
 
-  GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_NONE);
 
   gpr_mu_unlock(&fd->orphan_mu);
 
-  UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
+  UNREF_BY(fd, 2, reason); /* Drop the reference */
 }
 
-static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
-                                                  grpc_fd* fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
   gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
   return (grpc_pollset*)notifier;
 }
@@ -417,22 +413,20 @@
 }
 
 /* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
-  if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
+static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
+  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
     shutdown(fd->fd, SHUT_RDWR);
-    fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
+    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
   }
   GRPC_ERROR_UNREF(why);
 }
 
-static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                              grpc_closure* closure) {
-  fd->read_closure->NotifyOn(exec_ctx, closure);
+static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
+  fd->read_closure->NotifyOn(closure);
 }
 
-static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               grpc_closure* closure) {
-  fd->write_closure->NotifyOn(exec_ctx, closure);
+static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
+  fd->write_closure->NotifyOn(closure);
 }
 
 /*******************************************************************************
@@ -556,8 +550,7 @@
 }
 
 /* pollset->mu must be held while calling this function */
-static void pollset_maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
-                                          grpc_pollset* pollset) {
+static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_DEBUG,
             "PS:%p (pollable:%p) maybe_finish_shutdown sc=%p (target:!NULL) "
@@ -567,7 +560,7 @@
   }
   if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
       pollset->containing_pollset_set_count == 0) {
-    GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
     pollset->shutdown_closure = nullptr;
   }
 }
@@ -575,8 +568,7 @@
 /* pollset->mu must be held before calling this function,
  * pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not be
  * held */
-static grpc_error* kick_one_worker(grpc_exec_ctx* exec_ctx,
-                                   grpc_pollset_worker* specific_worker) {
+static grpc_error* kick_one_worker(grpc_pollset_worker* specific_worker) {
   pollable* p = specific_worker->pollable_obj;
   grpc_core::mu_guard lock(&p->mu);
   GPR_ASSERT(specific_worker != nullptr);
@@ -584,19 +576,19 @@
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p);
     }
-    GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
     return GRPC_ERROR_NONE;
   }
   if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p);
     }
-    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
     specific_worker->kicked = true;
     return GRPC_ERROR_NONE;
   }
   if (specific_worker == p->root_worker) {
-    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
     }
@@ -605,7 +597,7 @@
     return error;
   }
   if (specific_worker->initialized_cv) {
-    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p);
     }
@@ -618,9 +610,9 @@
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+static grpc_error* pollset_kick(grpc_pollset* pollset,
                                 grpc_pollset_worker* specific_worker) {
-  GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+  GRPC_STATS_INC_POLLSET_KICK();
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_DEBUG,
             "PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
@@ -634,7 +626,7 @@
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", pollset);
         }
-        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
         pollset->kicked_without_poller = true;
         return GRPC_ERROR_NONE;
       } else {
@@ -654,29 +646,28 @@
         // so we take our chances and choose the SECOND worker enqueued against
         // the pollset as a worker that's likely to be in cv_wait
         return kick_one_worker(
-            exec_ctx, pollset->root_worker->links[PWLINK_POLLSET].next);
+            pollset->root_worker->links[PWLINK_POLLSET].next);
       }
     } else {
       if (grpc_polling_trace.enabled()) {
         gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", pollset);
       }
-      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
+      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
       return GRPC_ERROR_NONE;
     }
   } else {
-    return kick_one_worker(exec_ctx, specific_worker);
+    return kick_one_worker(specific_worker);
   }
 }
 
-static grpc_error* pollset_kick_all(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset* pollset) {
+static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
   grpc_error* error = GRPC_ERROR_NONE;
   const char* err_desc = "pollset_kick_all";
   grpc_pollset_worker* w = pollset->root_worker;
   if (w != nullptr) {
     do {
-      GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
-      append_error(&error, kick_one_worker(exec_ctx, w), err_desc);
+      GRPC_STATS_INC_POLLSET_KICK();
+      append_error(&error, kick_one_worker(w), err_desc);
       w = w->links[PWLINK_POLLSET].next;
     } while (w != pollset->root_worker);
   }
@@ -689,10 +680,9 @@
   *mu = &pollset->mu;
 }
 
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
-                                           grpc_millis millis) {
+static int poll_deadline_to_millis_timeout(grpc_millis millis) {
   if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
-  grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
   if (delta > INT_MAX)
     return INT_MAX;
   else if (delta < 0)
@@ -701,9 +691,8 @@
     return (int)delta;
 }
 
-static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               grpc_pollset* notifier) {
-  fd->read_closure->SetReady(exec_ctx);
+static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
+  fd->read_closure->SetReady();
 
   /* Note, it is possible that fd_become_readable might be called twice with
      different 'notifier's when an fd becomes readable and it is in two epoll
@@ -714,9 +703,7 @@
   gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
 }
 
-static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
-  fd->write_closure->SetReady(exec_ctx);
-}
+static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
 
 static grpc_error* fd_get_or_become_pollable(grpc_fd* fd, pollable** p) {
   gpr_mu_lock(&fd->pollable_mu);
@@ -745,16 +732,14 @@
 }
 
 /* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                             grpc_closure* closure) {
+static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   GPR_ASSERT(pollset->shutdown_closure == nullptr);
   pollset->shutdown_closure = closure;
-  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
-  pollset_maybe_finish_shutdown(exec_ctx, pollset);
+  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
+  pollset_maybe_finish_shutdown(pollset);
 }
 
-static grpc_error* pollable_process_events(grpc_exec_ctx* exec_ctx,
-                                           grpc_pollset* pollset,
+static grpc_error* pollable_process_events(grpc_pollset* pollset,
                                            pollable* pollable_obj, bool drain) {
   static const char* err_desc = "pollset_process_events";
   grpc_error* error = GRPC_ERROR_NONE;
@@ -784,10 +769,10 @@
                 pollset, fd, cancel, read_ev, write_ev);
       }
       if (read_ev || cancel) {
-        fd_become_readable(exec_ctx, fd, pollset);
+        fd_become_readable(fd, pollset);
       }
       if (write_ev || cancel) {
-        fd_become_writable(exec_ctx, fd);
+        fd_become_writable(fd);
       }
     }
   }
@@ -796,14 +781,13 @@
 }
 
 /* pollset_shutdown is guaranteed to be called before pollset_destroy. */
-static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
+static void pollset_destroy(grpc_pollset* pollset) {
   POLLABLE_UNREF(pollset->active_pollable, "pollset");
   pollset->active_pollable = nullptr;
 }
 
-static grpc_error* pollable_epoll(grpc_exec_ctx* exec_ctx, pollable* p,
-                                  grpc_millis deadline) {
-  int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
+static grpc_error* pollable_epoll(pollable* p, grpc_millis deadline) {
+  int timeout = poll_deadline_to_millis_timeout(deadline);
 
   if (grpc_polling_trace.enabled()) {
     char* desc = pollable_desc(p);
@@ -816,11 +800,11 @@
   }
   int r;
   do {
-    GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
+    GRPC_STATS_INC_SYSCALL_POLL();
     r = epoll_wait(p->epfd, p->events, MAX_EPOLL_EVENTS, timeout);
   } while (r < 0 && errno == EINTR);
   if (timeout != 0) {
-    GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
+    GRPC_SCHEDULING_END_BLOCKING_REGION;
   }
 
   if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
@@ -875,8 +859,7 @@
 }
 
 /* Return true if this thread should poll */
-static bool begin_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                         grpc_pollset_worker* worker,
+static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                          grpc_pollset_worker** worker_hdl,
                          grpc_millis deadline) {
   bool do_poll = (pollset->shutdown_closure == nullptr);
@@ -897,7 +880,7 @@
         worker->pollable_obj->root_worker != worker) {
       gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
               worker->pollable_obj, worker,
-              poll_deadline_to_millis_timeout(exec_ctx, deadline));
+              poll_deadline_to_millis_timeout(deadline));
     }
     while (do_poll && worker->pollable_obj->root_worker != worker) {
       if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
@@ -919,7 +902,7 @@
                 worker->pollable_obj, worker);
       }
     }
-    grpc_exec_ctx_invalidate_now(exec_ctx);
+    grpc_core::ExecCtx::Get()->InvalidateNow();
   } else {
     gpr_mu_unlock(&pollset->mu);
   }
@@ -928,8 +911,7 @@
   return do_poll;
 }
 
-static void end_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                       grpc_pollset_worker* worker,
+static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                        grpc_pollset_worker** worker_hdl) {
   gpr_mu_lock(&pollset->mu);
   gpr_mu_lock(&worker->pollable_obj->mu);
@@ -945,7 +927,7 @@
     case WRR_EMPTIED:
       if (pollset->active_pollable != worker->pollable_obj) {
         // pollable no longer being polled: flush events
-        pollable_process_events(exec_ctx, pollset, worker->pollable_obj, true);
+        pollable_process_events(pollset, worker->pollable_obj, true);
       }
       break;
     case WRR_REMOVED:
@@ -955,7 +937,7 @@
   POLLABLE_UNREF(worker->pollable_obj, "pollset_worker");
   if (worker_remove(&pollset->root_worker, worker, PWLINK_POLLSET) ==
       WRR_EMPTIED) {
-    pollset_maybe_finish_shutdown(exec_ctx, pollset);
+    pollset_maybe_finish_shutdown(pollset);
   }
   if (worker->initialized_cv) {
     gpr_cv_destroy(&worker->cv);
@@ -970,7 +952,7 @@
    The function pollset_work() may temporarily release the lock (pollset->po.mu)
    during the course of its execution but it will always re-acquire the lock and
    ensure that it is held by the time the function returns */
-static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+static grpc_error* pollset_work(grpc_pollset* pollset,
                                 grpc_pollset_worker** worker_hdl,
                                 grpc_millis deadline) {
 #ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
@@ -988,7 +970,7 @@
     gpr_log(GPR_DEBUG,
             "PS:%p work hdl=%p worker=%p now=%" PRIdPTR " deadline=%" PRIdPTR
             " kwp=%d pollable=%p",
-            pollset, worker_hdl, WORKER_PTR, grpc_exec_ctx_now(exec_ctx),
+            pollset, worker_hdl, WORKER_PTR, grpc_core::ExecCtx::Get()->Now(),
             deadline, pollset->kicked_without_poller, pollset->active_pollable);
   }
   static const char* err_desc = "pollset_work";
@@ -996,25 +978,23 @@
   if (pollset->kicked_without_poller) {
     pollset->kicked_without_poller = false;
   } else {
-    if (begin_worker(exec_ctx, pollset, WORKER_PTR, worker_hdl, deadline)) {
+    if (begin_worker(pollset, WORKER_PTR, worker_hdl, deadline)) {
       gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
       gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
       if (WORKER_PTR->pollable_obj->event_cursor ==
           WORKER_PTR->pollable_obj->event_count) {
-        append_error(
-            &error,
-            pollable_epoll(exec_ctx, WORKER_PTR->pollable_obj, deadline),
-            err_desc);
+        append_error(&error, pollable_epoll(WORKER_PTR->pollable_obj, deadline),
+                     err_desc);
       }
-      append_error(&error,
-                   pollable_process_events(exec_ctx, pollset,
-                                           WORKER_PTR->pollable_obj, false),
-                   err_desc);
-      grpc_exec_ctx_flush(exec_ctx);
+      append_error(
+          &error,
+          pollable_process_events(pollset, WORKER_PTR->pollable_obj, false),
+          err_desc);
+      grpc_core::ExecCtx::Get()->Flush();
       gpr_tls_set(&g_current_thread_pollset, 0);
       gpr_tls_set(&g_current_thread_worker, 0);
     }
-    end_worker(exec_ctx, pollset, WORKER_PTR, worker_hdl);
+    end_worker(pollset, WORKER_PTR, worker_hdl);
   }
 #ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
   gpr_free(worker);
@@ -1024,7 +1004,7 @@
 }
 
 static grpc_error* pollset_transition_pollable_from_empty_to_fd_locked(
-    grpc_exec_ctx* exec_ctx, grpc_pollset* pollset, grpc_fd* fd) {
+    grpc_pollset* pollset, grpc_fd* fd) {
   static const char* err_desc = "pollset_transition_pollable_from_empty_to_fd";
   grpc_error* error = GRPC_ERROR_NONE;
   if (grpc_polling_trace.enabled()) {
@@ -1032,7 +1012,7 @@
             "PS:%p add fd %p (%d); transition pollable from empty to fd",
             pollset, fd, fd->fd);
   }
-  append_error(&error, pollset_kick_all(exec_ctx, pollset), err_desc);
+  append_error(&error, pollset_kick_all(pollset), err_desc);
   POLLABLE_UNREF(pollset->active_pollable, "pollset");
   append_error(&error, fd_get_or_become_pollable(fd, &pollset->active_pollable),
                err_desc);
@@ -1040,7 +1020,7 @@
 }
 
 static grpc_error* pollset_transition_pollable_from_fd_to_multi_locked(
-    grpc_exec_ctx* exec_ctx, grpc_pollset* pollset, grpc_fd* and_add_fd) {
+    grpc_pollset* pollset, grpc_fd* and_add_fd) {
   static const char* err_desc = "pollset_transition_pollable_from_fd_to_multi";
   grpc_error* error = GRPC_ERROR_NONE;
   if (grpc_polling_trace.enabled()) {
@@ -1050,7 +1030,7 @@
         pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
         pollset->active_pollable->owner_fd);
   }
-  append_error(&error, pollset_kick_all(exec_ctx, pollset), err_desc);
+  append_error(&error, pollset_kick_all(pollset), err_desc);
   grpc_fd* initial_fd = pollset->active_pollable->owner_fd;
   POLLABLE_UNREF(pollset->active_pollable, "pollset");
   pollset->active_pollable = nullptr;
@@ -1068,27 +1048,25 @@
 }
 
 /* expects pollsets locked, flag whether fd is locked or not */
-static grpc_error* pollset_add_fd_locked(grpc_exec_ctx* exec_ctx,
-                                         grpc_pollset* pollset, grpc_fd* fd) {
+static grpc_error* pollset_add_fd_locked(grpc_pollset* pollset, grpc_fd* fd) {
   grpc_error* error = GRPC_ERROR_NONE;
   pollable* po_at_start =
       POLLABLE_REF(pollset->active_pollable, "pollset_add_fd");
   switch (pollset->active_pollable->type) {
     case PO_EMPTY:
       /* empty pollable --> single fd pollable */
-      error = pollset_transition_pollable_from_empty_to_fd_locked(exec_ctx,
-                                                                  pollset, fd);
+      error = pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
       break;
     case PO_FD:
       gpr_mu_lock(&po_at_start->owner_fd->orphan_mu);
       if ((gpr_atm_no_barrier_load(&pollset->active_pollable->owner_fd->refst) &
            1) == 0) {
-        error = pollset_transition_pollable_from_empty_to_fd_locked(
-            exec_ctx, pollset, fd);
+        error =
+            pollset_transition_pollable_from_empty_to_fd_locked(pollset, fd);
       } else {
         /* fd --> multipoller */
-        error = pollset_transition_pollable_from_fd_to_multi_locked(
-            exec_ctx, pollset, fd);
+        error =
+            pollset_transition_pollable_from_fd_to_multi_locked(pollset, fd);
       }
       gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu);
       break;
@@ -1105,8 +1083,7 @@
   return error;
 }
 
-static grpc_error* pollset_as_multipollable_locked(grpc_exec_ctx* exec_ctx,
-                                                   grpc_pollset* pollset,
+static grpc_error* pollset_as_multipollable_locked(grpc_pollset* pollset,
                                                    pollable** pollable_obj) {
   grpc_error* error = GRPC_ERROR_NONE;
   pollable* po_at_start =
@@ -1123,8 +1100,8 @@
         POLLABLE_UNREF(pollset->active_pollable, "pollset");
         error = pollable_create(PO_MULTI, &pollset->active_pollable);
       } else {
-        error = pollset_transition_pollable_from_fd_to_multi_locked(
-            exec_ctx, pollset, nullptr);
+        error = pollset_transition_pollable_from_fd_to_multi_locked(pollset,
+                                                                    nullptr);
       }
       gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu);
       break;
@@ -1142,10 +1119,9 @@
   return error;
 }
 
-static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_fd* fd) {
+static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
   gpr_mu_lock(&pollset->mu);
-  grpc_error* error = pollset_add_fd_locked(exec_ctx, pollset, fd);
+  grpc_error* error = pollset_add_fd_locked(pollset, fd);
   gpr_mu_unlock(&pollset->mu);
   GRPC_LOG_IF_ERROR("pollset_add_fd", error);
 }
@@ -1171,28 +1147,27 @@
   return pss;
 }
 
-static void pollset_set_unref(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss) {
+static void pollset_set_unref(grpc_pollset_set* pss) {
   if (pss == nullptr) return;
   if (!gpr_unref(&pss->refs)) return;
-  pollset_set_unref(exec_ctx, pss->parent);
+  pollset_set_unref(pss->parent);
   gpr_mu_destroy(&pss->mu);
   for (size_t i = 0; i < pss->pollset_count; i++) {
     gpr_mu_lock(&pss->pollsets[i]->mu);
     if (0 == --pss->pollsets[i]->containing_pollset_set_count) {
-      pollset_maybe_finish_shutdown(exec_ctx, pss->pollsets[i]);
+      pollset_maybe_finish_shutdown(pss->pollsets[i]);
     }
     gpr_mu_unlock(&pss->pollsets[i]->mu);
   }
   for (size_t i = 0; i < pss->fd_count; i++) {
-    UNREF_BY(exec_ctx, pss->fds[i], 2, "pollset_set");
+    UNREF_BY(pss->fds[i], 2, "pollset_set");
   }
   gpr_free(pss->pollsets);
   gpr_free(pss->fds);
   gpr_free(pss);
 }
 
-static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
-                               grpc_fd* fd) {
+static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_DEBUG, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
   }
@@ -1215,8 +1190,7 @@
   GRPC_LOG_IF_ERROR(err_desc, error);
 }
 
-static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
-                               grpc_fd* fd) {
+static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_DEBUG, "PSS:%p: del fd %p", pss, fd);
   }
@@ -1224,7 +1198,7 @@
   size_t i;
   for (i = 0; i < pss->fd_count; i++) {
     if (pss->fds[i] == fd) {
-      UNREF_BY(exec_ctx, fd, 2, "pollset_set");
+      UNREF_BY(fd, 2, "pollset_set");
       break;
     }
   }
@@ -1236,8 +1210,7 @@
   gpr_mu_unlock(&pss->mu);
 }
 
-static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pss, grpc_pollset* ps) {
+static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_DEBUG, "PSS:%p: del pollset %p", pss, ps);
   }
@@ -1256,15 +1229,15 @@
   gpr_mu_unlock(&pss->mu);
   gpr_mu_lock(&ps->mu);
   if (0 == --ps->containing_pollset_set_count) {
-    pollset_maybe_finish_shutdown(exec_ctx, ps);
+    pollset_maybe_finish_shutdown(ps);
   }
   gpr_mu_unlock(&ps->mu);
 }
 
 // add all fds to pollables, and output a new array of unorphaned out_fds
 // assumes pollsets are multipollable
-static grpc_error* add_fds_to_pollsets(grpc_exec_ctx* exec_ctx, grpc_fd** fds,
-                                       size_t fd_count, grpc_pollset** pollsets,
+static grpc_error* add_fds_to_pollsets(grpc_fd** fds, size_t fd_count,
+                                       grpc_pollset** pollsets,
                                        size_t pollset_count,
                                        const char* err_desc, grpc_fd** out_fds,
                                        size_t* out_fd_count) {
@@ -1273,7 +1246,7 @@
     gpr_mu_lock(&fds[i]->orphan_mu);
     if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) {
       gpr_mu_unlock(&fds[i]->orphan_mu);
-      UNREF_BY(exec_ctx, fds[i], 2, "pollset_set");
+      UNREF_BY(fds[i], 2, "pollset_set");
     } else {
       for (size_t j = 0; j < pollset_count; j++) {
         append_error(&error,
@@ -1287,8 +1260,7 @@
   return error;
 }
 
-static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pss, grpc_pollset* ps) {
+static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_DEBUG, "PSS:%p: add pollset %p", pss, ps);
   }
@@ -1296,8 +1268,8 @@
   static const char* err_desc = "pollset_set_add_pollset";
   pollable* pollable_obj = nullptr;
   gpr_mu_lock(&ps->mu);
-  if (!GRPC_LOG_IF_ERROR(err_desc, pollset_as_multipollable_locked(
-                                       exec_ctx, ps, &pollable_obj))) {
+  if (!GRPC_LOG_IF_ERROR(err_desc,
+                         pollset_as_multipollable_locked(ps, &pollable_obj))) {
     GPR_ASSERT(pollable_obj == nullptr);
     gpr_mu_unlock(&ps->mu);
     return;
@@ -1308,8 +1280,8 @@
   size_t initial_fd_count = pss->fd_count;
   pss->fd_count = 0;
   append_error(&error,
-               add_fds_to_pollsets(exec_ctx, pss->fds, initial_fd_count, &ps, 1,
-                                   err_desc, pss->fds, &pss->fd_count),
+               add_fds_to_pollsets(pss->fds, initial_fd_count, &ps, 1, err_desc,
+                                   pss->fds, &pss->fd_count),
                err_desc);
   if (pss->pollset_count == pss->pollset_capacity) {
     pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
@@ -1323,8 +1295,7 @@
   GRPC_LOG_IF_ERROR(err_desc, error);
 }
 
-static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* a,
+static void pollset_set_add_pollset_set(grpc_pollset_set* a,
                                         grpc_pollset_set* b) {
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_DEBUG, "PSS: merge (%p, %p)", a, b);
@@ -1373,13 +1344,13 @@
   a->fd_count = 0;
   append_error(
       &error,
-      add_fds_to_pollsets(exec_ctx, a->fds, initial_a_fd_count, b->pollsets,
+      add_fds_to_pollsets(a->fds, initial_a_fd_count, b->pollsets,
                           b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
       err_desc);
   append_error(
       &error,
-      add_fds_to_pollsets(exec_ctx, b->fds, b->fd_count, a->pollsets,
-                          a->pollset_count, "merge_b2a", a->fds, &a->fd_count),
+      add_fds_to_pollsets(b->fds, b->fd_count, a->pollsets, a->pollset_count,
+                          "merge_b2a", a->fds, &a->fd_count),
       err_desc);
   if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
     a->pollset_capacity =
@@ -1401,8 +1372,7 @@
   gpr_mu_unlock(&b->mu);
 }
 
-static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* bag,
+static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                         grpc_pollset_set* item) {}
 
 /*******************************************************************************
@@ -1479,7 +1449,7 @@
  * NULL */
 const grpc_event_engine_vtable* grpc_init_epollex_linux(
     bool explicitly_requested) {
-  return NULL;
+  return nullptr;
 }
 #endif /* defined(GRPC_POSIX_SOCKET) */
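
UNREF_BY above loses its exec_ctx argument but keeps the NDEBUG split: debug builds log reason, file and line for every decrement, release builds reduce to a bare atomic fetch-and-subtract, and the final decrement triggers destruction (the real code schedules fd_destroy as a closure; this sketch deletes inline). A standalone illustration with hypothetical names:

#include <atomic>
#include <cstdio>

struct tracked {
  std::atomic<long> refs{1};
};

#ifndef NDEBUG
#define TRACKED_UNREF(obj, reason) \
  tracked_unref((obj), (reason), __FILE__, __LINE__)
static void tracked_unref(tracked* t, const char* reason, const char* file,
                          int line) {
  long prev = t->refs.fetch_sub(1, std::memory_order_acq_rel);
  std::fprintf(stderr, "UNREF %p %ld -> %ld [%s; %s:%d]\n", (void*)t, prev,
               prev - 1, reason, file, line);
  if (prev == 1) delete t;  // last reference gone
}
#else
#define TRACKED_UNREF(obj, reason) tracked_unref((obj))
static void tracked_unref(tracked* t) {
  if (t->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) delete t;
}
#endif

int main() {
  tracked* t = new tracked;
  TRACKED_UNREF(t, "demo");  // reason is compiled out in release builds
}
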
 
diff --git a/src/core/lib/iomgr/ev_epollex_linux.h b/src/core/lib/iomgr/ev_epollex_linux.h
index 22b536c..ffa7fc7 100644
--- a/src/core/lib/iomgr/ev_epollex_linux.h
+++ b/src/core/lib/iomgr/ev_epollex_linux.h
@@ -22,15 +22,7 @@
 #include "src/core/lib/iomgr/ev_posix.h"
 #include "src/core/lib/iomgr/port.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 const grpc_event_engine_vtable* grpc_init_epollex_linux(
     bool explicitly_requested);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLEX_LINUX_H */
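
Both pollers above convert an absolute grpc_millis deadline into an epoll_wait timeout through poll_deadline_to_millis_timeout, now reading the cached clock via grpc_core::ExecCtx::Get()->Now(). A self-contained sketch of just the clamping arithmetic; the constant and function names below are illustrative:

#include <climits>
#include <cstdint>
#include <cstdio>

using millis = int64_t;
constexpr millis kMillisInfFuture = INT64_MAX;

// "now" stands in for the cached ExecCtx clock; -1 asks epoll_wait to block
// indefinitely, 0 makes it return immediately for an already-expired deadline.
static int deadline_to_epoll_timeout(millis deadline, millis now) {
  if (deadline == kMillisInfFuture) return -1;
  millis delta = deadline - now;
  if (delta > INT_MAX) return INT_MAX;  // epoll_wait takes an int
  if (delta < 0) return 0;
  return static_cast<int>(delta);
}

int main() {
  std::printf("%d %d %d\n",
              deadline_to_epoll_timeout(kMillisInfFuture, 1000),  // -1
              deadline_to_epoll_timeout(900, 1000),               // 0
              deadline_to_epoll_timeout(1250, 1000));             // 250
}
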
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.cc b/src/core/lib/iomgr/ev_epollsig_linux.cc
index 7ff043f..8072a6c 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.cc
+++ b/src/core/lib/iomgr/ev_epollsig_linux.cc
@@ -165,13 +165,12 @@
 #ifndef NDEBUG
 
 #define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
-#define PI_UNREF(exec_ctx, p, r) \
-  pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)
+#define PI_UNREF(p, r) pi_unref_dbg((p), (r), __FILE__, __LINE__)
 
 #else
 
 #define PI_ADD_REF(p, r) pi_add_ref((p))
-#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))
+#define PI_UNREF(p, r) pi_unref((p))
 
 #endif
 
@@ -270,7 +269,7 @@
 static __thread polling_island* g_current_thread_polling_island;
 
 /* Forward declaration */
-static void polling_island_delete(grpc_exec_ctx* exec_ctx, polling_island* pi);
+static void polling_island_delete(polling_island* pi);
 
 #ifdef GRPC_TSAN
 /* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -284,7 +283,7 @@
 #endif /* defined(GRPC_TSAN) */
 
 static void pi_add_ref(polling_island* pi);
-static void pi_unref(grpc_exec_ctx* exec_ctx, polling_island* pi);
+static void pi_unref(polling_island* pi);
 
 #ifndef NDEBUG
 static void pi_add_ref_dbg(polling_island* pi, const char* reason,
@@ -299,8 +298,8 @@
   pi_add_ref(pi);
 }
 
-static void pi_unref_dbg(grpc_exec_ctx* exec_ctx, polling_island* pi,
-                         const char* reason, const char* file, int line) {
+static void pi_unref_dbg(polling_island* pi, const char* reason,
+                         const char* file, int line) {
   if (grpc_polling_trace.enabled()) {
     gpr_atm old_cnt = gpr_atm_acq_load(&pi->ref_count);
     gpr_log(GPR_DEBUG,
@@ -308,7 +307,7 @@
             " (%s) - (%s, %d)",
             pi, old_cnt, (old_cnt - 1), reason, file, line);
   }
-  pi_unref(exec_ctx, pi);
+  pi_unref(pi);
 }
 #endif
 
@@ -316,7 +315,7 @@
   gpr_atm_no_barrier_fetch_add(&pi->ref_count, 1);
 }
 
-static void pi_unref(grpc_exec_ctx* exec_ctx, polling_island* pi) {
+static void pi_unref(polling_island* pi) {
   /* If ref count went to zero, delete the polling island.
     Note that this deletion need not be done under a lock. Once the ref count goes
      to zero, we are guaranteed that no one else holds a reference to the
@@ -327,9 +326,9 @@
    */
   if (1 == gpr_atm_full_fetch_add(&pi->ref_count, -1)) {
     polling_island* next = (polling_island*)gpr_atm_acq_load(&pi->merged_to);
-    polling_island_delete(exec_ctx, pi);
+    polling_island_delete(pi);
     if (next != nullptr) {
-      PI_UNREF(exec_ctx, next, "pi_delete"); /* Recursive call */
+      PI_UNREF(next, "pi_delete"); /* Recursive call */
     }
   }
 }
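
pi_unref above deletes the polling island once its count hits zero and then releases the reference it held on the island it merged into, which can cascade down the merge chain. A reduced model of that drop-to-zero path using std::atomic; the type and field names are stand-ins:

#include <atomic>
#include <cstdio>

struct island {
  std::atomic<long> refs{1};
  std::atomic<island*> merged_to{nullptr};
};

static void island_unref(island* pi) {
  if (pi->refs.fetch_sub(1, std::memory_order_acq_rel) == 1) {
    // Last reference: nothing else can reach pi, so no lock is needed here.
    island* next = pi->merged_to.load(std::memory_order_acquire);
    delete pi;
    if (next != nullptr) island_unref(next);  // recursive call, as in pi_unref
  }
}

int main() {
  island* a = new island;  // refs = 1, owned by this scope
  island* b = new island;  // refs = 1, treated as held by a via merged_to
  a->merged_to.store(b, std::memory_order_release);
  island_unref(a);  // frees a, then drops b's last reference as well
  std::printf("merge chain released\n");
}
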
@@ -465,8 +464,7 @@
 }
 
 /* Might return NULL in case of an error */
-static polling_island* polling_island_create(grpc_exec_ctx* exec_ctx,
-                                             grpc_fd* initial_fd,
+static polling_island* polling_island_create(grpc_fd* initial_fd,
                                              grpc_error** error) {
   polling_island* pi = nullptr;
   const char* err_desc = "polling_island_create";
@@ -482,7 +480,7 @@
 
   gpr_atm_rel_store(&pi->ref_count, 0);
   gpr_atm_rel_store(&pi->poller_count, 0);
-  gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
+  gpr_atm_rel_store(&pi->merged_to, (gpr_atm) nullptr);
 
   pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
 
@@ -497,13 +495,13 @@
 
 done:
   if (*error != GRPC_ERROR_NONE) {
-    polling_island_delete(exec_ctx, pi);
+    polling_island_delete(pi);
     pi = nullptr;
   }
   return pi;
 }
 
-static void polling_island_delete(grpc_exec_ctx* exec_ctx, polling_island* pi) {
+static void polling_island_delete(polling_island* pi) {
   GPR_ASSERT(pi->fd_cnt == 0);
 
   if (pi->epoll_fd >= 0) {
@@ -862,8 +860,7 @@
   return ret_fd;
 }
 
-static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                      grpc_closure* on_done, int* release_fd,
+static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                       bool already_closed, const char* reason) {
   grpc_error* error = GRPC_ERROR_NONE;
   polling_island* unref_pi = nullptr;
@@ -902,7 +899,7 @@
 
   fd->orphaned = true;
 
-  GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_REF(error));
 
   gpr_mu_unlock(&fd->po.mu);
   UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -911,7 +908,7 @@
        The polling island owns a workqueue which owns an fd, and unreffing
        inside the lock can cause an eventual lock loop that makes TSAN very
        unhappy. */
-    PI_UNREF(exec_ctx, unref_pi, "fd_orphan");
+    PI_UNREF(unref_pi, "fd_orphan");
   }
   if (error != GRPC_ERROR_NONE) {
     const char* msg = grpc_error_string(error);
@@ -920,8 +917,7 @@
   GRPC_ERROR_UNREF(error);
 }
 
-static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
-                                                  grpc_fd* fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
   gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
   return (grpc_pollset*)notifier;
 }
@@ -931,22 +927,20 @@
 }
 
 /* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
-  if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
+static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
+  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
     shutdown(fd->fd, SHUT_RDWR);
-    fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
+    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
   }
   GRPC_ERROR_UNREF(why);
 }
 
-static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                              grpc_closure* closure) {
-  fd->read_closure->NotifyOn(exec_ctx, closure);
+static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
+  fd->read_closure->NotifyOn(closure);
 }
 
-static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               grpc_closure* closure) {
-  fd->write_closure->NotifyOn(exec_ctx, closure);
+static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
+  fd->write_closure->NotifyOn(closure);
 }
 
 /*******************************************************************************
@@ -1028,11 +1022,11 @@
 }
 
 /* p->mu must be held before calling this function */
-static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+static grpc_error* pollset_kick(grpc_pollset* p,
                                 grpc_pollset_worker* specific_worker) {
   GPR_TIMER_BEGIN("pollset_kick", 0);
   grpc_error* error = GRPC_ERROR_NONE;
-  GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+  GRPC_STATS_INC_POLLSET_KICK();
   const char* err_desc = "Kick Failure";
   grpc_pollset_worker* worker = specific_worker;
   if (worker != nullptr) {
@@ -1096,10 +1090,9 @@
   pollset->shutdown_done = nullptr;
 }
 
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
-                                           grpc_millis millis) {
+static int poll_deadline_to_millis_timeout(grpc_millis millis) {
   if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
-  grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
   if (delta > INT_MAX)
     return INT_MAX;
   else if (delta < 0)
@@ -1108,9 +1101,8 @@
     return (int)delta;
 }
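
Editor's note: poll_deadline_to_millis_timeout now reads the clock from the thread-local ExecCtx instead of a passed-in exec_ctx; the clamping rule itself is unchanged. A self-contained restatement of that rule, assuming grpc_millis is a 64-bit millisecond count and GRPC_MILLIS_INF_FUTURE maps to INT64_MAX:

#include <climits>
#include <cstdint>

// -1 means "block indefinitely" for poll()/epoll_wait(); 0 means "do not block".
static int deadline_to_timeout_ms(int64_t now_ms, int64_t deadline_ms) {
  if (deadline_ms == INT64_MAX) return -1;  // infinite future
  int64_t delta = deadline_ms - now_ms;
  if (delta > INT_MAX) return INT_MAX;      // clamp to poll's int timeout
  if (delta < 0) return 0;                  // deadline already passed
  return static_cast<int>(delta);
}
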
 
-static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               grpc_pollset* notifier) {
-  fd->read_closure->SetReady(exec_ctx);
+static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
+  fd->read_closure->SetReady();
 
   /* Note, it is possible that fd_become_readable might be called twice with
      different 'notifier's when an fd becomes readable and it is in two epoll
@@ -1121,39 +1113,34 @@
   gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
 }
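
Editor's note: the notifier pollset is published with a release store here and read back with an acquire load in fd_get_read_notifier_pollset, so readers always observe a fully initialized pointer. A minimal sketch of that publish/consume pairing, with std::atomic standing in for gpr_atm (names hypothetical):

#include <atomic>

struct pollset;  // opaque, as in the event engine

static std::atomic<pollset*> g_read_notifier{nullptr};

static void publish_notifier(pollset* notifier) {
  g_read_notifier.store(notifier, std::memory_order_release);
}

static pollset* read_notifier() {
  return g_read_notifier.load(std::memory_order_acquire);
}
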
 
-static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
-  fd->write_closure->SetReady(exec_ctx);
-}
+static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
 
-static void pollset_release_polling_island(grpc_exec_ctx* exec_ctx,
-                                           grpc_pollset* ps,
+static void pollset_release_polling_island(grpc_pollset* ps,
                                            const char* reason) {
   if (ps->po.pi != nullptr) {
-    PI_UNREF(exec_ctx, ps->po.pi, reason);
+    PI_UNREF(ps->po.pi, reason);
   }
   ps->po.pi = nullptr;
 }
 
-static void finish_shutdown_locked(grpc_exec_ctx* exec_ctx,
-                                   grpc_pollset* pollset) {
+static void finish_shutdown_locked(grpc_pollset* pollset) {
   /* The pollset cannot have any workers if we are at this stage */
   GPR_ASSERT(!pollset_has_workers(pollset));
 
   pollset->finish_shutdown_called = true;
 
   /* Release the ref and set pollset->po.pi to NULL */
-  pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
-  GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+  pollset_release_polling_island(pollset, "ps_shutdown");
+  GRPC_CLOSURE_SCHED(pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 
 /* pollset->po.mu lock must be held by the caller before calling this */
-static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                             grpc_closure* closure) {
+static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   GPR_TIMER_BEGIN("pollset_shutdown", 0);
   GPR_ASSERT(!pollset->shutting_down);
   pollset->shutting_down = true;
   pollset->shutdown_done = closure;
-  pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
+  pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
 
   /* If the pollset has any workers, we cannot call finish_shutdown_locked()
      because it would release the underlying polling island. In such a case, we
@@ -1161,7 +1148,7 @@
   if (!pollset_has_workers(pollset)) {
     GPR_ASSERT(!pollset->finish_shutdown_called);
     GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
-    finish_shutdown_locked(exec_ctx, pollset);
+    finish_shutdown_locked(pollset);
   }
   GPR_TIMER_END("pollset_shutdown", 0);
 }
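
Editor's note: as the comment above explains, finish_shutdown_locked cannot run while workers are active, so shutdown is deferred to the last worker leaving pollset_work. A loose sketch of that deferred-shutdown shape, with hypothetical types and no locking:

#include <functional>

struct simple_pollset {
  bool shutting_down = false;
  bool finish_called = false;
  int worker_count = 0;
  std::function<void()> shutdown_done;
};

static void finish_shutdown(simple_pollset* p) {
  p->finish_called = true;
  if (p->shutdown_done) p->shutdown_done();  // release resources, notify caller
}

static void begin_shutdown(simple_pollset* p, std::function<void()> done) {
  p->shutting_down = true;
  p->shutdown_done = std::move(done);
  if (p->worker_count == 0) finish_shutdown(p);  // else the last worker does it
}

static void worker_leaves(simple_pollset* p) {
  if (--p->worker_count == 0 && p->shutting_down && !p->finish_called) {
    finish_shutdown(p);
  }
}
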
@@ -1169,15 +1156,14 @@
 /* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
  * than destroying the mutexes, there is nothing special that needs to be done
  * here */
-static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
+static void pollset_destroy(grpc_pollset* pollset) {
   GPR_ASSERT(!pollset_has_workers(pollset));
   gpr_mu_destroy(&pollset->po.mu);
 }
 
 #define GRPC_EPOLL_MAX_EVENTS 100
 /* Note: sig_mask contains the signal mask to use *during* epoll_wait() */
-static void pollset_work_and_unlock(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset* pollset,
+static void pollset_work_and_unlock(grpc_pollset* pollset,
                                     grpc_pollset_worker* worker, int timeout_ms,
                                     sigset_t* sig_mask, grpc_error** error) {
   struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
@@ -1199,7 +1185,7 @@
      this function (i.e pollset_work_and_unlock()) is called */
 
   if (pollset->po.pi == nullptr) {
-    pollset->po.pi = polling_island_create(exec_ctx, nullptr, error);
+    pollset->po.pi = polling_island_create(nullptr, error);
     if (pollset->po.pi == nullptr) {
       GPR_TIMER_END("pollset_work_and_unlock", 0);
       return; /* Fatal error. We cannot continue */
@@ -1219,7 +1205,7 @@
     /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
        polling island to be deleted */
     PI_ADD_REF(pi, "ps");
-    PI_UNREF(exec_ctx, pollset->po.pi, "ps");
+    PI_UNREF(pollset->po.pi, "ps");
     pollset->po.pi = pi;
   }
 
@@ -1233,10 +1219,10 @@
   g_current_thread_polling_island = pi;
 
   GRPC_SCHEDULING_START_BLOCKING_REGION;
-  GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
+  GRPC_STATS_INC_SYSCALL_POLL();
   ep_rv =
       epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms, sig_mask);
-  GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
+  GRPC_SCHEDULING_END_BLOCKING_REGION;
   if (ep_rv < 0) {
     if (errno != EINTR) {
       gpr_asprintf(&err_msg,
@@ -1274,10 +1260,10 @@
       int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
       int write_ev = ep_ev[i].events & EPOLLOUT;
       if (read_ev || cancel) {
-        fd_become_readable(exec_ctx, fd, pollset);
+        fd_become_readable(fd, pollset);
       }
       if (write_ev || cancel) {
-        fd_become_writable(exec_ctx, fd);
+        fd_become_writable(fd);
       }
     }
   }
@@ -1292,7 +1278,7 @@
      that we got before releasing the polling island lock). This is because
     pollset->po.pi pointer might get updated in other parts of the
      code when there is an island merge while we are doing epoll_wait() above */
-  PI_UNREF(exec_ctx, pi, "ps_work");
+  PI_UNREF(pi, "ps_work");
 
   GPR_TIMER_END("pollset_work_and_unlock", 0);
 }
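
Editor's note: the hunk above keeps the epoll_pwait loop intact and only changes how the stats counter and readiness callbacks are invoked. For reference, a stripped-down version of the same wait-and-dispatch shape, runnable on Linux; the callbacks are hypothetical placeholders:

#include <sys/epoll.h>
#include <signal.h>
#include <cerrno>
#include <cstdio>

static const int kMaxEvents = 100;

static void on_readable(int fd) { std::printf("fd %d readable\n", fd); }
static void on_writable(int fd) { std::printf("fd %d writable\n", fd); }

static void wait_once(int epoll_fd, int timeout_ms, const sigset_t* sig_mask) {
  struct epoll_event events[kMaxEvents];
  int n = epoll_pwait(epoll_fd, events, kMaxEvents, timeout_ms, sig_mask);
  if (n < 0) {
    if (errno != EINTR) std::perror("epoll_pwait");
    return;
  }
  for (int i = 0; i < n; i++) {
    int fd = events[i].data.fd;
    bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
    if (cancel || (events[i].events & (EPOLLIN | EPOLLPRI))) on_readable(fd);
    if (cancel || (events[i].events & EPOLLOUT)) on_writable(fd);
  }
}
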
@@ -1301,12 +1287,12 @@
    The function pollset_work() may temporarily release the lock (pollset->po.mu)
    during the course of its execution but it will always re-acquire the lock and
    ensure that it is held by the time the function returns */
-static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+static grpc_error* pollset_work(grpc_pollset* pollset,
                                 grpc_pollset_worker** worker_hdl,
                                 grpc_millis deadline) {
   GPR_TIMER_BEGIN("pollset_work", 0);
   grpc_error* error = GRPC_ERROR_NONE;
-  int timeout_ms = poll_deadline_to_millis_timeout(exec_ctx, deadline);
+  int timeout_ms = poll_deadline_to_millis_timeout(deadline);
 
   sigset_t new_mask;
 
@@ -1364,9 +1350,9 @@
 
     push_front_worker(pollset, &worker); /* Add worker to pollset */
 
-    pollset_work_and_unlock(exec_ctx, pollset, &worker, timeout_ms,
-                            &g_orig_sigmask, &error);
-    grpc_exec_ctx_flush(exec_ctx);
+    pollset_work_and_unlock(pollset, &worker, timeout_ms, &g_orig_sigmask,
+                            &error);
+    grpc_core::ExecCtx::Get()->Flush();
 
     gpr_mu_lock(&pollset->po.mu);
 
@@ -1386,10 +1372,10 @@
   if (pollset->shutting_down && !pollset_has_workers(pollset) &&
       !pollset->finish_shutdown_called) {
     GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
-    finish_shutdown_locked(exec_ctx, pollset);
+    finish_shutdown_locked(pollset);
 
     gpr_mu_unlock(&pollset->po.mu);
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(&pollset->po.mu);
   }
 
@@ -1404,9 +1390,8 @@
   return error;
 }
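
Editor's note: throughout these hunks the explicit grpc_exec_ctx* parameter disappears in favour of the thread-local context reached via grpc_core::ExecCtx::Get(), with Now() and Flush() as the visible entry points. A hedged sketch of a caller under that model; the RAII behaviour of ExecCtx is assumed from the Get()/Flush() usage in this diff, and the header paths are assumed from this tree's layout:

#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/exec_ctx.h"

// Hypothetical caller: arm a read notification without passing exec_ctx around.
static void arm_read(grpc_fd* fd, grpc_closure* on_read) {
  grpc_core::ExecCtx exec_ctx;          // assumed to install the thread-local context
  grpc_fd_notify_on_read(fd, on_read);  // no exec_ctx parameter any more
  grpc_core::ExecCtx::Get()->Flush();   // run anything scheduled above now
}
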
 
-static void add_poll_object(grpc_exec_ctx* exec_ctx, poll_obj* bag,
-                            poll_obj_type bag_type, poll_obj* item,
-                            poll_obj_type item_type) {
+static void add_poll_object(poll_obj* bag, poll_obj_type bag_type,
+                            poll_obj* item, poll_obj_type item_type) {
   GPR_TIMER_BEGIN("add_poll_object", 0);
 
 #ifndef NDEBUG
@@ -1456,7 +1441,7 @@
            keeping TSAN happy outweigh any performance advantage we might have
            by keeping the lock held. */
         gpr_mu_unlock(&item->mu);
-        pi_new = polling_island_create(exec_ctx, FD_FROM_PO(item), &error);
+        pi_new = polling_island_create(FD_FROM_PO(item), &error);
         gpr_mu_lock(&item->mu);
 
         /* Need to reverify any assumptions made between the initial lock and
@@ -1475,11 +1460,11 @@
           /* Ref and unref so that the polling island gets deleted during unref
            */
           PI_ADD_REF(pi_new, "dance_of_destruction");
-          PI_UNREF(exec_ctx, pi_new, "dance_of_destruction");
+          PI_UNREF(pi_new, "dance_of_destruction");
           goto retry;
         }
       } else {
-        pi_new = polling_island_create(exec_ctx, nullptr, &error);
+        pi_new = polling_island_create(nullptr, &error);
       }
 
       GRPC_POLLING_TRACE(
@@ -1533,7 +1518,7 @@
   if (item->pi != pi_new) {
     PI_ADD_REF(pi_new, poll_obj_string(item_type));
     if (item->pi != nullptr) {
-      PI_UNREF(exec_ctx, item->pi, poll_obj_string(item_type));
+      PI_UNREF(item->pi, poll_obj_string(item_type));
     }
     item->pi = pi_new;
   }
@@ -1541,7 +1526,7 @@
   if (bag->pi != pi_new) {
     PI_ADD_REF(pi_new, poll_obj_string(bag_type));
     if (bag->pi != nullptr) {
-      PI_UNREF(exec_ctx, bag->pi, poll_obj_string(bag_type));
+      PI_UNREF(bag->pi, poll_obj_string(bag_type));
     }
     bag->pi = pi_new;
   }
@@ -1553,10 +1538,8 @@
   GPR_TIMER_END("add_poll_object", 0);
 }
 
-static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_fd* fd) {
-  add_poll_object(exec_ctx, &pollset->po, POLL_OBJ_POLLSET, &fd->po,
-                  POLL_OBJ_FD);
+static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
+  add_poll_object(&pollset->po, POLL_OBJ_POLLSET, &fd->po, POLL_OBJ_FD);
 }
 
 /*******************************************************************************
@@ -1573,48 +1556,39 @@
   return pss;
 }
 
-static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
-                                grpc_pollset_set* pss) {
+static void pollset_set_destroy(grpc_pollset_set* pss) {
   gpr_mu_destroy(&pss->po.mu);
 
   if (pss->po.pi != nullptr) {
-    PI_UNREF(exec_ctx, pss->po.pi, "pss_destroy");
+    PI_UNREF(pss->po.pi, "pss_destroy");
   }
 
   gpr_free(pss);
 }
 
-static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
-                               grpc_fd* fd) {
-  add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &fd->po,
-                  POLL_OBJ_FD);
+static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {
+  add_poll_object(&pss->po, POLL_OBJ_POLLSET_SET, &fd->po, POLL_OBJ_FD);
 }
 
-static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
-                               grpc_fd* fd) {
+static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {
   /* Nothing to do */
 }
 
-static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pss, grpc_pollset* ps) {
-  add_poll_object(exec_ctx, &pss->po, POLL_OBJ_POLLSET_SET, &ps->po,
-                  POLL_OBJ_POLLSET);
+static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
+  add_poll_object(&pss->po, POLL_OBJ_POLLSET_SET, &ps->po, POLL_OBJ_POLLSET);
 }
 
-static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pss, grpc_pollset* ps) {
+static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {
   /* Nothing to do */
 }
 
-static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* bag,
+static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
                                         grpc_pollset_set* item) {
-  add_poll_object(exec_ctx, &bag->po, POLL_OBJ_POLLSET_SET, &item->po,
+  add_poll_object(&bag->po, POLL_OBJ_POLLSET_SET, &item->po,
                   POLL_OBJ_POLLSET_SET);
 }
 
-static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* bag,
+static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                         grpc_pollset_set* item) {
   /* Nothing to do */
 }
@@ -1758,7 +1732,7 @@
  * NULL */
 const grpc_event_engine_vtable* grpc_init_epollsig_linux(
     bool explicit_request) {
-  return NULL;
+  return nullptr;
 }
 #endif /* defined(GRPC_POSIX_SOCKET) */
 
diff --git a/src/core/lib/iomgr/ev_epollsig_linux.h b/src/core/lib/iomgr/ev_epollsig_linux.h
index ca68595..5b8aba9 100644
--- a/src/core/lib/iomgr/ev_epollsig_linux.h
+++ b/src/core/lib/iomgr/ev_epollsig_linux.h
@@ -22,10 +22,6 @@
 #include "src/core/lib/iomgr/ev_posix.h"
 #include "src/core/lib/iomgr/port.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 const grpc_event_engine_vtable* grpc_init_epollsig_linux(bool explicit_request);
 
 #ifdef GRPC_LINUX_EPOLL
@@ -34,8 +30,4 @@
 bool grpc_are_polling_islands_equal(void* p, void* q);
 #endif /* defined(GRPC_LINUX_EPOLL) */
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_EV_EPOLLSIG_LINUX_H */
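
Editor's note: with the iomgr sources now compiled as C++, the extern "C" guards in these headers are no longer needed and the declarations get C++ linkage everywhere. For contrast, the removed pattern in its general form, which is only required while a header must be consumable from both C and C++ translation units:

#ifdef __cplusplus
extern "C" {
#endif

void some_c_linkage_function(int fd); /* hypothetical declaration */

#ifdef __cplusplus
}
#endif
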
diff --git a/src/core/lib/iomgr/ev_poll_posix.cc b/src/core/lib/iomgr/ev_poll_posix.cc
index 13468b3..5fa0201 100644
--- a/src/core/lib/iomgr/ev_poll_posix.cc
+++ b/src/core/lib/iomgr/ev_poll_posix.cc
@@ -71,7 +71,6 @@
   int shutdown;
   int closed;
   int released;
-  gpr_atm pollhup;
   grpc_error* shutdown_error;
 
   /* The watcher list.
@@ -129,8 +128,7 @@
    MUST NOT be called with a pollset lock taken
    if got_read or got_write are 1, also does the become_{readable,writable} as
    appropriate. */
-static void fd_end_poll(grpc_exec_ctx* exec_ctx, grpc_fd_watcher* rec,
-                        int got_read, int got_write,
+static void fd_end_poll(grpc_fd_watcher* rec, int got_read, int got_write,
                         grpc_pollset* read_notifier_pollset);
 
 /* Return 1 if this fd is orphaned, 0 otherwise */
@@ -187,11 +185,9 @@
 };
 
 /* Add an fd to a pollset */
-static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           struct grpc_fd* fd);
+static void pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd);
 
-static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
-                               grpc_pollset_set* pollset_set, grpc_fd* fd);
+static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
 
 /* Convert a timespec to milliseconds:
    - very small or negative poll times are clamped to zero to do a
@@ -200,8 +196,7 @@
    - longer than a millisecond polls are rounded up to the next nearest
      millisecond to avoid spinning
    - infinite timeouts are converted to -1 */
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
-                                           grpc_millis deadline);
+static int poll_deadline_to_millis_timeout(grpc_millis deadline);
 
 /* Allow kick to wakeup the currently polling worker */
 #define GRPC_POLLSET_CAN_KICK_SELF 1
@@ -209,7 +204,7 @@
 #define GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP 2
 /* As per pollset_kick, with an extended set of flags (defined above)
    -- mostly for fd_posix's use. */
-static grpc_error* pollset_kick_ext(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+static grpc_error* pollset_kick_ext(grpc_pollset* p,
                                     grpc_pollset_worker* specific_worker,
                                     uint32_t flags) GRPC_MUST_USE_RESULT;
 
@@ -247,7 +242,7 @@
 
 typedef struct poll_result {
   gpr_refcount refcount;
-  cv_node* watchers;
+  grpc_cv_node* watchers;
   int watchcount;
   struct pollfd* fds;
   nfds_t nfds;
@@ -278,7 +273,7 @@
 } poll_hash_table;
 
 poll_hash_table poll_cache;
-cv_fd_table g_cvfds;
+grpc_cv_fd_table g_cvfds;
 
 /*******************************************************************************
  * fd_posix.c
@@ -340,7 +335,6 @@
   r->on_done_closure = nullptr;
   r->closed = 0;
   r->released = 0;
-  gpr_atm_no_barrier_store(&r->pollhup, 0);
   r->read_notifier_pollset = nullptr;
 
   char* name2;
@@ -355,8 +349,7 @@
 }
 
 /* Return the read-notifier pollset */
-static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
-                                                  grpc_fd* fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
   grpc_pollset* notifier = nullptr;
 
   gpr_mu_lock(&fd->mu);
@@ -366,39 +359,36 @@
   return notifier;
 }
 
-static grpc_error* pollset_kick_locked(grpc_exec_ctx* exec_ctx,
-                                       grpc_fd_watcher* watcher) {
+static grpc_error* pollset_kick_locked(grpc_fd_watcher* watcher) {
   gpr_mu_lock(&watcher->pollset->mu);
   GPR_ASSERT(watcher->worker);
-  grpc_error* err =
-      pollset_kick_ext(exec_ctx, watcher->pollset, watcher->worker,
-                       GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
+  grpc_error* err = pollset_kick_ext(watcher->pollset, watcher->worker,
+                                     GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP);
   gpr_mu_unlock(&watcher->pollset->mu);
   return err;
 }
 
-static void maybe_wake_one_watcher_locked(grpc_exec_ctx* exec_ctx,
-                                          grpc_fd* fd) {
+static void maybe_wake_one_watcher_locked(grpc_fd* fd) {
   if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
-    pollset_kick_locked(exec_ctx, fd->inactive_watcher_root.next);
+    pollset_kick_locked(fd->inactive_watcher_root.next);
   } else if (fd->read_watcher) {
-    pollset_kick_locked(exec_ctx, fd->read_watcher);
+    pollset_kick_locked(fd->read_watcher);
   } else if (fd->write_watcher) {
-    pollset_kick_locked(exec_ctx, fd->write_watcher);
+    pollset_kick_locked(fd->write_watcher);
   }
 }
 
-static void wake_all_watchers_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
+static void wake_all_watchers_locked(grpc_fd* fd) {
   grpc_fd_watcher* watcher;
   for (watcher = fd->inactive_watcher_root.next;
        watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
-    pollset_kick_locked(exec_ctx, watcher);
+    pollset_kick_locked(watcher);
   }
   if (fd->read_watcher) {
-    pollset_kick_locked(exec_ctx, fd->read_watcher);
+    pollset_kick_locked(fd->read_watcher);
   }
   if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
-    pollset_kick_locked(exec_ctx, fd->write_watcher);
+    pollset_kick_locked(fd->write_watcher);
   }
 }
 
@@ -407,12 +397,12 @@
          fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
 }
 
-static void close_fd_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
+static void close_fd_locked(grpc_fd* fd) {
   fd->closed = 1;
   if (!fd->released) {
     close(fd->fd);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(fd->on_done_closure, GRPC_ERROR_NONE);
 }
 
 static int fd_wrapped_fd(grpc_fd* fd) {
@@ -423,8 +413,7 @@
   }
 }
 
-static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                      grpc_closure* on_done, int* release_fd,
+static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                       bool already_closed, const char* reason) {
   fd->on_done_closure = on_done;
   fd->released = release_fd != nullptr;
@@ -437,9 +426,9 @@
   gpr_mu_lock(&fd->mu);
   REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
   if (!has_watchers(fd)) {
-    close_fd_locked(exec_ctx, fd);
+    close_fd_locked(fd);
   } else {
-    wake_all_watchers_locked(exec_ctx, fd);
+    wake_all_watchers_locked(fd);
   }
   gpr_mu_unlock(&fd->mu);
   UNREF_BY(fd, 2, reason); /* drop the reference */
@@ -471,10 +460,10 @@
   }
 }
 
-static void notify_on_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                             grpc_closure** st, grpc_closure* closure) {
+static void notify_on_locked(grpc_fd* fd, grpc_closure** st,
+                             grpc_closure* closure) {
   if (fd->shutdown) {
-    GRPC_CLOSURE_SCHED(exec_ctx, closure,
+    GRPC_CLOSURE_SCHED(closure,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING("FD shutdown"));
   } else if (*st == CLOSURE_NOT_READY) {
     /* not ready ==> switch to a waiting state by setting the closure */
@@ -482,8 +471,8 @@
   } else if (*st == CLOSURE_READY) {
     /* already ready ==> queue the closure to run immediately */
     *st = CLOSURE_NOT_READY;
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, fd_shutdown_error(fd));
-    maybe_wake_one_watcher_locked(exec_ctx, fd);
+    GRPC_CLOSURE_SCHED(closure, fd_shutdown_error(fd));
+    maybe_wake_one_watcher_locked(fd);
   } else {
     /* upcallptr was set to a different closure.  This is an error! */
     gpr_log(GPR_ERROR,
@@ -494,8 +483,7 @@
 }
 
 /* returns 1 if state becomes not ready */
-static int set_ready_locked(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                            grpc_closure** st) {
+static int set_ready_locked(grpc_fd* fd, grpc_closure** st) {
   if (*st == CLOSURE_READY) {
     /* duplicate ready ==> ignore */
     return 0;
@@ -505,18 +493,18 @@
     return 0;
   } else {
     /* waiting ==> queue closure */
-    GRPC_CLOSURE_SCHED(exec_ctx, *st, fd_shutdown_error(fd));
+    GRPC_CLOSURE_SCHED(*st, fd_shutdown_error(fd));
     *st = CLOSURE_NOT_READY;
     return 1;
   }
 }
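
Editor's note: notify_on_locked and set_ready_locked above implement a small handshake: the slot holds either a NOT_READY sentinel, a READY sentinel, or a parked closure, and whichever side arrives second runs the callback. A simplified sketch of that state machine under a caller-held lock; the sentinel values and names are hypothetical and the shutdown path is omitted:

#include <cstdint>

typedef void (*callback_fn)(void* arg);
struct closure { callback_fn cb; void* arg; };

static closure* const kNotReady = nullptr;
static closure* const kReady = reinterpret_cast<closure*>(uintptr_t{1});

static void run(closure* c) { c->cb(c->arg); }

// Caller holds the fd lock in both functions.
static void notify_on(closure** st, closure* c) {
  if (*st == kNotReady) {
    *st = c;              // park the closure until the event fires
  } else if (*st == kReady) {
    *st = kNotReady;      // event already happened: run immediately
    run(c);
  }
  // (a second closure while one is parked is a usage error in the real code)
}

static void set_ready(closure** st) {
  if (*st == kReady) {
    // duplicate readiness: nothing to do
  } else if (*st == kNotReady) {
    *st = kReady;         // remember readiness for a later notify_on
  } else {
    closure* waiter = *st;  // a closure was parked: run it now
    *st = kNotReady;
    run(waiter);
  }
}
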
 
 static void set_read_notifier_pollset_locked(
-    grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_pollset* read_notifier_pollset) {
+    grpc_fd* fd, grpc_pollset* read_notifier_pollset) {
   fd->read_notifier_pollset = read_notifier_pollset;
 }
 
-static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
+static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
   gpr_mu_lock(&fd->mu);
   /* only shutdown once */
   if (!fd->shutdown) {
@@ -524,8 +512,8 @@
     fd->shutdown_error = why;
     /* signal read/write closed to OS so that future operations fail */
     shutdown(fd->fd, SHUT_RDWR);
-    set_ready_locked(exec_ctx, fd, &fd->read_closure);
-    set_ready_locked(exec_ctx, fd, &fd->write_closure);
+    set_ready_locked(fd, &fd->read_closure);
+    set_ready_locked(fd, &fd->write_closure);
   } else {
     GRPC_ERROR_UNREF(why);
   }
@@ -539,17 +527,15 @@
   return r;
 }
 
-static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                              grpc_closure* closure) {
+static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
   gpr_mu_lock(&fd->mu);
-  notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
+  notify_on_locked(fd, &fd->read_closure, closure);
   gpr_mu_unlock(&fd->mu);
 }
 
-static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               grpc_closure* closure) {
+static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
   gpr_mu_lock(&fd->mu);
-  notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
+  notify_on_locked(fd, &fd->write_closure, closure);
   gpr_mu_unlock(&fd->mu);
 }
 
@@ -604,8 +590,7 @@
   return mask;
 }
 
-static void fd_end_poll(grpc_exec_ctx* exec_ctx, grpc_fd_watcher* watcher,
-                        int got_read, int got_write,
+static void fd_end_poll(grpc_fd_watcher* watcher, int got_read, int got_write,
                         grpc_pollset* read_notifier_pollset) {
   int was_polling = 0;
   int kick = 0;
@@ -639,23 +624,23 @@
     watcher->prev->next = watcher->next;
   }
   if (got_read) {
-    if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
+    if (set_ready_locked(fd, &fd->read_closure)) {
       kick = 1;
     }
     if (read_notifier_pollset != nullptr) {
-      set_read_notifier_pollset_locked(exec_ctx, fd, read_notifier_pollset);
+      set_read_notifier_pollset_locked(fd, read_notifier_pollset);
     }
   }
   if (got_write) {
-    if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
+    if (set_ready_locked(fd, &fd->write_closure)) {
       kick = 1;
     }
   }
   if (kick) {
-    maybe_wake_one_watcher_locked(exec_ctx, fd);
+    maybe_wake_one_watcher_locked(fd);
   }
   if (fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
-    close_fd_locked(exec_ctx, fd);
+    close_fd_locked(fd);
   }
   gpr_mu_unlock(&fd->mu);
 
@@ -716,12 +701,12 @@
   *composite = grpc_error_add_child(*composite, error);
 }
 
-static grpc_error* pollset_kick_ext(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+static grpc_error* pollset_kick_ext(grpc_pollset* p,
                                     grpc_pollset_worker* specific_worker,
                                     uint32_t flags) {
   GPR_TIMER_BEGIN("pollset_kick_ext", 0);
   grpc_error* error = GRPC_ERROR_NONE;
-  GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+  GRPC_STATS_INC_POLLSET_KICK();
 
   /* pollset->mu already held */
   if (specific_worker != nullptr) {
@@ -787,9 +772,9 @@
   return error;
 }
 
-static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+static grpc_error* pollset_kick(grpc_pollset* p,
                                 grpc_pollset_worker* specific_worker) {
-  return pollset_kick_ext(exec_ctx, p, specific_worker, 0);
+  return pollset_kick_ext(p, specific_worker, 0);
 }
 
 /* global state management */
@@ -823,7 +808,7 @@
   pollset->pollset_set_count = 0;
 }
 
-static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
+static void pollset_destroy(grpc_pollset* pollset) {
   GPR_ASSERT(!pollset_has_workers(pollset));
   GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
   while (pollset->local_wakeup_cache) {
@@ -836,8 +821,7 @@
   gpr_mu_destroy(&pollset->mu);
 }
 
-static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_fd* fd) {
+static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {
   gpr_mu_lock(&pollset->mu);
   size_t i;
   /* TODO(ctiller): this is O(num_fds^2); maybe switch to a hash set here */
@@ -852,19 +836,19 @@
   }
   pollset->fds[pollset->fd_count++] = fd;
   GRPC_FD_REF(fd, "multipoller");
-  pollset_kick(exec_ctx, pollset, nullptr);
+  pollset_kick(pollset, nullptr);
 exit:
   gpr_mu_unlock(&pollset->mu);
 }
 
-static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
+static void finish_shutdown(grpc_pollset* pollset) {
   GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
   size_t i;
   for (i = 0; i < pollset->fd_count; i++) {
     GRPC_FD_UNREF(pollset->fds[i], "multipoller");
   }
   pollset->fd_count = 0;
-  GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 
 static void work_combine_error(grpc_error** composite, grpc_error* error) {
@@ -875,7 +859,7 @@
   *composite = grpc_error_add_child(*composite, error);
 }
 
-static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+static grpc_error* pollset_work(grpc_pollset* pollset,
                                 grpc_pollset_worker** worker_hdl,
                                 grpc_millis deadline) {
   grpc_pollset_worker worker;
@@ -914,7 +898,7 @@
   if (!pollset_has_workers(pollset) &&
       !grpc_closure_list_empty(pollset->idle_jobs)) {
     GPR_TIMER_MARK("pollset_work.idle_jobs", 0);
-    GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
+    GRPC_CLOSURE_LIST_SCHED(&pollset->idle_jobs);
     goto done;
   }
   /* If we're shutting down then we don't execute any extended work */
@@ -946,7 +930,7 @@
       grpc_fd_watcher* watchers;
       struct pollfd* pfds;
 
-      timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
+      timeout = poll_deadline_to_millis_timeout(deadline);
 
       if (pollset->fd_count + 2 <= inline_elements) {
         pfds = pollfd_space;
@@ -966,8 +950,7 @@
       pfds[0].events = POLLIN;
       pfds[0].revents = 0;
       for (i = 0; i < pollset->fd_count; i++) {
-        if (fd_is_orphaned(pollset->fds[i]) ||
-            gpr_atm_no_barrier_load(&pollset->fds[i]->pollhup) == 1) {
+        if (fd_is_orphaned(pollset->fds[i])) {
           GRPC_FD_UNREF(pollset->fds[i], "multipoller");
         } else {
           pollset->fds[fd_count++] = pollset->fds[i];
@@ -991,9 +974,9 @@
       /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
          even going into the blocking annotation if possible */
       GRPC_SCHEDULING_START_BLOCKING_REGION;
-      GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
+      GRPC_STATS_INC_SYSCALL_POLL();
       r = grpc_poll_function(pfds, pfd_count, timeout);
-      GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
+      GRPC_SCHEDULING_END_BLOCKING_REGION;
 
       if (grpc_polling_trace.enabled()) {
         gpr_log(GPR_DEBUG, "%p poll=%d", pollset, r);
@@ -1006,16 +989,16 @@
 
         for (i = 1; i < pfd_count; i++) {
           if (watchers[i].fd == nullptr) {
-            fd_end_poll(exec_ctx, &watchers[i], 0, 0, nullptr);
+            fd_end_poll(&watchers[i], 0, 0, nullptr);
           } else {
             // Wake up all the file descriptors, if we have an invalid one
             // we can identify it on the next pollset_work()
-            fd_end_poll(exec_ctx, &watchers[i], 1, 1, pollset);
+            fd_end_poll(&watchers[i], 1, 1, pollset);
           }
         }
       } else if (r == 0) {
         for (i = 1; i < pfd_count; i++) {
-          fd_end_poll(exec_ctx, &watchers[i], 0, 0, nullptr);
+          fd_end_poll(&watchers[i], 0, 0, nullptr);
         }
       } else {
         if (pfds[0].revents & POLLIN_CHECK) {
@@ -1027,20 +1010,14 @@
         }
         for (i = 1; i < pfd_count; i++) {
           if (watchers[i].fd == nullptr) {
-            fd_end_poll(exec_ctx, &watchers[i], 0, 0, nullptr);
+            fd_end_poll(&watchers[i], 0, 0, nullptr);
           } else {
             if (grpc_polling_trace.enabled()) {
               gpr_log(GPR_DEBUG, "%p got_event: %d r:%d w:%d [%d]", pollset,
                       pfds[i].fd, (pfds[i].revents & POLLIN_CHECK) != 0,
                       (pfds[i].revents & POLLOUT_CHECK) != 0, pfds[i].revents);
             }
-            /* This is a mitigation to prevent poll() from spinning on a
-             ** POLLHUP https://github.com/grpc/grpc/pull/13665
-             */
-            if (pfds[i].revents & POLLHUP) {
-              gpr_atm_no_barrier_store(&watchers[i].fd->pollhup, 1);
-            }
-            fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
+            fd_end_poll(&watchers[i], pfds[i].revents & POLLIN_CHECK,
                         pfds[i].revents & POLLOUT_CHECK, pollset);
           }
         }
@@ -1063,7 +1040,7 @@
      worker list, which means nobody could ask us to re-evaluate polling). */
   done:
     if (!locked) {
-      queued_work |= grpc_exec_ctx_flush(exec_ctx);
+      queued_work |= grpc_core::ExecCtx::Get()->Flush();
       gpr_mu_lock(&pollset->mu);
       locked = 1;
     }
@@ -1092,21 +1069,21 @@
   /* check shutdown conditions */
   if (pollset->shutting_down) {
     if (pollset_has_workers(pollset)) {
-      pollset_kick(exec_ctx, pollset, nullptr);
+      pollset_kick(pollset, nullptr);
     } else if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
       pollset->called_shutdown = 1;
       gpr_mu_unlock(&pollset->mu);
-      finish_shutdown(exec_ctx, pollset);
-      grpc_exec_ctx_flush(exec_ctx);
+      finish_shutdown(pollset);
+      grpc_core::ExecCtx::Get()->Flush();
       /* Continuing to access pollset here is safe -- it is the caller's
        * responsibility to not destroy when it has outstanding calls to
        * pollset_work.
        * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
       gpr_mu_lock(&pollset->mu);
     } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
-      GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
+      GRPC_CLOSURE_LIST_SCHED(&pollset->idle_jobs);
       gpr_mu_unlock(&pollset->mu);
-      grpc_exec_ctx_flush(exec_ctx);
+      grpc_core::ExecCtx::Get()->Flush();
       gpr_mu_lock(&pollset->mu);
     }
   }
@@ -1116,26 +1093,24 @@
   return error;
 }
 
-static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                             grpc_closure* closure) {
+static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   GPR_ASSERT(!pollset->shutting_down);
   pollset->shutting_down = 1;
   pollset->shutdown_done = closure;
-  pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
+  pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
   if (!pollset_has_workers(pollset)) {
-    GRPC_CLOSURE_LIST_SCHED(exec_ctx, &pollset->idle_jobs);
+    GRPC_CLOSURE_LIST_SCHED(&pollset->idle_jobs);
   }
   if (!pollset->called_shutdown && !pollset_has_observers(pollset)) {
     pollset->called_shutdown = 1;
-    finish_shutdown(exec_ctx, pollset);
+    finish_shutdown(pollset);
   }
 }
 
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
-                                           grpc_millis deadline) {
+static int poll_deadline_to_millis_timeout(grpc_millis deadline) {
   if (deadline == GRPC_MILLIS_INF_FUTURE) return -1;
   if (deadline == 0) return 0;
-  grpc_millis n = deadline - grpc_exec_ctx_now(exec_ctx);
+  grpc_millis n = deadline - grpc_core::ExecCtx::Get()->Now();
   if (n < 0) return 0;
   if (n > INT_MAX) return -1;
   return (int)n;
@@ -1152,8 +1127,7 @@
   return pollset_set;
 }
 
-static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
-                                grpc_pollset_set* pollset_set) {
+static void pollset_set_destroy(grpc_pollset_set* pollset_set) {
   size_t i;
   gpr_mu_destroy(&pollset_set->mu);
   for (i = 0; i < pollset_set->fd_count; i++) {
@@ -1168,7 +1142,7 @@
         !pollset_has_observers(pollset)) {
       pollset->called_shutdown = 1;
       gpr_mu_unlock(&pollset->mu);
-      finish_shutdown(exec_ctx, pollset);
+      finish_shutdown(pollset);
     } else {
       gpr_mu_unlock(&pollset->mu);
     }
@@ -1179,8 +1153,7 @@
   gpr_free(pollset_set);
 }
 
-static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pollset_set,
+static void pollset_set_add_pollset(grpc_pollset_set* pollset_set,
                                     grpc_pollset* pollset) {
   size_t i, j;
   gpr_mu_lock(&pollset->mu);
@@ -1199,7 +1172,7 @@
     if (fd_is_orphaned(pollset_set->fds[i])) {
       GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
     } else {
-      pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
+      pollset_add_fd(pollset, pollset_set->fds[i]);
       pollset_set->fds[j++] = pollset_set->fds[i];
     }
   }
@@ -1207,8 +1180,7 @@
   gpr_mu_unlock(&pollset_set->mu);
 }
 
-static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pollset_set,
+static void pollset_set_del_pollset(grpc_pollset_set* pollset_set,
                                     grpc_pollset* pollset) {
   size_t i;
   gpr_mu_lock(&pollset_set->mu);
@@ -1228,14 +1200,13 @@
       !pollset_has_observers(pollset)) {
     pollset->called_shutdown = 1;
     gpr_mu_unlock(&pollset->mu);
-    finish_shutdown(exec_ctx, pollset);
+    finish_shutdown(pollset);
   } else {
     gpr_mu_unlock(&pollset->mu);
   }
 }
 
-static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* bag,
+static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
                                         grpc_pollset_set* item) {
   size_t i, j;
   gpr_mu_lock(&bag->mu);
@@ -1250,7 +1221,7 @@
     if (fd_is_orphaned(bag->fds[i])) {
       GRPC_FD_UNREF(bag->fds[i], "pollset_set");
     } else {
-      pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
+      pollset_set_add_fd(item, bag->fds[i]);
       bag->fds[j++] = bag->fds[i];
     }
   }
@@ -1258,8 +1229,7 @@
   gpr_mu_unlock(&bag->mu);
 }
 
-static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* bag,
+static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                         grpc_pollset_set* item) {
   size_t i;
   gpr_mu_lock(&bag->mu);
@@ -1274,8 +1244,7 @@
   gpr_mu_unlock(&bag->mu);
 }
 
-static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
-                               grpc_pollset_set* pollset_set, grpc_fd* fd) {
+static void pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
   size_t i;
   gpr_mu_lock(&pollset_set->mu);
   if (pollset_set->fd_count == pollset_set->fd_capacity) {
@@ -1286,16 +1255,15 @@
   GRPC_FD_REF(fd, "pollset_set");
   pollset_set->fds[pollset_set->fd_count++] = fd;
   for (i = 0; i < pollset_set->pollset_count; i++) {
-    pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
+    pollset_add_fd(pollset_set->pollsets[i], fd);
   }
   for (i = 0; i < pollset_set->pollset_set_count; i++) {
-    pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
+    pollset_set_add_fd(pollset_set->pollset_sets[i], fd);
   }
   gpr_mu_unlock(&pollset_set->mu);
 }
 
-static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx,
-                               grpc_pollset_set* pollset_set, grpc_fd* fd) {
+static void pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
   size_t i;
   gpr_mu_lock(&pollset_set->mu);
   for (i = 0; i < pollset_set->fd_count; i++) {
@@ -1308,7 +1276,7 @@
     }
   }
   for (i = 0; i < pollset_set->pollset_set_count; i++) {
-    pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
+    pollset_set_del_fd(pollset_set->pollset_sets[i], fd);
   }
   gpr_mu_unlock(&pollset_set->mu);
 }
@@ -1391,7 +1359,7 @@
   gpr_thd_options opt = gpr_thd_options_default();
   gpr_ref(&g_cvfds.pollcount);
   gpr_thd_options_set_detached(&opt);
-  GPR_ASSERT(gpr_thd_new(&t_id, &run_poll, pargs, &opt));
+  GPR_ASSERT(gpr_thd_new(&t_id, "grpc_poller", &run_poll, pargs, &opt));
   return pargs;
 }
 
@@ -1467,7 +1435,7 @@
   }
 }
 
-void remove_cvn(cv_node** head, cv_node* target) {
+void remove_cvn(grpc_cv_node** head, grpc_cv_node* target) {
   if (target->next) {
     target->next->prev = target->prev;
   }
@@ -1492,7 +1460,7 @@
       result->completed = 1;
       result->retval = retval;
       result->err = errno;
-      cv_node* watcher = result->watchers;
+      grpc_cv_node* watcher = result->watchers;
       while (watcher) {
         gpr_cv_signal(watcher->cv);
         watcher = watcher->next;
@@ -1503,7 +1471,7 @@
       decref_poll_result(result);
       // Leave this polling thread alive for a grace period to do another poll()
       // op
-      gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME);
+      gpr_timespec deadline = gpr_now(GPR_CLOCK_MONOTONIC);
       deadline = gpr_time_add(deadline, thread_grace);
       pargs->trigger_set = 0;
       gpr_cv_wait(&pargs->trigger, &g_cvfds.mu, deadline);
@@ -1526,17 +1494,17 @@
 static int cvfd_poll(struct pollfd* fds, nfds_t nfds, int timeout) {
   unsigned int i;
   int res, idx;
-  cv_node* pollcv;
+  grpc_cv_node* pollcv;
   int skip_poll = 0;
   nfds_t nsockfds = 0;
   poll_result* result = nullptr;
   gpr_mu_lock(&g_cvfds.mu);
-  pollcv = (cv_node*)gpr_malloc(sizeof(cv_node));
+  pollcv = (grpc_cv_node*)gpr_malloc(sizeof(grpc_cv_node));
   pollcv->next = nullptr;
   gpr_cv pollcv_cv;
   gpr_cv_init(&pollcv_cv);
   pollcv->cv = &pollcv_cv;
-  cv_node* fd_cvs = (cv_node*)gpr_malloc(nfds * sizeof(cv_node));
+  grpc_cv_node* fd_cvs = (grpc_cv_node*)gpr_malloc(nfds * sizeof(grpc_cv_node));
 
   for (i = 0; i < nfds; i++) {
     fds[i].revents = 0;
@@ -1558,9 +1526,9 @@
     }
   }
 
-  gpr_timespec deadline = gpr_now(GPR_CLOCK_REALTIME);
+  gpr_timespec deadline = gpr_now(GPR_CLOCK_MONOTONIC);
   if (timeout < 0) {
-    deadline = gpr_inf_future(GPR_CLOCK_REALTIME);
+    deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
   } else {
     deadline =
         gpr_time_add(deadline, gpr_time_from_millis(timeout, GPR_TIMESPAN));
@@ -1632,7 +1600,8 @@
   gpr_cv_init(&g_cvfds.shutdown_cv);
   gpr_ref_init(&g_cvfds.pollcount, 1);
   g_cvfds.size = CV_DEFAULT_TABLE_SIZE;
-  g_cvfds.cvfds = (fd_node*)gpr_malloc(sizeof(fd_node) * CV_DEFAULT_TABLE_SIZE);
+  g_cvfds.cvfds =
+      (grpc_fd_node*)gpr_malloc(sizeof(grpc_fd_node) * CV_DEFAULT_TABLE_SIZE);
   g_cvfds.free_fds = nullptr;
   thread_grace = gpr_time_from_millis(POLLCV_THREAD_GRACE_MS, GPR_TIMESPAN);
   for (int i = 0; i < CV_DEFAULT_TABLE_SIZE; i++) {
@@ -1663,7 +1632,7 @@
   // Not doing so will result in reported memory leaks
   if (!gpr_unref(&g_cvfds.pollcount)) {
     int res = gpr_cv_wait(&g_cvfds.shutdown_cv, &g_cvfds.mu,
-                          gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+                          gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                        gpr_time_from_seconds(3, GPR_TIMESPAN)));
     GPR_ASSERT(res == 0);
   }
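
Editor's note: the hunks above switch the condition-variable waits from GPR_CLOCK_REALTIME to GPR_CLOCK_MONOTONIC, so a wall-clock jump can no longer stretch or cut short the timed waits. The same idea in portable C++, using std::chrono::steady_clock as the monotonic source; this is a sketch, not the gpr implementation:

#include <chrono>
#include <condition_variable>
#include <mutex>

static std::mutex mu;
static std::condition_variable cv;
static bool ready = false;

// Returns true if 'ready' was signalled before the monotonic deadline.
static bool wait_with_grace(std::chrono::milliseconds grace) {
  std::unique_lock<std::mutex> lock(mu);
  auto deadline = std::chrono::steady_clock::now() + grace;  // monotonic
  return cv.wait_until(lock, deadline, [] { return ready; });
}
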
diff --git a/src/core/lib/iomgr/ev_poll_posix.h b/src/core/lib/iomgr/ev_poll_posix.h
index 626e95b..f6bc624 100644
--- a/src/core/lib/iomgr/ev_poll_posix.h
+++ b/src/core/lib/iomgr/ev_poll_posix.h
@@ -21,15 +21,7 @@
 
 #include "src/core/lib/iomgr/ev_posix.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 const grpc_event_engine_vtable* grpc_init_poll_posix(bool explicit_request);
 const grpc_event_engine_vtable* grpc_init_poll_cv_posix(bool explicit_request);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_EV_POLL_POSIX_H */
diff --git a/src/core/lib/iomgr/ev_posix.cc b/src/core/lib/iomgr/ev_posix.cc
index 80dde6d..b516f93 100644
--- a/src/core/lib/iomgr/ev_posix.cc
+++ b/src/core/lib/iomgr/ev_posix.cc
@@ -46,7 +46,7 @@
 
 grpc_wakeup_fd grpc_global_wakeup_fd;
 
-static const grpc_event_engine_vtable* g_event_engine;
+static const grpc_event_engine_vtable* g_event_engine = nullptr;
 static const char* g_poll_strategy_name = nullptr;
 
 typedef const grpc_event_engine_vtable* (*event_engine_factory_fn)(
@@ -59,8 +59,6 @@
 
 namespace {
 
-extern "C" {
-
 grpc_poll_function_type real_poll_function;
 
 int dummy_poll(struct pollfd fds[], nfds_t nfds, int timeout) {
@@ -72,7 +70,6 @@
     return -1;
   }
 }
-}  // extern "C"
 
 const grpc_event_engine_vtable* init_non_polling(bool explicit_request) {
   if (!explicit_request) {
@@ -187,28 +184,25 @@
   return g_event_engine->fd_wrapped_fd(fd);
 }
 
-void grpc_fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_closure* on_done,
-                    int* release_fd, bool already_closed, const char* reason) {
-  g_event_engine->fd_orphan(exec_ctx, fd, on_done, release_fd, already_closed,
-                            reason);
+void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
+                    bool already_closed, const char* reason) {
+  g_event_engine->fd_orphan(fd, on_done, release_fd, already_closed, reason);
 }
 
-void grpc_fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
-  g_event_engine->fd_shutdown(exec_ctx, fd, why);
+void grpc_fd_shutdown(grpc_fd* fd, grpc_error* why) {
+  g_event_engine->fd_shutdown(fd, why);
 }
 
 bool grpc_fd_is_shutdown(grpc_fd* fd) {
   return g_event_engine->fd_is_shutdown(fd);
 }
 
-void grpc_fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                            grpc_closure* closure) {
-  g_event_engine->fd_notify_on_read(exec_ctx, fd, closure);
+void grpc_fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
+  g_event_engine->fd_notify_on_read(fd, closure);
 }
 
-void grpc_fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                             grpc_closure* closure) {
-  g_event_engine->fd_notify_on_write(exec_ctx, fd, closure);
+void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
+  g_event_engine->fd_notify_on_write(fd, closure);
 }
 
 size_t grpc_pollset_size(void) { return g_event_engine->pollset_size; }
@@ -217,72 +211,63 @@
   g_event_engine->pollset_init(pollset, mu);
 }
 
-void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_closure* closure) {
-  g_event_engine->pollset_shutdown(exec_ctx, pollset, closure);
+void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
+  g_event_engine->pollset_shutdown(pollset, closure);
 }
 
-void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
-  g_event_engine->pollset_destroy(exec_ctx, pollset);
+void grpc_pollset_destroy(grpc_pollset* pollset) {
+  g_event_engine->pollset_destroy(pollset);
 }
 
-grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+grpc_error* grpc_pollset_work(grpc_pollset* pollset,
                               grpc_pollset_worker** worker,
                               grpc_millis deadline) {
-  return g_event_engine->pollset_work(exec_ctx, pollset, worker, deadline);
+  return g_event_engine->pollset_work(pollset, worker, deadline);
 }
 
-grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+grpc_error* grpc_pollset_kick(grpc_pollset* pollset,
                               grpc_pollset_worker* specific_worker) {
-  return g_event_engine->pollset_kick(exec_ctx, pollset, specific_worker);
+  return g_event_engine->pollset_kick(pollset, specific_worker);
 }
 
-void grpc_pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                         struct grpc_fd* fd) {
-  g_event_engine->pollset_add_fd(exec_ctx, pollset, fd);
+void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd) {
+  g_event_engine->pollset_add_fd(pollset, fd);
 }
 
 grpc_pollset_set* grpc_pollset_set_create(void) {
   return g_event_engine->pollset_set_create();
 }
 
-void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
-                              grpc_pollset_set* pollset_set) {
-  g_event_engine->pollset_set_destroy(exec_ctx, pollset_set);
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {
+  g_event_engine->pollset_set_destroy(pollset_set);
 }
 
-void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset) {
-  g_event_engine->pollset_set_add_pollset(exec_ctx, pollset_set, pollset);
+  g_event_engine->pollset_set_add_pollset(pollset_set, pollset);
 }
 
-void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset) {
-  g_event_engine->pollset_set_del_pollset(exec_ctx, pollset_set, pollset);
+  g_event_engine->pollset_set_del_pollset(pollset_set, pollset);
 }
 
-void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
                                       grpc_pollset_set* item) {
-  g_event_engine->pollset_set_add_pollset_set(exec_ctx, bag, item);
+  g_event_engine->pollset_set_add_pollset_set(bag, item);
 }
 
-void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                       grpc_pollset_set* item) {
-  g_event_engine->pollset_set_del_pollset_set(exec_ctx, bag, item);
+  g_event_engine->pollset_set_del_pollset_set(bag, item);
 }
 
-void grpc_pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
-                             grpc_pollset_set* pollset_set, grpc_fd* fd) {
-  g_event_engine->pollset_set_add_fd(exec_ctx, pollset_set, fd);
+void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
+  g_event_engine->pollset_set_add_fd(pollset_set, fd);
 }
 
-void grpc_pollset_set_del_fd(grpc_exec_ctx* exec_ctx,
-                             grpc_pollset_set* pollset_set, grpc_fd* fd) {
-  g_event_engine->pollset_set_del_fd(exec_ctx, pollset_set, fd);
+void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd) {
+  g_event_engine->pollset_set_del_fd(pollset_set, fd);
 }
 
 #endif  // GRPC_POSIX_SOCKET
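
Editor's note: ev_posix.cc is a thin forwarding layer; every public iomgr call dispatches through g_event_engine, a pointer to a table of function pointers selected at startup. A minimal sketch of that vtable dispatch shape, reduced to two operations with hypothetical types and names:

struct fake_fd { int fd; };

typedef struct event_engine_vtable {
  int (*fd_wrapped_fd)(fake_fd* fd);
  void (*shutdown_engine)(void);
} event_engine_vtable;

static int posix_wrapped_fd(fake_fd* fd) { return fd->fd; }
static void posix_shutdown(void) {}

static const event_engine_vtable posix_engine = {posix_wrapped_fd,
                                                 posix_shutdown};
static const event_engine_vtable* g_engine = &posix_engine;

// Public entry points forward to whichever engine was selected.
int engine_fd_wrapped_fd(fake_fd* fd) { return g_engine->fd_wrapped_fd(fd); }
void engine_shutdown(void) { g_engine->shutdown_engine(); }
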
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index 8f45d2e..62f1162 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -27,10 +27,6 @@
 #include "src/core/lib/iomgr/pollset_set.h"
 #include "src/core/lib/iomgr/wakeup_fd_posix.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern grpc_core::TraceFlag grpc_polling_trace; /* Disabled by default */
 
 typedef struct grpc_fd grpc_fd;
@@ -40,48 +36,36 @@
 
   grpc_fd* (*fd_create)(int fd, const char* name);
   int (*fd_wrapped_fd)(grpc_fd* fd);
-  void (*fd_orphan)(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_closure* on_done,
-                    int* release_fd, bool already_closed, const char* reason);
-  void (*fd_shutdown)(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why);
-  void (*fd_notify_on_read)(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                            grpc_closure* closure);
-  void (*fd_notify_on_write)(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                             grpc_closure* closure);
+  void (*fd_orphan)(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
+                    bool already_closed, const char* reason);
+  void (*fd_shutdown)(grpc_fd* fd, grpc_error* why);
+  void (*fd_notify_on_read)(grpc_fd* fd, grpc_closure* closure);
+  void (*fd_notify_on_write)(grpc_fd* fd, grpc_closure* closure);
   bool (*fd_is_shutdown)(grpc_fd* fd);
-  grpc_pollset* (*fd_get_read_notifier_pollset)(grpc_exec_ctx* exec_ctx,
-                                                grpc_fd* fd);
+  grpc_pollset* (*fd_get_read_notifier_pollset)(grpc_fd* fd);
 
   void (*pollset_init)(grpc_pollset* pollset, gpr_mu** mu);
-  void (*pollset_shutdown)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_closure* closure);
-  void (*pollset_destroy)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset);
-  grpc_error* (*pollset_work)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+  void (*pollset_shutdown)(grpc_pollset* pollset, grpc_closure* closure);
+  void (*pollset_destroy)(grpc_pollset* pollset);
+  grpc_error* (*pollset_work)(grpc_pollset* pollset,
                               grpc_pollset_worker** worker,
                               grpc_millis deadline);
-  grpc_error* (*pollset_kick)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+  grpc_error* (*pollset_kick)(grpc_pollset* pollset,
                               grpc_pollset_worker* specific_worker);
-  void (*pollset_add_fd)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                         struct grpc_fd* fd);
+  void (*pollset_add_fd)(grpc_pollset* pollset, struct grpc_fd* fd);
 
   grpc_pollset_set* (*pollset_set_create)(void);
-  void (*pollset_set_destroy)(grpc_exec_ctx* exec_ctx,
-                              grpc_pollset_set* pollset_set);
-  void (*pollset_set_add_pollset)(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+  void (*pollset_set_destroy)(grpc_pollset_set* pollset_set);
+  void (*pollset_set_add_pollset)(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset);
-  void (*pollset_set_del_pollset)(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+  void (*pollset_set_del_pollset)(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset);
-  void (*pollset_set_add_pollset_set)(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+  void (*pollset_set_add_pollset_set)(grpc_pollset_set* bag,
                                       grpc_pollset_set* item);
-  void (*pollset_set_del_pollset_set)(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+  void (*pollset_set_del_pollset_set)(grpc_pollset_set* bag,
                                       grpc_pollset_set* item);
-  void (*pollset_set_add_fd)(grpc_exec_ctx* exec_ctx,
-                             grpc_pollset_set* pollset_set, grpc_fd* fd);
-  void (*pollset_set_del_fd)(grpc_exec_ctx* exec_ctx,
-                             grpc_pollset_set* pollset_set, grpc_fd* fd);
+  void (*pollset_set_add_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
+  void (*pollset_set_del_fd)(grpc_pollset_set* pollset_set, grpc_fd* fd);
 
   void (*shutdown_engine)(void);
 } grpc_event_engine_vtable;
@@ -107,14 +91,14 @@
    Requires: *fd initialized; no outstanding notify_on_read or
    notify_on_write.
    MUST NOT be called with a pollset lock taken */
-void grpc_fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_closure* on_done,
-                    int* release_fd, bool already_closed, const char* reason);
+void grpc_fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
+                    bool already_closed, const char* reason);
 
 /* Has grpc_fd_shutdown been called on an fd? */
 bool grpc_fd_is_shutdown(grpc_fd* fd);
 
 /* Cause any current and future callbacks to fail. */
-void grpc_fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why);
+void grpc_fd_shutdown(grpc_fd* fd, grpc_error* why);
 
 /* Register read interest, causing read_cb to be called once when fd becomes
    readable, on deadline specified by deadline, or on shutdown triggered by
@@ -129,29 +113,23 @@
    underlying platform. This means that users must drain fd in read_cb before
    calling notify_on_read again. Users are also expected to handle spurious
    events, i.e read_cb is called while nothing can be readable from fd  */
-void grpc_fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                            grpc_closure* closure);
+void grpc_fd_notify_on_read(grpc_fd* fd, grpc_closure* closure);
 
 /* Exactly the same semantics as above, except based on writable events.  */
-void grpc_fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                             grpc_closure* closure);
+void grpc_fd_notify_on_write(grpc_fd* fd, grpc_closure* closure);
 
 /* Return the read notifier pollset from the fd */
-grpc_pollset* grpc_fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
-                                                grpc_fd* fd);
+grpc_pollset* grpc_fd_get_read_notifier_pollset(grpc_fd* fd);
 
 /* pollset_posix functions */
 
 /* Add an fd to a pollset */
-void grpc_pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                         struct grpc_fd* fd);
+void grpc_pollset_add_fd(grpc_pollset* pollset, struct grpc_fd* fd);
 
 /* pollset_set_posix functions */
 
-void grpc_pollset_set_add_fd(grpc_exec_ctx* exec_ctx,
-                             grpc_pollset_set* pollset_set, grpc_fd* fd);
-void grpc_pollset_set_del_fd(grpc_exec_ctx* exec_ctx,
-                             grpc_pollset_set* pollset_set, grpc_fd* fd);
+void grpc_pollset_set_add_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
+void grpc_pollset_set_del_fd(grpc_pollset_set* pollset_set, grpc_fd* fd);
 
 /* override to allow tests to hook poll() usage */
 typedef int (*grpc_poll_function_type)(struct pollfd*, nfds_t, int);
@@ -162,8 +140,4 @@
 void grpc_set_event_engine_test_only(const grpc_event_engine_vtable*);
 const grpc_event_engine_vtable* grpc_get_event_engine_test_only();
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_EV_POSIX_H */
diff --git a/src/core/lib/iomgr/exec_ctx.cc b/src/core/lib/iomgr/exec_ctx.cc
index 1777456..e005437 100644
--- a/src/core/lib/iomgr/exec_ctx.cc
+++ b/src/core/lib/iomgr/exec_ctx.cc
@@ -25,39 +25,7 @@
 #include "src/core/lib/iomgr/combiner.h"
 #include "src/core/lib/profiling/timers.h"
 
-bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx* exec_ctx) {
-  if ((exec_ctx->flags & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) {
-    if (exec_ctx->check_ready_to_finish(exec_ctx,
-                                        exec_ctx->check_ready_to_finish_arg)) {
-      exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
-      return true;
-    }
-    return false;
-  } else {
-    return true;
-  }
-}
-
-bool grpc_never_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored) {
-  return false;
-}
-
-bool grpc_always_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored) {
-  return true;
-}
-
-bool grpc_exec_ctx_has_work(grpc_exec_ctx* exec_ctx) {
-  return exec_ctx->active_combiner != nullptr ||
-         !grpc_closure_list_empty(exec_ctx->closure_list);
-}
-
-void grpc_exec_ctx_finish(grpc_exec_ctx* exec_ctx) {
-  exec_ctx->flags |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
-  grpc_exec_ctx_flush(exec_ctx);
-}
-
-static void exec_ctx_run(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                         grpc_error* error) {
+static void exec_ctx_run(grpc_closure* closure, grpc_error* error) {
 #ifndef NDEBUG
   closure->scheduled = false;
   if (grpc_trace_closure.enabled()) {
@@ -67,7 +35,7 @@
             closure->line_initiated);
   }
 #endif
-  closure->cb(exec_ctx, closure->cb_arg, error);
+  closure->cb(closure->cb_arg, error);
 #ifndef NDEBUG
   if (grpc_trace_closure.enabled()) {
     gpr_log(GPR_DEBUG, "closure %p finished", closure);
@@ -76,42 +44,13 @@
   GRPC_ERROR_UNREF(error);
 }
 
-bool grpc_exec_ctx_flush(grpc_exec_ctx* exec_ctx) {
-  bool did_something = 0;
-  GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
-  for (;;) {
-    if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
-      grpc_closure* c = exec_ctx->closure_list.head;
-      exec_ctx->closure_list.head = exec_ctx->closure_list.tail = nullptr;
-      while (c != nullptr) {
-        grpc_closure* next = c->next_data.next;
-        grpc_error* error = c->error_data.error;
-        did_something = true;
-        exec_ctx_run(exec_ctx, c, error);
-        c = next;
-      }
-    } else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) {
-      break;
-    }
-  }
-  GPR_ASSERT(exec_ctx->active_combiner == nullptr);
-  GPR_TIMER_END("grpc_exec_ctx_flush", 0);
-  return did_something;
-}
-
-static void exec_ctx_sched(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                           grpc_error* error) {
-  grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+static void exec_ctx_sched(grpc_closure* closure, grpc_error* error) {
+  grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(), closure,
+                           error);
 }
 
 static gpr_timespec g_start_time;
 
-void grpc_exec_ctx_global_init(void) {
-  g_start_time = gpr_now(GPR_CLOCK_MONOTONIC);
-}
-
-void grpc_exec_ctx_global_shutdown(void) {}
-
 static gpr_atm timespec_to_atm_round_down(gpr_timespec ts) {
   ts = gpr_time_sub(ts, g_start_time);
   double x =
@@ -131,18 +70,6 @@
   return (gpr_atm)x;
 }
 
-grpc_millis grpc_exec_ctx_now(grpc_exec_ctx* exec_ctx) {
-  if (!exec_ctx->now_is_valid) {
-    exec_ctx->now = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
-    exec_ctx->now_is_valid = true;
-  }
-  return exec_ctx->now;
-}
-
-void grpc_exec_ctx_invalidate_now(grpc_exec_ctx* exec_ctx) {
-  exec_ctx->now_is_valid = false;
-}
-
 gpr_timespec grpc_millis_to_timespec(grpc_millis millis,
                                      gpr_clock_type clock_type) {
   // special-case infinities as grpc_millis can be 32bit on some platforms
@@ -175,3 +102,44 @@
     exec_ctx_run, exec_ctx_sched, "exec_ctx"};
 static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
 grpc_closure_scheduler* grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
+
+namespace grpc_core {
+GPR_TLS_CLASS_DEF(ExecCtx::exec_ctx_);
+
+void ExecCtx::GlobalInit(void) {
+  g_start_time = gpr_now(GPR_CLOCK_MONOTONIC);
+  gpr_tls_init(&exec_ctx_);
+}
+
+bool ExecCtx::Flush() {
+  bool did_something = false;
+  GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
+  for (;;) {
+    if (!grpc_closure_list_empty(closure_list_)) {
+      grpc_closure* c = closure_list_.head;
+      closure_list_.head = closure_list_.tail = nullptr;
+      while (c != nullptr) {
+        grpc_closure* next = c->next_data.next;
+        grpc_error* error = c->error_data.error;
+        did_something = true;
+        exec_ctx_run(c, error);
+        c = next;
+      }
+    } else if (!grpc_combiner_continue_exec_ctx()) {
+      break;
+    }
+  }
+  GPR_ASSERT(combiner_data_.active_combiner == nullptr);
+  GPR_TIMER_END("grpc_exec_ctx_flush", 0);
+  return did_something;
+}
+
+grpc_millis ExecCtx::Now() {
+  if (!now_is_valid_) {
+    now_ = timespec_to_atm_round_down(gpr_now(GPR_CLOCK_MONOTONIC));
+    now_is_valid_ = true;
+  }
+  return now_;
+}
+
+}  // namespace grpc_core
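Illustrative sketch, not applied by this patch: the caller-side pattern these changes enable. Closures scheduled on grpc_schedule_on_exec_ctx are appended to the thread-local ExecCtx's closure_list() and run during Flush(), at the latest from the ExecCtx destructor. GRPC_CLOSURE_INIT is assumed to be the usual macro from closure.h; the helper names are hypothetical.

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

// Callback signatures no longer take a grpc_exec_ctx*.
static void on_done(void* arg, grpc_error* error) {
  (void)arg;
  (void)error;
}

static void schedule_example() {
  grpc_closure done;
  grpc_core::ExecCtx exec_ctx;  // establishes the thread-local context
  GRPC_CLOSURE_INIT(&done, on_done, nullptr, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_SCHED(&done, GRPC_ERROR_NONE);  // appended via exec_ctx_sched()
  // on_done runs when the context is flushed, at latest in ~ExecCtx(),
  // which executes before `done` leaves scope.
}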
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index bd27506..8f8d518 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -21,13 +21,11 @@
 
 #include <grpc/support/atm.h>
 #include <grpc/support/cpu.h>
+#include <grpc/support/log.h>
+#include <grpc/support/tls.h>
 
 #include "src/core/lib/iomgr/closure.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef gpr_atm grpc_millis;
 
 #define GRPC_MILLIS_INF_FUTURE GPR_ATM_MAX
@@ -45,6 +43,13 @@
    should be given to not delete said call/channel from this exec_ctx */
 #define GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP 2
 
+extern grpc_closure_scheduler* grpc_schedule_on_exec_ctx;
+
+gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock);
+grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec);
+grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec);
+
+namespace grpc_core {
 /** Execution context.
  *  A bag of data that collects information along a callstack.
  *  Generally created at public API entry points, and passed down as
@@ -65,67 +70,133 @@
  *  - Instances are always passed as the first argument to a function that
  *    takes it, and always as a pointer (grpc_exec_ctx is never copied).
  */
-struct grpc_exec_ctx {
-  grpc_closure_list closure_list;
-  /** currently active combiner: updated only via combiner.c */
-  grpc_combiner* active_combiner;
-  /** last active combiner in the active combiner list */
-  grpc_combiner* last_combiner;
-  uintptr_t flags;
-  unsigned starting_cpu;
-  void* check_ready_to_finish_arg;
-  bool (*check_ready_to_finish)(grpc_exec_ctx* exec_ctx, void* arg);
+class ExecCtx {
+ public:
+  /** Default Constructor */
 
-  bool now_is_valid;
-  grpc_millis now;
-};
+  ExecCtx() : flags_(GRPC_EXEC_CTX_FLAG_IS_FINISHED) { Set(this); }
 
-/* initializer for grpc_exec_ctx:
-   prefer to use GRPC_EXEC_CTX_INIT whenever possible */
-#define GRPC_EXEC_CTX_INITIALIZER(flags, finish_check, finish_check_arg) \
-  {                                                                      \
-    GRPC_CLOSURE_LIST_INIT, NULL, NULL, flags, gpr_cpu_current_cpu(),    \
-        finish_check_arg, finish_check, false, 0                         \
+  /** Parameterised Constructor */
+  ExecCtx(uintptr_t fl) : flags_(fl) { Set(this); }
+
+  /** Destructor */
+  virtual ~ExecCtx() {
+    flags_ |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
+    Flush();
+    Set(last_exec_ctx_);
   }
 
-/* initialize an execution context at the top level of an API call into grpc
-   (this is safe to use elsewhere, though possibly not as efficient) */
-#define GRPC_EXEC_CTX_INIT \
-  GRPC_EXEC_CTX_INITIALIZER(GRPC_EXEC_CTX_FLAG_IS_FINISHED, NULL, NULL)
+  /** Disallow copy and assignment operators */
+  ExecCtx(const ExecCtx&) = delete;
+  ExecCtx& operator=(const ExecCtx&) = delete;
 
-extern grpc_closure_scheduler* grpc_schedule_on_exec_ctx;
+  /** Return starting_cpu */
+  unsigned starting_cpu() const { return starting_cpu_; }
 
-bool grpc_exec_ctx_has_work(grpc_exec_ctx* exec_ctx);
+  struct CombinerData {
+    /* currently active combiner: updated only via combiner.c */
+    grpc_combiner* active_combiner;
+    /* last active combiner in the active combiner list */
+    grpc_combiner* last_combiner;
+  };
 
-/** Flush any work that has been enqueued onto this grpc_exec_ctx.
- *  Caller must guarantee that no interfering locks are held.
- *  Returns true if work was performed, false otherwise. */
-bool grpc_exec_ctx_flush(grpc_exec_ctx* exec_ctx);
-/** Finish any pending work for a grpc_exec_ctx. Must be called before
- *  the instance is destroyed, or work may be lost. */
-void grpc_exec_ctx_finish(grpc_exec_ctx* exec_ctx);
-/** Returns true if we'd like to leave this execution context as soon as
-    possible: useful for deciding whether to do something more or not depending
-    on outside context */
-bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx* exec_ctx);
-/** A finish check that is never ready to finish */
-bool grpc_never_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored);
-/** A finish check that is always ready to finish */
-bool grpc_always_ready_to_finish(grpc_exec_ctx* exec_ctx, void* arg_ignored);
+  /** Only to be used by grpc-combiner code */
+  CombinerData* combiner_data() { return &combiner_data_; }
 
-void grpc_exec_ctx_global_init(void);
+  /** Return pointer to grpc_closure_list */
+  grpc_closure_list* closure_list() { return &closure_list_; }
 
-void grpc_exec_ctx_global_init(void);
-void grpc_exec_ctx_global_shutdown(void);
+  /** Return flags */
+  uintptr_t flags() { return flags_; }
 
-grpc_millis grpc_exec_ctx_now(grpc_exec_ctx* exec_ctx);
-void grpc_exec_ctx_invalidate_now(grpc_exec_ctx* exec_ctx);
-gpr_timespec grpc_millis_to_timespec(grpc_millis millis, gpr_clock_type clock);
-grpc_millis grpc_timespec_to_millis_round_down(gpr_timespec timespec);
-grpc_millis grpc_timespec_to_millis_round_up(gpr_timespec timespec);
+  /** Checks if there is work to be done */
+  bool HasWork() {
+    return combiner_data_.active_combiner != nullptr ||
+           !grpc_closure_list_empty(closure_list_);
+  }
 
-#ifdef __cplusplus
-}
-#endif
+  /** Flush any work that has been enqueued onto this grpc_exec_ctx.
+   *  Caller must guarantee that no interfering locks are held.
+   *  Returns true if work was performed, false otherwise. */
+  bool Flush();
+
+  /** Returns true if we'd like to leave this execution context as soon as
+   *  possible: useful for deciding whether to do something more or not
+   *  depending on outside context */
+  bool IsReadyToFinish() {
+    if ((flags_ & GRPC_EXEC_CTX_FLAG_IS_FINISHED) == 0) {
+      if (CheckReadyToFinish()) {
+        flags_ |= GRPC_EXEC_CTX_FLAG_IS_FINISHED;
+        return true;
+      }
+      return false;
+    } else {
+      return true;
+    }
+  }
+
+  /** Returns the stored current time relative to start if valid,
+   * otherwise refreshes the stored time, sets it valid and returns the new
+   * value */
+  grpc_millis Now();
+
+  /** Invalidates the stored time value. A new time value will be set on calling
+   * Now() */
+  void InvalidateNow() { now_is_valid_ = false; }
+
+  /** To be used only by shutdown code in iomgr */
+  void SetNowIomgrShutdown() {
+    now_ = GRPC_MILLIS_INF_FUTURE;
+    now_is_valid_ = true;
+  }
+
+  /** To be used only for testing.
+   * Sets the now value
+   */
+  void TestOnlySetNow(grpc_millis new_val) {
+    now_ = new_val;
+    now_is_valid_ = true;
+  }
+
+  /** Finish any pending work for a grpc_exec_ctx. Must be called before
+   *  the instance is destroyed, or work may be lost. */
+  void Finish();
+
+  /** Global initialization for ExecCtx. Called by iomgr */
+  static void GlobalInit(void);
+
+  /** Global shutdown for ExecCtx. Called by iomgr */
+  static void GlobalShutdown(void) { gpr_tls_destroy(&exec_ctx_); }
+
+  /** Gets pointer to current exec_ctx */
+  static ExecCtx* Get() {
+    return reinterpret_cast<ExecCtx*>(gpr_tls_get(&exec_ctx_));
+  }
+
+ protected:
+  /** Check if ready to finish */
+  virtual bool CheckReadyToFinish() { return false; }
+
+  /** Disallow delete on ExecCtx */
+  static void operator delete(void* p) { abort(); }
+
+ private:
+  /** Set exec_ctx_ to exec_ctx */
+  void Set(ExecCtx* exec_ctx) {
+    gpr_tls_set(&exec_ctx_, reinterpret_cast<intptr_t>(exec_ctx));
+  }
+
+  grpc_closure_list closure_list_ = GRPC_CLOSURE_LIST_INIT;
+  CombinerData combiner_data_ = {nullptr, nullptr};
+  uintptr_t flags_;
+  unsigned starting_cpu_ = gpr_cpu_current_cpu();
+
+  bool now_is_valid_ = false;
+  grpc_millis now_ = 0;
+
+  GPR_TLS_CLASS_DECL(exec_ctx_);
+  ExecCtx* last_exec_ctx_ = Get();
+};
+}  // namespace grpc_core
 
 #endif /* GRPC_CORE_LIB_IOMGR_EXEC_CTX_H */
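Illustrative sketch, not applied by this patch: the RAII idiom that replaces the GRPC_EXEC_CTX_INIT / grpc_exec_ctx_finish() pair at public API entry points. The entry-point name is hypothetical; everything else is taken from the class above.

void some_api_entry_point() {  // hypothetical entry point
  grpc_core::ExecCtx exec_ctx;  // replaces GRPC_EXEC_CTX_INIT
  // Work that schedules closures or reads the cached clock goes here.
  grpc_millis deadline =
      grpc_core::ExecCtx::Get()->Now() + 1000;  // 1s from the cached now
  (void)deadline;
}  // ~ExecCtx(): marks finished, Flush()es, restores the previous context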
diff --git a/src/core/lib/iomgr/executor.cc b/src/core/lib/iomgr/executor.cc
index d8a195f..67a0412 100644
--- a/src/core/lib/iomgr/executor.cc
+++ b/src/core/lib/iomgr/executor.cc
@@ -55,7 +55,7 @@
 
 static void executor_thread(void* arg);
 
-static size_t run_closures(grpc_exec_ctx* exec_ctx, grpc_closure_list list) {
+static size_t run_closures(grpc_closure_list list) {
   size_t n = 0;
 
   grpc_closure* c = list.head;
@@ -73,11 +73,11 @@
 #ifndef NDEBUG
     c->scheduled = false;
 #endif
-    c->cb(exec_ctx, c->cb_arg, error);
+    c->cb(c->cb_arg, error);
     GRPC_ERROR_UNREF(error);
     c = next;
     n++;
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
   }
 
   return n;
@@ -87,7 +87,7 @@
   return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
 }
 
-void grpc_executor_set_threading(grpc_exec_ctx* exec_ctx, bool threading) {
+void grpc_executor_set_threading(bool threading) {
   gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
   if (threading) {
     if (cur_threads > 0) return;
@@ -104,8 +104,8 @@
 
     gpr_thd_options opt = gpr_thd_options_default();
     gpr_thd_options_set_joinable(&opt);
-    gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0],
-                &opt);
+    gpr_thd_new(&g_thread_state[0].id, "grpc_executor", executor_thread,
+                &g_thread_state[0], &opt);
   } else {
     if (cur_threads == 0) return;
     for (size_t i = 0; i < g_max_threads; i++) {
@@ -125,28 +125,25 @@
     for (size_t i = 0; i < g_max_threads; i++) {
       gpr_mu_destroy(&g_thread_state[i].mu);
       gpr_cv_destroy(&g_thread_state[i].cv);
-      run_closures(exec_ctx, g_thread_state[i].elems);
+      run_closures(g_thread_state[i].elems);
     }
     gpr_free(g_thread_state);
     gpr_tls_destroy(&g_this_thread_state);
   }
 }
 
-void grpc_executor_init(grpc_exec_ctx* exec_ctx) {
+void grpc_executor_init() {
   gpr_atm_no_barrier_store(&g_cur_threads, 0);
-  grpc_executor_set_threading(exec_ctx, true);
+  grpc_executor_set_threading(true);
 }
 
-void grpc_executor_shutdown(grpc_exec_ctx* exec_ctx) {
-  grpc_executor_set_threading(exec_ctx, false);
-}
+void grpc_executor_shutdown() { grpc_executor_set_threading(false); }
 
 static void executor_thread(void* arg) {
   thread_state* ts = (thread_state*)arg;
   gpr_tls_set(&g_this_thread_state, (intptr_t)ts);
 
-  grpc_exec_ctx exec_ctx =
-      GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, nullptr);
+  grpc_core::ExecCtx exec_ctx(0);
 
   size_t subtract_depth = 0;
   for (;;) {
@@ -158,7 +155,7 @@
     ts->depth -= subtract_depth;
     while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
       ts->queued_long_job = false;
-      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
     }
     if (ts->shutdown) {
       if (executor_trace.enabled()) {
@@ -168,7 +165,7 @@
       gpr_mu_unlock(&ts->mu);
       break;
     }
-    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
+    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED();
     grpc_closure_list exec = ts->elems;
     ts->elems = GRPC_CLOSURE_LIST_INIT;
     gpr_mu_unlock(&ts->mu);
@@ -176,19 +173,18 @@
       gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
     }
 
-    grpc_exec_ctx_invalidate_now(&exec_ctx);
-    subtract_depth = run_closures(&exec_ctx, exec);
+    grpc_core::ExecCtx::Get()->InvalidateNow();
+    subtract_depth = run_closures(exec);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void executor_push(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                          grpc_error* error, bool is_short) {
+static void executor_push(grpc_closure* closure, grpc_error* error,
+                          bool is_short) {
   bool retry_push;
   if (is_short) {
-    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
+    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS();
   } else {
-    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
+    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS();
   }
   do {
     retry_push = false;
@@ -202,14 +198,16 @@
         gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
 #endif
       }
-      grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
+      grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
+                               closure, error);
       return;
     }
     thread_state* ts = (thread_state*)gpr_tls_get(&g_this_thread_state);
     if (ts == nullptr) {
-      ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
+      ts = &g_thread_state[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
+                                            cur_thread_count)];
     } else {
-      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
+      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF();
     }
     thread_state* orig_ts = ts;
 
@@ -244,8 +242,8 @@
         }
         continue;
       }
-      if (grpc_closure_list_empty(ts->elems)) {
-        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
+      if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
+        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED();
         gpr_cv_signal(&ts->cv);
       }
       grpc_closure_list_append(&ts->elems, closure, error);
@@ -263,25 +261,23 @@
 
         gpr_thd_options opt = gpr_thd_options_default();
         gpr_thd_options_set_joinable(&opt);
-        gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
-                    &g_thread_state[cur_thread_count], &opt);
+        gpr_thd_new(&g_thread_state[cur_thread_count].id, "gpr_executor",
+                    executor_thread, &g_thread_state[cur_thread_count], &opt);
       }
       gpr_spinlock_unlock(&g_adding_thread_lock);
     }
     if (retry_push) {
-      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
+      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES();
     }
   } while (retry_push);
 }
 
-static void executor_push_short(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                                grpc_error* error) {
-  executor_push(exec_ctx, closure, error, true);
+static void executor_push_short(grpc_closure* closure, grpc_error* error) {
+  executor_push(closure, error, true);
 }
 
-static void executor_push_long(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                               grpc_error* error) {
-  executor_push(exec_ctx, closure, error, false);
+static void executor_push_long(grpc_closure* closure, grpc_error* error) {
+  executor_push(closure, error, false);
 }
 
 static const grpc_closure_scheduler_vtable executor_vtable_short = {
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index 8418ace..e16f11a 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/iomgr/closure.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum {
   GRPC_EXECUTOR_SHORT,
   GRPC_EXECUTOR_LONG
@@ -35,22 +31,18 @@
  * This mechanism is meant to outsource work (grpc_closure instances) to a
  * thread, for those cases where blocking isn't an option but there isn't a
  * non-blocking solution available. */
-void grpc_executor_init(grpc_exec_ctx* exec_ctx);
+void grpc_executor_init();
 
 grpc_closure_scheduler* grpc_executor_scheduler(grpc_executor_job_length);
 
 /** Shutdown the executor, running all pending work as part of the call */
-void grpc_executor_shutdown(grpc_exec_ctx* exec_ctx);
+void grpc_executor_shutdown();
 
 /** Is the executor multi-threaded? */
 bool grpc_executor_is_threaded();
 
 /* enable/disable threading - must be called after grpc_executor_init and before
    grpc_executor_shutdown */
-void grpc_executor_set_threading(grpc_exec_ctx* exec_ctx, bool enable);
-
-#ifdef __cplusplus
-}
-#endif
+void grpc_executor_set_threading(bool enable);
 
 #endif /* GRPC_CORE_LIB_IOMGR_EXECUTOR_H */
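Illustrative sketch, not applied by this patch: offloading a closure to the executor thread pool under the parameterless API. GRPC_CLOSURE_INIT is assumed from closure.h; blocking_ish_work and offload_example are hypothetical names.

static void blocking_ish_work(void* arg, grpc_error* error) {
  (void)arg;
  (void)error;  // work that must not run on a polling thread
}

static void offload_example(grpc_closure* c) {
  GRPC_CLOSURE_INIT(c, blocking_ish_work, nullptr,
                    grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
  GRPC_CLOSURE_SCHED(c, GRPC_ERROR_NONE);  // runs on an executor thread
}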
diff --git a/src/core/lib/iomgr/fork_posix.cc b/src/core/lib/iomgr/fork_posix.cc
index a55b3a3..cc13140 100644
--- a/src/core/lib/iomgr/fork_posix.cc
+++ b/src/core/lib/iomgr/fork_posix.cc
@@ -49,10 +49,10 @@
     return;
   }
   if (grpc_is_initialized()) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_timer_manager_set_threading(false);
-    grpc_executor_set_threading(&exec_ctx, false);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_executor_set_threading(false);
+    grpc_core::ExecCtx::Get()->Flush();
     if (!gpr_await_threads(
             gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                          gpr_time_from_seconds(3, GPR_TIMESPAN)))) {
@@ -64,24 +64,25 @@
 void grpc_postfork_parent() {
   if (grpc_is_initialized()) {
     grpc_timer_manager_set_threading(true);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_executor_set_threading(&exec_ctx, true);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_executor_set_threading(true);
   }
 }
 
 void grpc_postfork_child() {
   if (grpc_is_initialized()) {
     grpc_timer_manager_set_threading(true);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_executor_set_threading(&exec_ctx, true);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_executor_set_threading(true);
+    grpc_core::ExecCtx::Get()->Flush();
   }
 }
 
 void grpc_fork_handlers_auto_register() {
   if (grpc_fork_support_enabled()) {
+#ifdef GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK
     pthread_atfork(grpc_prefork, grpc_postfork_parent, grpc_postfork_child);
+#endif  // GRPC_POSIX_FORK_ALLOW_PTHREAD_ATFORK
   }
 }
 
diff --git a/src/core/lib/iomgr/gethostname.h b/src/core/lib/iomgr/gethostname.h
index 2e65b5f..9f10b4a 100644
--- a/src/core/lib/iomgr/gethostname.h
+++ b/src/core/lib/iomgr/gethostname.h
@@ -19,16 +19,8 @@
 #ifndef GRPC_CORE_LIB_IOMGR_GETHOSTNAME_H
 #define GRPC_CORE_LIB_IOMGR_GETHOSTNAME_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // Returns the hostname of the local machine.
 // Caller takes ownership of result.
 char* grpc_gethostname();
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_GETHOSTNAME_H */
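Illustrative sketch, not applied by this patch, of the ownership contract stated above; gpr_free is assumed to be the matching deallocator, and the helper name is hypothetical.

void log_hostname() {
  char* host = grpc_gethostname();
  if (host != nullptr) {
    gpr_log(GPR_DEBUG, "hostname: %s", host);
    gpr_free(host);  // caller owns the returned string
  }
}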
diff --git a/src/core/lib/iomgr/iocp_windows.cc b/src/core/lib/iomgr/iocp_windows.cc
index 6bbe566..0b6e682 100644
--- a/src/core/lib/iomgr/iocp_windows.cc
+++ b/src/core/lib/iomgr/iocp_windows.cc
@@ -42,20 +42,18 @@
 
 static HANDLE g_iocp;
 
-static DWORD deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
-                                        grpc_millis deadline) {
+static DWORD deadline_to_millis_timeout(grpc_millis deadline) {
   if (deadline == GRPC_MILLIS_INF_FUTURE) {
     return INFINITE;
   }
-  grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+  grpc_millis now = grpc_core::ExecCtx::Get()->Now();
   if (deadline < now) return 0;
   grpc_millis timeout = deadline - now;
   if (timeout > std::numeric_limits<DWORD>::max()) return INFINITE;
   return static_cast<DWORD>(deadline - now);
 }
 
-grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx* exec_ctx,
-                                     grpc_millis deadline) {
+grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline) {
   BOOL success;
   DWORD bytes = 0;
   DWORD flags = 0;
@@ -63,11 +61,11 @@
   LPOVERLAPPED overlapped;
   grpc_winsocket* socket;
   grpc_winsocket_callback_info* info;
-  GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
+  GRPC_STATS_INC_SYSCALL_POLL();
   success =
       GetQueuedCompletionStatus(g_iocp, &bytes, &completion_key, &overlapped,
-                                deadline_to_millis_timeout(exec_ctx, deadline));
-  grpc_exec_ctx_invalidate_now(exec_ctx);
+                                deadline_to_millis_timeout(deadline));
+  grpc_core::ExecCtx::Get()->InvalidateNow();
   if (success == 0 && overlapped == NULL) {
     return GRPC_IOCP_WORK_TIMEOUT;
   }
@@ -95,7 +93,7 @@
   info->bytes_transfered = bytes;
   info->wsa_error = success ? 0 : WSAGetLastError();
   GPR_ASSERT(overlapped == &info->overlapped);
-  grpc_socket_become_ready(exec_ctx, socket, info);
+  grpc_socket_become_ready(socket, info);
   return GRPC_IOCP_WORK_WORK;
 }
 
@@ -115,22 +113,22 @@
 }
 
 void grpc_iocp_flush(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_iocp_work_status work_status;
 
   do {
-    work_status = grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_PAST);
+    work_status = grpc_iocp_work(GRPC_MILLIS_INF_PAST);
   } while (work_status == GRPC_IOCP_WORK_KICK ||
-           grpc_exec_ctx_flush(&exec_ctx));
+           grpc_core::ExecCtx::Get()->Flush());
 }
 
 void grpc_iocp_shutdown(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (gpr_atm_acq_load(&g_custom_events)) {
-    grpc_iocp_work(&exec_ctx, GRPC_MILLIS_INF_FUTURE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_iocp_work(GRPC_MILLIS_INF_FUTURE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   GPR_ASSERT(CloseHandle(g_iocp));
 }
 
diff --git a/src/core/lib/iomgr/iocp_windows.h b/src/core/lib/iomgr/iocp_windows.h
index d112c50..75b0ff4 100644
--- a/src/core/lib/iomgr/iocp_windows.h
+++ b/src/core/lib/iomgr/iocp_windows.h
@@ -27,28 +27,19 @@
 
 #include "src/core/lib/iomgr/socket_windows.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum {
   GRPC_IOCP_WORK_WORK,
   GRPC_IOCP_WORK_TIMEOUT,
   GRPC_IOCP_WORK_KICK
 } grpc_iocp_work_status;
 
-grpc_iocp_work_status grpc_iocp_work(grpc_exec_ctx* exec_ctx,
-                                     grpc_millis deadline);
+grpc_iocp_work_status grpc_iocp_work(grpc_millis deadline);
 void grpc_iocp_init(void);
 void grpc_iocp_kick(void);
 void grpc_iocp_flush(void);
 void grpc_iocp_shutdown(void);
 void grpc_iocp_add_socket(grpc_winsocket*);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif
 
 #endif /* GRPC_CORE_LIB_IOMGR_IOCP_WINDOWS_H */
diff --git a/src/core/lib/iomgr/iomgr.cc b/src/core/lib/iomgr/iomgr.cc
index e077b35..70807c4 100644
--- a/src/core/lib/iomgr/iomgr.cc
+++ b/src/core/lib/iomgr/iomgr.cc
@@ -45,20 +45,20 @@
 static int g_shutdown;
 static grpc_iomgr_object g_root_object;
 
-void grpc_iomgr_init(grpc_exec_ctx* exec_ctx) {
+void grpc_iomgr_init() {
+  grpc_core::ExecCtx exec_ctx;
   g_shutdown = 0;
   gpr_mu_init(&g_mu);
   gpr_cv_init(&g_rcv);
-  grpc_exec_ctx_global_init();
-  grpc_executor_init(exec_ctx);
-  grpc_timer_list_init(exec_ctx);
+  grpc_executor_init();
+  grpc_timer_list_init();
   g_root_object.next = g_root_object.prev = &g_root_object;
   g_root_object.name = (char*)"root";
   grpc_network_status_init();
   grpc_iomgr_platform_init();
 }
 
-void grpc_iomgr_start(grpc_exec_ctx* exec_ctx) { grpc_timer_manager_init(); }
+void grpc_iomgr_start() { grpc_timer_manager_init(); }
 
 static size_t count_objects(void) {
   grpc_iomgr_object* obj;
@@ -76,75 +76,76 @@
   }
 }
 
-void grpc_iomgr_shutdown(grpc_exec_ctx* exec_ctx) {
+void grpc_iomgr_shutdown() {
   gpr_timespec shutdown_deadline = gpr_time_add(
       gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_seconds(10, GPR_TIMESPAN));
   gpr_timespec last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
 
-  grpc_timer_manager_shutdown();
-  grpc_iomgr_platform_flush();
-  grpc_executor_shutdown(exec_ctx);
+  {
+    grpc_timer_manager_shutdown();
+    grpc_iomgr_platform_flush();
+    grpc_executor_shutdown();
 
-  gpr_mu_lock(&g_mu);
-  g_shutdown = 1;
-  while (g_root_object.next != &g_root_object) {
-    if (gpr_time_cmp(
-            gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
-            gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
+    gpr_mu_lock(&g_mu);
+    g_shutdown = 1;
+    while (g_root_object.next != &g_root_object) {
+      if (gpr_time_cmp(
+              gpr_time_sub(gpr_now(GPR_CLOCK_REALTIME), last_warning_time),
+              gpr_time_from_seconds(1, GPR_TIMESPAN)) >= 0) {
+        if (g_root_object.next != &g_root_object) {
+          gpr_log(GPR_DEBUG,
+                  "Waiting for %" PRIuPTR " iomgr objects to be destroyed",
+                  count_objects());
+        }
+        last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
+      }
+      grpc_core::ExecCtx::Get()->SetNowIomgrShutdown();
+      if (grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED) {
+        gpr_mu_unlock(&g_mu);
+        grpc_core::ExecCtx::Get()->Flush();
+        grpc_iomgr_platform_flush();
+        gpr_mu_lock(&g_mu);
+        continue;
+      }
       if (g_root_object.next != &g_root_object) {
-        gpr_log(GPR_DEBUG,
-                "Waiting for %" PRIuPTR " iomgr objects to be destroyed",
-                count_objects());
-      }
-      last_warning_time = gpr_now(GPR_CLOCK_REALTIME);
-    }
-    exec_ctx->now_is_valid = true;
-    exec_ctx->now = GRPC_MILLIS_INF_FUTURE;
-    if (grpc_timer_check(exec_ctx, nullptr) == GRPC_TIMERS_FIRED) {
-      gpr_mu_unlock(&g_mu);
-      grpc_exec_ctx_flush(exec_ctx);
-      grpc_iomgr_platform_flush();
-      gpr_mu_lock(&g_mu);
-      continue;
-    }
-    if (g_root_object.next != &g_root_object) {
-      if (grpc_iomgr_abort_on_leaks()) {
-        gpr_log(GPR_DEBUG,
-                "Failed to free %" PRIuPTR
-                " iomgr objects before shutdown deadline: "
-                "memory leaks are likely",
-                count_objects());
-        dump_objects("LEAKED");
-        abort();
-      }
-      gpr_timespec short_deadline = gpr_time_add(
-          gpr_now(GPR_CLOCK_REALTIME), gpr_time_from_millis(100, GPR_TIMESPAN));
-      if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
-        if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) > 0) {
-          if (g_root_object.next != &g_root_object) {
-            gpr_log(GPR_DEBUG,
-                    "Failed to free %" PRIuPTR
-                    " iomgr objects before shutdown deadline: "
-                    "memory leaks are likely",
-                    count_objects());
-            dump_objects("LEAKED");
+        if (grpc_iomgr_abort_on_leaks()) {
+          gpr_log(GPR_DEBUG,
+                  "Failed to free %" PRIuPTR
+                  " iomgr objects before shutdown deadline: "
+                  "memory leaks are likely",
+                  count_objects());
+          dump_objects("LEAKED");
+          abort();
+        }
+        gpr_timespec short_deadline =
+            gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
+                         gpr_time_from_millis(100, GPR_TIMESPAN));
+        if (gpr_cv_wait(&g_rcv, &g_mu, short_deadline)) {
+          if (gpr_time_cmp(gpr_now(GPR_CLOCK_REALTIME), shutdown_deadline) >
+              0) {
+            if (g_root_object.next != &g_root_object) {
+              gpr_log(GPR_DEBUG,
+                      "Failed to free %" PRIuPTR
+                      " iomgr objects before shutdown deadline: "
+                      "memory leaks are likely",
+                      count_objects());
+              dump_objects("LEAKED");
+            }
+            break;
           }
-          break;
         }
       }
     }
+    gpr_mu_unlock(&g_mu);
+    grpc_timer_list_shutdown();
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  gpr_mu_unlock(&g_mu);
-
-  grpc_timer_list_shutdown(exec_ctx);
-  grpc_exec_ctx_flush(exec_ctx);
 
   /* ensure all threads have left g_mu */
   gpr_mu_lock(&g_mu);
   gpr_mu_unlock(&g_mu);
 
   grpc_iomgr_platform_shutdown();
-  grpc_exec_ctx_global_shutdown();
   grpc_network_status_shutdown();
   gpr_mu_destroy(&g_mu);
   gpr_cv_destroy(&g_rcv);
diff --git a/src/core/lib/iomgr/iomgr.h b/src/core/lib/iomgr/iomgr.h
index d1549c8..3f238c6 100644
--- a/src/core/lib/iomgr/iomgr.h
+++ b/src/core/lib/iomgr/iomgr.h
@@ -22,22 +22,14 @@
 #include <grpc/impl/codegen/exec_ctx_fwd.h>
 #include "src/core/lib/iomgr/port.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Initializes the iomgr. */
-void grpc_iomgr_init(grpc_exec_ctx* exec_ctx);
+void grpc_iomgr_init();
 
 /** Starts any background threads for iomgr. */
-void grpc_iomgr_start(grpc_exec_ctx* exec_ctx);
+void grpc_iomgr_start();
 
 /** Signals the intention to shutdown the iomgr. Expects to be able to flush
  * exec_ctx. */
-void grpc_iomgr_shutdown(grpc_exec_ctx* exec_ctx);
-
-#ifdef __cplusplus
-}
-#endif
+void grpc_iomgr_shutdown();
 
 #endif /* GRPC_CORE_LIB_IOMGR_IOMGR_H */
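Illustrative sketch, not applied by this patch: the iomgr lifecycle under the trimmed signatures. grpc_iomgr_init() now builds its own ExecCtx internally, while grpc_iomgr_shutdown() relies on the caller having one on the stack, which it flushes. The driver function is hypothetical; in practice grpc_init()/grpc_shutdown() perform this sequence.

void iomgr_lifecycle_sketch() {  // hypothetical driver
  grpc_iomgr_init();   // creates its own grpc_core::ExecCtx internally
  grpc_iomgr_start();  // starts timer/background threads
  // ... run workload ...
  {
    grpc_core::ExecCtx exec_ctx;  // shutdown flushes the current context
    grpc_iomgr_shutdown();
  }
}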
diff --git a/src/core/lib/iomgr/iomgr_internal.h b/src/core/lib/iomgr/iomgr_internal.h
index b818c68..20b3cb7 100644
--- a/src/core/lib/iomgr/iomgr_internal.h
+++ b/src/core/lib/iomgr/iomgr_internal.h
@@ -23,10 +23,6 @@
 
 #include "src/core/lib/iomgr/iomgr.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_iomgr_object {
   char* name;
   struct grpc_iomgr_object* next;
@@ -44,8 +40,4 @@
 
 bool grpc_iomgr_abort_on_leaks(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_IOMGR_INTERNAL_H */
diff --git a/src/core/lib/iomgr/iomgr_uv.cc b/src/core/lib/iomgr/iomgr_uv.cc
index b8a10f2..9614c2e 100644
--- a/src/core/lib/iomgr/iomgr_uv.cc
+++ b/src/core/lib/iomgr/iomgr_uv.cc
@@ -29,12 +29,11 @@
 gpr_thd_id g_init_thread;
 
 void grpc_iomgr_platform_init(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_pollset_global_init();
 
-  grpc_executor_set_threading(&exec_ctx, false);
+  grpc_executor_set_threading(false);
   g_init_thread = gpr_thd_currentid();
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 void grpc_iomgr_platform_flush(void) {}
 void grpc_iomgr_platform_shutdown(void) { grpc_pollset_global_shutdown(); }
diff --git a/src/core/lib/iomgr/iomgr_uv.h b/src/core/lib/iomgr/iomgr_uv.h
index bc42ca8..3b4daaa 100644
--- a/src/core/lib/iomgr/iomgr_uv.h
+++ b/src/core/lib/iomgr/iomgr_uv.h
@@ -23,18 +23,10 @@
 
 #include <grpc/support/thd.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* The thread ID of the thread on which grpc was initialized. Used to verify
  * that all calls into libuv are made on that same thread */
 extern gpr_thd_id g_init_thread;
 
-#ifdef __cplusplus
-}
-#endif
-
 #ifdef GRPC_UV_THREAD_CHECK
 #define GRPC_UV_ASSERT_SAME_THREAD() \
   GPR_ASSERT(gpr_thd_currentid() == g_init_thread)
diff --git a/src/core/lib/iomgr/load_file.h b/src/core/lib/iomgr/load_file.h
index 5b367c1..a733652 100644
--- a/src/core/lib/iomgr/load_file.h
+++ b/src/core/lib/iomgr/load_file.h
@@ -25,17 +25,9 @@
 
 #include "src/core/lib/iomgr/error.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Loads the content of a file into a slice. add_null_terminator will add
    a NULL terminator if non-zero. */
 grpc_error* grpc_load_file(const char* filename, int add_null_terminator,
                            grpc_slice* slice);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_LOAD_FILE_H */
diff --git a/src/core/lib/iomgr/lockfree_event.cc b/src/core/lib/iomgr/lockfree_event.cc
index f0e798e..7b194e3 100644
--- a/src/core/lib/iomgr/lockfree_event.cc
+++ b/src/core/lib/iomgr/lockfree_event.cc
@@ -85,7 +85,7 @@
                                    kShutdownBit /* shutdown, no error */));
 }
 
-void LockfreeEvent::NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure) {
+void LockfreeEvent::NotifyOn(grpc_closure* closure) {
   while (true) {
     gpr_atm curr = gpr_atm_no_barrier_load(&state_);
     if (grpc_polling_trace.enabled()) {
@@ -118,7 +118,7 @@
            closure when transitioning out of CLOSURE_NO_READY state (i.e there
            is no other code that needs to 'happen-after' this) */
         if (gpr_atm_no_barrier_cas(&state_, kClosureReady, kClosureNotReady)) {
-          GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+          GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
           return; /* Successful. Return */
         }
 
@@ -131,7 +131,7 @@
            schedule the closure with the shutdown error */
         if ((curr & kShutdownBit) > 0) {
           grpc_error* shutdown_err = (grpc_error*)(curr & ~kShutdownBit);
-          GRPC_CLOSURE_SCHED(exec_ctx, closure,
+          GRPC_CLOSURE_SCHED(closure,
                              GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                  "FD Shutdown", &shutdown_err, 1));
           return;
@@ -149,8 +149,7 @@
   GPR_UNREACHABLE_CODE(return );
 }
 
-bool LockfreeEvent::SetShutdown(grpc_exec_ctx* exec_ctx,
-                                grpc_error* shutdown_err) {
+bool LockfreeEvent::SetShutdown(grpc_error* shutdown_err) {
   gpr_atm new_state = (gpr_atm)shutdown_err | kShutdownBit;
 
   while (true) {
@@ -184,7 +183,7 @@
            happens-after on that edge), and a release to pair with anything
            loading the shutdown state. */
         if (gpr_atm_full_cas(&state_, curr, new_state)) {
-          GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr,
+          GRPC_CLOSURE_SCHED((grpc_closure*)curr,
                              GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                  "FD Shutdown", &shutdown_err, 1));
           return true;
@@ -200,7 +199,7 @@
   GPR_UNREACHABLE_CODE(return false);
 }
 
-void LockfreeEvent::SetReady(grpc_exec_ctx* exec_ctx) {
+void LockfreeEvent::SetReady() {
   while (true) {
     gpr_atm curr = gpr_atm_no_barrier_load(&state_);
 
@@ -234,7 +233,7 @@
            spurious set_ready; release pairs with this or the acquire in
            notify_on (or set_shutdown) */
         else if (gpr_atm_full_cas(&state_, curr, kClosureNotReady)) {
-          GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)curr, GRPC_ERROR_NONE);
+          GRPC_CLOSURE_SCHED((grpc_closure*)curr, GRPC_ERROR_NONE);
           return;
         }
         /* else the state changed again (only possible by either a racing
diff --git a/src/core/lib/iomgr/lockfree_event.h b/src/core/lib/iomgr/lockfree_event.h
index aec67a3..3bd3fd7 100644
--- a/src/core/lib/iomgr/lockfree_event.h
+++ b/src/core/lib/iomgr/lockfree_event.h
@@ -44,9 +44,9 @@
     return (gpr_atm_no_barrier_load(&state_) & kShutdownBit) != 0;
   }
 
-  void NotifyOn(grpc_exec_ctx* exec_ctx, grpc_closure* closure);
-  bool SetShutdown(grpc_exec_ctx* exec_ctx, grpc_error* error);
-  void SetReady(grpc_exec_ctx* exec_ctx);
+  void NotifyOn(grpc_closure* closure);
+  bool SetShutdown(grpc_error* error);
+  void SetReady();
 
  private:
   enum State { kClosureNotReady = 0, kClosureReady = 2, kShutdownBit = 1 };
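Illustrative sketch, not applied by this patch: how the three LockfreeEvent calls pair up after exec_ctx removal. The grpc_core namespace qualification and the surrounding function names are assumptions for illustration.

// Consumer side: register interest; the closure is scheduled immediately
// if the event is already ready or shut down.
void wait_for_readable(grpc_core::LockfreeEvent* ev,
                       grpc_closure* on_readable) {
  ev->NotifyOn(on_readable);
}

// Poller side: signal readiness; schedules the pending closure (if any)
// onto the current thread-local ExecCtx.
void signal_readable(grpc_core::LockfreeEvent* ev) { ev->SetReady(); }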
diff --git a/src/core/lib/iomgr/polling_entity.cc b/src/core/lib/iomgr/polling_entity.cc
index 0ee4ea1..126f6f4 100644
--- a/src/core/lib/iomgr/polling_entity.cc
+++ b/src/core/lib/iomgr/polling_entity.cc
@@ -56,32 +56,28 @@
   return pollent->tag == GRPC_POLLS_NONE;
 }
 
-void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
-                                            grpc_polling_entity* pollent,
+void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent,
                                             grpc_pollset_set* pss_dst) {
   if (pollent->tag == GRPC_POLLS_POLLSET) {
     GPR_ASSERT(pollent->pollent.pollset != nullptr);
-    grpc_pollset_set_add_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
+    grpc_pollset_set_add_pollset(pss_dst, pollent->pollent.pollset);
   } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
     GPR_ASSERT(pollent->pollent.pollset_set != nullptr);
-    grpc_pollset_set_add_pollset_set(exec_ctx, pss_dst,
-                                     pollent->pollent.pollset_set);
+    grpc_pollset_set_add_pollset_set(pss_dst, pollent->pollent.pollset_set);
   } else {
     gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag);
     abort();
   }
 }
 
-void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                              grpc_polling_entity* pollent,
+void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent,
                                               grpc_pollset_set* pss_dst) {
   if (pollent->tag == GRPC_POLLS_POLLSET) {
     GPR_ASSERT(pollent->pollent.pollset != nullptr);
-    grpc_pollset_set_del_pollset(exec_ctx, pss_dst, pollent->pollent.pollset);
+    grpc_pollset_set_del_pollset(pss_dst, pollent->pollent.pollset);
   } else if (pollent->tag == GRPC_POLLS_POLLSET_SET) {
     GPR_ASSERT(pollent->pollent.pollset_set != nullptr);
-    grpc_pollset_set_del_pollset_set(exec_ctx, pss_dst,
-                                     pollent->pollent.pollset_set);
+    grpc_pollset_set_del_pollset_set(pss_dst, pollent->pollent.pollset_set);
   } else {
     gpr_log(GPR_ERROR, "Invalid grpc_polling_entity tag '%d'", pollent->tag);
     abort();
diff --git a/src/core/lib/iomgr/polling_entity.h b/src/core/lib/iomgr/polling_entity.h
index 867e085..0102d32 100644
--- a/src/core/lib/iomgr/polling_entity.h
+++ b/src/core/lib/iomgr/polling_entity.h
@@ -22,10 +22,6 @@
 #include "src/core/lib/iomgr/pollset.h"
 #include "src/core/lib/iomgr/pollset_set.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum grpc_pollset_tag {
   GRPC_POLLS_NONE,
   GRPC_POLLS_POLLSET,
@@ -59,17 +55,12 @@
 
 /** Add the pollset or pollset_set in \a pollent to the destination pollset_set
  * \a * pss_dst */
-void grpc_polling_entity_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
-                                            grpc_polling_entity* pollent,
+void grpc_polling_entity_add_to_pollset_set(grpc_polling_entity* pollent,
                                             grpc_pollset_set* pss_dst);
 
 /** Delete the pollset or pollset_set in \a pollent from the destination
  * pollset_set \a * pss_dst */
-void grpc_polling_entity_del_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                              grpc_polling_entity* pollent,
+void grpc_polling_entity_del_from_pollset_set(grpc_polling_entity* pollent,
                                               grpc_pollset_set* pss_dst);
-#ifdef __cplusplus
-}
-#endif
 
 #endif /* GRPC_CORE_LIB_IOMGR_POLLING_ENTITY_H */
diff --git a/src/core/lib/iomgr/pollset.h b/src/core/lib/iomgr/pollset.h
index 6911a8e..6bb3cd3 100644
--- a/src/core/lib/iomgr/pollset.h
+++ b/src/core/lib/iomgr/pollset.h
@@ -25,10 +25,6 @@
 
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern grpc_core::DebugOnlyTraceFlag grpc_trace_fd_refcount;
 
 /* A grpc_pollset is a set of file descriptors that a higher level item is
@@ -46,9 +42,8 @@
 void grpc_pollset_init(grpc_pollset* pollset, gpr_mu** mu);
 /* Begin shutting down the pollset, and call closure when done.
  * pollset's mutex must be held */
-void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_closure* closure);
-void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset);
+void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure);
+void grpc_pollset_destroy(grpc_pollset* pollset);
 
 /* Do some work on a pollset.
    May involve invoking asynchronous callbacks, or actually polling file
@@ -72,18 +67,14 @@
    May call grpc_closure_list_run on grpc_closure_list, without holding the
    pollset
    lock */
-grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+grpc_error* grpc_pollset_work(grpc_pollset* pollset,
                               grpc_pollset_worker** worker,
                               grpc_millis deadline) GRPC_MUST_USE_RESULT;
 
 /* Break one polling thread out of polling work for this pollset.
    If specific_worker is non-NULL, then kick that worker. */
-grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+grpc_error* grpc_pollset_kick(grpc_pollset* pollset,
                               grpc_pollset_worker* specific_worker)
     GRPC_MUST_USE_RESULT;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_POLLSET_H */
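Illustrative sketch, not applied by this patch: a polling loop against the new signatures, with deadlines expressed as grpc_millis taken from the cached ExecCtx clock. The loop body and function name are illustrative only.

void poll_until(grpc_pollset* pollset, gpr_mu* mu, grpc_millis deadline) {
  grpc_core::ExecCtx exec_ctx;
  gpr_mu_lock(mu);  // pollset mutex obtained from grpc_pollset_init()
  while (grpc_core::ExecCtx::Get()->Now() < deadline) {
    grpc_pollset_worker* worker = nullptr;
    grpc_error* error = grpc_pollset_work(pollset, &worker, deadline);
    GRPC_ERROR_UNREF(error);
    grpc_core::ExecCtx::Get()->InvalidateNow();  // refresh time on next Now()
  }
  gpr_mu_unlock(mu);
}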
diff --git a/src/core/lib/iomgr/pollset_set.h b/src/core/lib/iomgr/pollset_set.h
index 0167a50..a94d0af 100644
--- a/src/core/lib/iomgr/pollset_set.h
+++ b/src/core/lib/iomgr/pollset_set.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/iomgr/pollset.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* A grpc_pollset_set is a set of pollsets that are interested in an
    action. Adding a pollset to a pollset_set automatically adds any
    fd's (etc) that have been registered with the set_set to that pollset.
@@ -33,23 +29,14 @@
 typedef struct grpc_pollset_set grpc_pollset_set;
 
 grpc_pollset_set* grpc_pollset_set_create(void);
-void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
-                              grpc_pollset_set* pollset_set);
-void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set);
+void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset);
-void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset);
-void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
                                       grpc_pollset_set* item);
-void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                       grpc_pollset_set* item);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_POLLSET_SET_H */
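Illustrative sketch, not applied by this patch: pollset_set aggregation with the trimmed signatures. An enclosing ExecCtx is assumed since these calls may schedule closures under some pollers; the helper name is hypothetical.

void pollset_set_sketch(grpc_pollset* pollset) {
  grpc_core::ExecCtx exec_ctx;
  grpc_pollset_set* pss = grpc_pollset_set_create();
  grpc_pollset_set_add_pollset(pss, pollset);
  // fds and child pollset_sets registered with pss are polled via `pollset`
  grpc_pollset_set_del_pollset(pss, pollset);
  grpc_pollset_set_destroy(pss);
}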
diff --git a/src/core/lib/iomgr/pollset_set_uv.cc b/src/core/lib/iomgr/pollset_set_uv.cc
index 90186ed..ac5dade 100644
--- a/src/core/lib/iomgr/pollset_set_uv.cc
+++ b/src/core/lib/iomgr/pollset_set_uv.cc
@@ -26,23 +26,18 @@
   return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
 }
 
-void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
-                              grpc_pollset_set* pollset_set) {}
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
 
-void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset) {}
 
-void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset) {}
 
-void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
                                       grpc_pollset_set* item) {}
 
-void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                       grpc_pollset_set* item) {}
 
 #endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/pollset_set_windows.cc b/src/core/lib/iomgr/pollset_set_windows.cc
index 2105a47..85edc9d 100644
--- a/src/core/lib/iomgr/pollset_set_windows.cc
+++ b/src/core/lib/iomgr/pollset_set_windows.cc
@@ -27,23 +27,18 @@
   return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
 }
 
-void grpc_pollset_set_destroy(grpc_exec_ctx* exec_ctx,
-                              grpc_pollset_set* pollset_set) {}
+void grpc_pollset_set_destroy(grpc_pollset_set* pollset_set) {}
 
-void grpc_pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+void grpc_pollset_set_add_pollset(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset) {}
 
-void grpc_pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
-                                  grpc_pollset_set* pollset_set,
+void grpc_pollset_set_del_pollset(grpc_pollset_set* pollset_set,
                                   grpc_pollset* pollset) {}
 
-void grpc_pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+void grpc_pollset_set_add_pollset_set(grpc_pollset_set* bag,
                                       grpc_pollset_set* item) {}
 
-void grpc_pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_pollset_set* bag,
+void grpc_pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                       grpc_pollset_set* item) {}
 
 #endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/pollset_uv.cc b/src/core/lib/iomgr/pollset_uv.cc
index 16132f3..d9e5ad8 100644
--- a/src/core/lib/iomgr/pollset_uv.cc
+++ b/src/core/lib/iomgr/pollset_uv.cc
@@ -88,8 +88,7 @@
   pollset->shutting_down = 0;
 }
 
-void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_closure* closure) {
+void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   GPR_ASSERT(!pollset->shutting_down);
   GRPC_UV_ASSERT_SAME_THREAD();
   pollset->shutting_down = 1;
@@ -100,10 +99,10 @@
     // kick the loop once
     uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
 }
 
-void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
+void grpc_pollset_destroy(grpc_pollset* pollset) {
   GRPC_UV_ASSERT_SAME_THREAD();
   uv_close((uv_handle_t*)pollset->timer, timer_close_cb);
   // timer.data is a boolean indicating that the timer has finished closing
@@ -115,14 +114,14 @@
   }
 }
 
-grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+grpc_error* grpc_pollset_work(grpc_pollset* pollset,
                               grpc_pollset_worker** worker_hdl,
                               grpc_millis deadline) {
   uint64_t timeout;
   GRPC_UV_ASSERT_SAME_THREAD();
   gpr_mu_unlock(&grpc_polling_mu);
   if (grpc_pollset_work_run_loop) {
-    grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+    grpc_millis now = grpc_core::ExecCtx::Get()->Now();
     if (deadline >= now) {
       timeout = deadline - now;
     } else {
@@ -140,14 +139,14 @@
       uv_run(uv_default_loop(), UV_RUN_NOWAIT);
     }
   }
-  if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
-    grpc_exec_ctx_flush(exec_ctx);
+  if (!grpc_closure_list_empty(*grpc_core::ExecCtx::Get()->closure_list())) {
+    grpc_core::ExecCtx::Get()->Flush();
   }
   gpr_mu_lock(&grpc_polling_mu);
   return GRPC_ERROR_NONE;
 }
 
-grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+grpc_error* grpc_pollset_kick(grpc_pollset* pollset,
                               grpc_pollset_worker* specific_worker) {
   GRPC_UV_ASSERT_SAME_THREAD();
   uv_timer_start(dummy_uv_handle, dummy_timer_cb, 0, 0);
diff --git a/src/core/lib/iomgr/pollset_uv.h b/src/core/lib/iomgr/pollset_uv.h
index 5cc9faf..566c110 100644
--- a/src/core/lib/iomgr/pollset_uv.h
+++ b/src/core/lib/iomgr/pollset_uv.h
@@ -19,17 +19,9 @@
 #ifndef GRPC_CORE_LIB_IOMGR_POLLSET_UV_H
 #define GRPC_CORE_LIB_IOMGR_POLLSET_UV_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern int grpc_pollset_work_run_loop;
 
 void grpc_pollset_global_init(void);
 void grpc_pollset_global_shutdown(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_POLLSET_UV_H */
diff --git a/src/core/lib/iomgr/pollset_windows.cc b/src/core/lib/iomgr/pollset_windows.cc
index 95dd7d7..6ef949a 100644
--- a/src/core/lib/iomgr/pollset_windows.cc
+++ b/src/core/lib/iomgr/pollset_windows.cc
@@ -92,20 +92,19 @@
           &pollset->root_worker;
 }
 
-void grpc_pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_closure* closure) {
+void grpc_pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   pollset->shutting_down = 1;
-  grpc_pollset_kick(exec_ctx, pollset, GRPC_POLLSET_KICK_BROADCAST);
+  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
   if (!pollset->is_iocp_worker) {
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
   } else {
     pollset->on_shutdown = closure;
   }
 }
 
-void grpc_pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {}
+void grpc_pollset_destroy(grpc_pollset* pollset) {}
 
-grpc_error* grpc_pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+grpc_error* grpc_pollset_work(grpc_pollset* pollset,
                               grpc_pollset_worker** worker_hdl,
                               grpc_millis deadline) {
   grpc_pollset_worker worker;
@@ -126,8 +125,8 @@
       pollset->is_iocp_worker = 1;
       g_active_poller = &worker;
       gpr_mu_unlock(&grpc_polling_mu);
-      grpc_iocp_work(exec_ctx, deadline);
-      grpc_exec_ctx_flush(exec_ctx);
+      grpc_iocp_work(deadline);
+      grpc_core::ExecCtx::Get()->Flush();
       gpr_mu_lock(&grpc_polling_mu);
       pollset->is_iocp_worker = 0;
       g_active_poller = NULL;
@@ -145,7 +144,7 @@
       }
 
       if (pollset->shutting_down && pollset->on_shutdown != NULL) {
-        GRPC_CLOSURE_SCHED(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE);
+        GRPC_CLOSURE_SCHED(pollset->on_shutdown, GRPC_ERROR_NONE);
         pollset->on_shutdown = NULL;
       }
       goto done;
@@ -158,18 +157,18 @@
     while (!worker.kicked) {
       if (gpr_cv_wait(&worker.cv, &grpc_polling_mu,
                       grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
-        grpc_exec_ctx_invalidate_now(exec_ctx);
+        grpc_core::ExecCtx::Get()->InvalidateNow();
         break;
       }
-      grpc_exec_ctx_invalidate_now(exec_ctx);
+      grpc_core::ExecCtx::Get()->InvalidateNow();
     }
   } else {
     pollset->kicked_without_pollers = 0;
   }
 done:
-  if (!grpc_closure_list_empty(exec_ctx->closure_list)) {
+  if (!grpc_closure_list_empty(*grpc_core::ExecCtx::Get()->closure_list())) {
     gpr_mu_unlock(&grpc_polling_mu);
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(&grpc_polling_mu);
   }
   if (added_worker) {
@@ -181,7 +180,7 @@
   return GRPC_ERROR_NONE;
 }
 
-grpc_error* grpc_pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
+grpc_error* grpc_pollset_kick(grpc_pollset* p,
                               grpc_pollset_worker* specific_worker) {
   if (specific_worker != NULL) {
     if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
@@ -209,7 +208,7 @@
     specific_worker =
         pop_front_worker(&p->root_worker, GRPC_POLLSET_WORKER_LINK_POLLSET);
     if (specific_worker != NULL) {
-      grpc_pollset_kick(exec_ctx, p, specific_worker);
+      grpc_pollset_kick(p, specific_worker);
     } else if (p->is_iocp_worker) {
       grpc_iocp_kick();
     } else {
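
The pattern in these iomgr changes is uniform: functions that used to thread a grpc_exec_ctx* through every call now read the execution context from thread-local storage via grpc_core::ExecCtx::Get(). A minimal sketch of the resulting shape, assuming only the ExecCtx methods visible in this diff (Now, InvalidateNow, Flush); the function name and body are illustrative, not a real gRPC symbol:

    #include "src/core/lib/iomgr/exec_ctx.h"
    #include "src/core/lib/iomgr/pollset.h"

    // Sketch: a pollset_work-style function after the exec_ctx parameter is gone.
    grpc_error* pollset_work_sketch(grpc_pollset* pollset, grpc_millis deadline) {
      (void)pollset;                                        // polling elided here
      grpc_millis now = grpc_core::ExecCtx::Get()->Now();   // cached clock read
      grpc_millis timeout = deadline > now ? deadline - now : 0;
      (void)timeout;               // would bound the underlying poll/uv_run call
      grpc_core::ExecCtx::Get()->Flush();  // run closures queued on this thread
      return GRPC_ERROR_NONE;
    }
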
diff --git a/src/core/lib/iomgr/pollset_windows.h b/src/core/lib/iomgr/pollset_windows.h
index f6da9da..93fe7d6 100644
--- a/src/core/lib/iomgr/pollset_windows.h
+++ b/src/core/lib/iomgr/pollset_windows.h
@@ -26,10 +26,6 @@
 #ifdef GRPC_WINSOCK_SOCKET
 #include "src/core/lib/iomgr/socket_windows.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* There isn't really any such thing as a pollset under Windows, due to the
    nature of the IO completion ports. A Windows "pollset" is merely a mutex
    used to synchronize with the IOCP, and workers are condition variables
@@ -67,10 +63,6 @@
 void grpc_pollset_global_init(void);
 void grpc_pollset_global_shutdown(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif
 
 #endif /* GRPC_CORE_LIB_IOMGR_POLLSET_WINDOWS_H */
diff --git a/src/core/lib/iomgr/resolve_address.h b/src/core/lib/iomgr/resolve_address.h
index 847e10f..12fc2ed 100644
--- a/src/core/lib/iomgr/resolve_address.h
+++ b/src/core/lib/iomgr/resolve_address.h
@@ -25,10 +25,6 @@
 
 #define GRPC_MAX_SOCKADDR_SIZE 128
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   char addr[GRPC_MAX_SOCKADDR_SIZE];
   size_t len;
@@ -42,8 +38,7 @@
 /* Asynchronously resolve addr. Use default_port if a port isn't designated
    in addr, otherwise use the port in addr. */
 /* TODO(ctiller): add a timeout here */
-extern void (*grpc_resolve_address)(grpc_exec_ctx* exec_ctx, const char* addr,
-                                    const char* default_port,
+extern void (*grpc_resolve_address)(const char* addr, const char* default_port,
                                     grpc_pollset_set* interested_parties,
                                     grpc_closure* on_done,
                                     grpc_resolved_addresses** addresses);
@@ -56,8 +51,4 @@
     const char* name, const char* default_port,
     grpc_resolved_addresses** addresses);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_RESOLVE_ADDRESS_H */
diff --git a/src/core/lib/iomgr/resolve_address_posix.cc b/src/core/lib/iomgr/resolve_address_posix.cc
index fb5fa9d..cc3d4fd 100644
--- a/src/core/lib/iomgr/resolve_address_posix.cc
+++ b/src/core/lib/iomgr/resolve_address_posix.cc
@@ -42,6 +42,7 @@
 static grpc_error* blocking_resolve_address_impl(
     const char* name, const char* default_port,
     grpc_resolved_addresses** addresses) {
+  grpc_core::ExecCtx exec_ctx;
   struct addrinfo hints;
   struct addrinfo *result = nullptr, *resp;
   char* host;
@@ -81,7 +82,7 @@
 
   GRPC_SCHEDULING_START_BLOCKING_REGION;
   s = getaddrinfo(host, port, &hints, &result);
-  GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
+  GRPC_SCHEDULING_END_BLOCKING_REGION;
 
   if (s != 0) {
     /* Retry if well-known service name is recognized */
@@ -90,7 +91,7 @@
       if (strcmp(port, svc[i][0]) == 0) {
         GRPC_SCHEDULING_START_BLOCKING_REGION;
         s = getaddrinfo(host, svc[i][1], &hints, &result);
-        GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
+        GRPC_SCHEDULING_END_BLOCKING_REGION;
         break;
       }
     }
@@ -152,12 +153,10 @@
 
 /* Callback to be passed to grpc_executor to asynch-ify
  * grpc_blocking_resolve_address */
-static void do_request_thread(grpc_exec_ctx* exec_ctx, void* rp,
-                              grpc_error* error) {
+static void do_request_thread(void* rp, grpc_error* error) {
   request* r = (request*)rp;
-  GRPC_CLOSURE_SCHED(
-      exec_ctx, r->on_done,
-      grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
+  GRPC_CLOSURE_SCHED(r->on_done, grpc_blocking_resolve_address(
+                                     r->name, r->default_port, r->addrs_out));
   gpr_free(r->name);
   gpr_free(r->default_port);
   gpr_free(r);
@@ -170,8 +169,7 @@
   gpr_free(addrs);
 }
 
-static void resolve_address_impl(grpc_exec_ctx* exec_ctx, const char* name,
-                                 const char* default_port,
+static void resolve_address_impl(const char* name, const char* default_port,
                                  grpc_pollset_set* interested_parties,
                                  grpc_closure* on_done,
                                  grpc_resolved_addresses** addrs) {
@@ -182,11 +180,11 @@
   r->default_port = gpr_strdup(default_port);
   r->on_done = on_done;
   r->addrs_out = addrs;
-  GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE);
 }
 
 void (*grpc_resolve_address)(
-    grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+    const char* name, const char* default_port,
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_resolved_addresses** addrs) = resolve_address_impl;
 
diff --git a/src/core/lib/iomgr/resolve_address_uv.cc b/src/core/lib/iomgr/resolve_address_uv.cc
index 6d09fd1..3eab04f 100644
--- a/src/core/lib/iomgr/resolve_address_uv.cc
+++ b/src/core/lib/iomgr/resolve_address_uv.cc
@@ -114,7 +114,7 @@
 static void getaddrinfo_callback(uv_getaddrinfo_t* req, int status,
                                  struct addrinfo* res) {
   request* r = (request*)req->data;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_error* error;
   int retry_status;
   char* port = r->port;
@@ -130,8 +130,8 @@
   /* Either no retry was attempted, or the retry failed. Either way, the
      original error probably has more interesting information */
   error = handle_addrinfo_result(status, res, r->addresses);
-  GRPC_CLOSURE_SCHED(&exec_ctx, r->on_done, error);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_CLOSURE_SCHED(r->on_done, error);
+
   gpr_free(r->hints);
   gpr_free(r->host);
   gpr_free(r->port);
@@ -224,8 +224,7 @@
   gpr_free(addrs);
 }
 
-static void resolve_address_impl(grpc_exec_ctx* exec_ctx, const char* name,
-                                 const char* default_port,
+static void resolve_address_impl(const char* name, const char* default_port,
                                  grpc_pollset_set* interested_parties,
                                  grpc_closure* on_done,
                                  grpc_resolved_addresses** addrs) {
@@ -239,7 +238,7 @@
   GRPC_UV_ASSERT_SAME_THREAD();
   err = try_split_host_port(name, default_port, &host, &port);
   if (err != GRPC_ERROR_NONE) {
-    GRPC_CLOSURE_SCHED(exec_ctx, on_done, err);
+    GRPC_CLOSURE_SCHED(on_done, err);
     gpr_free(host);
     gpr_free(port);
     return;
@@ -268,7 +267,7 @@
     err = GRPC_ERROR_CREATE_FROM_STATIC_STRING("getaddrinfo failed");
     err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR,
                              grpc_slice_from_static_string(uv_strerror(s)));
-    GRPC_CLOSURE_SCHED(exec_ctx, on_done, err);
+    GRPC_CLOSURE_SCHED(on_done, err);
     gpr_free(r);
     gpr_free(req);
     gpr_free(hints);
@@ -278,7 +277,7 @@
 }
 
 void (*grpc_resolve_address)(
-    grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+    const char* name, const char* default_port,
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_resolved_addresses** addrs) = resolve_address_impl;
 
diff --git a/src/core/lib/iomgr/resolve_address_windows.cc b/src/core/lib/iomgr/resolve_address_windows.cc
index d9fc17a..ccb1dae 100644
--- a/src/core/lib/iomgr/resolve_address_windows.cc
+++ b/src/core/lib/iomgr/resolve_address_windows.cc
@@ -51,6 +51,7 @@
 static grpc_error* blocking_resolve_address_impl(
     const char* name, const char* default_port,
     grpc_resolved_addresses** addresses) {
+  grpc_core::ExecCtx exec_ctx;
   struct addrinfo hints;
   struct addrinfo *result = NULL, *resp;
   char* host;
@@ -87,7 +88,7 @@
 
   GRPC_SCHEDULING_START_BLOCKING_REGION;
   s = getaddrinfo(host, port, &hints, &result);
-  GRPC_SCHEDULING_END_BLOCKING_REGION_NO_EXEC_CTX;
+  GRPC_SCHEDULING_END_BLOCKING_REGION;
   if (s != 0) {
     error = GRPC_WSA_ERROR(WSAGetLastError(), "getaddrinfo");
     goto done;
@@ -132,8 +133,7 @@
 
 /* Callback to be passed to grpc_executor to asynch-ify
  * grpc_blocking_resolve_address */
-static void do_request_thread(grpc_exec_ctx* exec_ctx, void* rp,
-                              grpc_error* error) {
+static void do_request_thread(void* rp, grpc_error* error) {
   request* r = (request*)rp;
   if (error == GRPC_ERROR_NONE) {
     error =
@@ -141,7 +141,7 @@
   } else {
     GRPC_ERROR_REF(error);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, error);
+  GRPC_CLOSURE_SCHED(r->on_done, error);
   gpr_free(r->name);
   gpr_free(r->default_port);
   gpr_free(r);
@@ -154,8 +154,7 @@
   gpr_free(addrs);
 }
 
-static void resolve_address_impl(grpc_exec_ctx* exec_ctx, const char* name,
-                                 const char* default_port,
+static void resolve_address_impl(const char* name, const char* default_port,
                                  grpc_pollset_set* interested_parties,
                                  grpc_closure* on_done,
                                  grpc_resolved_addresses** addresses) {
@@ -166,11 +165,11 @@
   r->default_port = gpr_strdup(default_port);
   r->on_done = on_done;
   r->addresses = addresses;
-  GRPC_CLOSURE_SCHED(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(&r->request_closure, GRPC_ERROR_NONE);
 }
 
 void (*grpc_resolve_address)(
-    grpc_exec_ctx* exec_ctx, const char* name, const char* default_port,
+    const char* name, const char* default_port,
     grpc_pollset_set* interested_parties, grpc_closure* on_done,
     grpc_resolved_addresses** addresses) = resolve_address_impl;
 
diff --git a/src/core/lib/iomgr/resource_quota.cc b/src/core/lib/iomgr/resource_quota.cc
index ccd8d9f..eaf2f5d 100644
--- a/src/core/lib/iomgr/resource_quota.cc
+++ b/src/core/lib/iomgr/resource_quota.cc
@@ -154,8 +154,7 @@
   char* name;
 };
 
-static void ru_unref_by(grpc_exec_ctx* exec_ctx,
-                        grpc_resource_user* resource_user, gpr_atm amount);
+static void ru_unref_by(grpc_resource_user* resource_user, gpr_atm amount);
 
 /*******************************************************************************
  * list management
@@ -239,35 +238,31 @@
  * resource quota state machine
  */
 
-static bool rq_alloc(grpc_exec_ctx* exec_ctx,
-                     grpc_resource_quota* resource_quota);
+static bool rq_alloc(grpc_resource_quota* resource_quota);
 static bool rq_reclaim_from_per_user_free_pool(
-    grpc_exec_ctx* exec_ctx, grpc_resource_quota* resource_quota);
-static bool rq_reclaim(grpc_exec_ctx* exec_ctx,
-                       grpc_resource_quota* resource_quota, bool destructive);
+    grpc_resource_quota* resource_quota);
+static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive);
 
-static void rq_step(grpc_exec_ctx* exec_ctx, void* rq, grpc_error* error) {
+static void rq_step(void* rq, grpc_error* error) {
   grpc_resource_quota* resource_quota = (grpc_resource_quota*)rq;
   resource_quota->step_scheduled = false;
   do {
-    if (rq_alloc(exec_ctx, resource_quota)) goto done;
-  } while (rq_reclaim_from_per_user_free_pool(exec_ctx, resource_quota));
+    if (rq_alloc(resource_quota)) goto done;
+  } while (rq_reclaim_from_per_user_free_pool(resource_quota));
 
-  if (!rq_reclaim(exec_ctx, resource_quota, false)) {
-    rq_reclaim(exec_ctx, resource_quota, true);
+  if (!rq_reclaim(resource_quota, false)) {
+    rq_reclaim(resource_quota, true);
   }
 
 done:
-  grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
 }
 
-static void rq_step_sched(grpc_exec_ctx* exec_ctx,
-                          grpc_resource_quota* resource_quota) {
+static void rq_step_sched(grpc_resource_quota* resource_quota) {
   if (resource_quota->step_scheduled) return;
   resource_quota->step_scheduled = true;
   grpc_resource_quota_ref_internal(resource_quota);
-  GRPC_CLOSURE_SCHED(exec_ctx, &resource_quota->rq_step_closure,
-                     GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(&resource_quota->rq_step_closure, GRPC_ERROR_NONE);
 }
 
 /* update the atomically available resource estimate - use no barriers since
@@ -286,8 +281,7 @@
 }
 
 /* returns true if all allocations are completed */
-static bool rq_alloc(grpc_exec_ctx* exec_ctx,
-                     grpc_resource_quota* resource_quota) {
+static bool rq_alloc(grpc_resource_quota* resource_quota) {
   grpc_resource_user* resource_user;
   while ((resource_user = rulist_pop_head(resource_quota,
                                           GRPC_RULIST_AWAITING_ALLOCATION))) {
@@ -307,9 +301,9 @@
       int64_t aborted_allocations = resource_user->outstanding_allocations;
       resource_user->outstanding_allocations = 0;
       resource_user->free_pool += aborted_allocations;
-      GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
+      GRPC_CLOSURE_LIST_SCHED(&resource_user->on_allocated);
       gpr_mu_unlock(&resource_user->mu);
-      ru_unref_by(exec_ctx, resource_user, (gpr_atm)aborted_allocations);
+      ru_unref_by(resource_user, (gpr_atm)aborted_allocations);
       continue;
     }
     if (resource_user->free_pool < 0 &&
@@ -333,7 +327,7 @@
     if (resource_user->free_pool >= 0) {
       resource_user->allocating = false;
       resource_user->outstanding_allocations = 0;
-      GRPC_CLOSURE_LIST_SCHED(exec_ctx, &resource_user->on_allocated);
+      GRPC_CLOSURE_LIST_SCHED(&resource_user->on_allocated);
       gpr_mu_unlock(&resource_user->mu);
     } else {
       rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
@@ -346,7 +340,7 @@
 
 /* returns true if any memory could be reclaimed from buffers */
 static bool rq_reclaim_from_per_user_free_pool(
-    grpc_exec_ctx* exec_ctx, grpc_resource_quota* resource_quota) {
+    grpc_resource_quota* resource_quota) {
   grpc_resource_user* resource_user;
   while ((resource_user = rulist_pop_head(resource_quota,
                                           GRPC_RULIST_NON_EMPTY_FREE_POOL))) {
@@ -373,8 +367,7 @@
 }
 
 /* returns true if reclamation is proceeding */
-static bool rq_reclaim(grpc_exec_ctx* exec_ctx,
-                       grpc_resource_quota* resource_quota, bool destructive) {
+static bool rq_reclaim(grpc_resource_quota* resource_quota, bool destructive) {
   if (resource_quota->reclaiming) return true;
   grpc_rulist list = destructive ? GRPC_RULIST_RECLAIMER_DESTRUCTIVE
                                  : GRPC_RULIST_RECLAIMER_BENIGN;
@@ -392,7 +385,7 @@
   resource_quota->debug_only_last_reclaimer_resource_user = resource_user;
   resource_quota->debug_only_last_initiated_reclaimer = c;
   resource_user->reclaimers[destructive] = nullptr;
-  GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_RUN(c, GRPC_ERROR_NONE);
   return true;
 }
 
@@ -412,10 +405,10 @@
   gpr_ref(&rc->refs);
 }
 
-static void ru_slice_unref(grpc_exec_ctx* exec_ctx, void* p) {
+static void ru_slice_unref(void* p) {
   ru_slice_refcount* rc = (ru_slice_refcount*)p;
   if (gpr_unref(&rc->refs)) {
-    grpc_resource_user_free(exec_ctx, rc->resource_user, rc->size);
+    grpc_resource_user_free(rc->resource_user, rc->size);
     gpr_free(rc);
   }
 }
@@ -445,61 +438,57 @@
  * the combiner
  */
 
-static void ru_allocate(grpc_exec_ctx* exec_ctx, void* ru, grpc_error* error) {
+static void ru_allocate(void* ru, grpc_error* error) {
   grpc_resource_user* resource_user = (grpc_resource_user*)ru;
   if (rulist_empty(resource_user->resource_quota,
                    GRPC_RULIST_AWAITING_ALLOCATION)) {
-    rq_step_sched(exec_ctx, resource_user->resource_quota);
+    rq_step_sched(resource_user->resource_quota);
   }
   rulist_add_tail(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
 }
 
-static void ru_add_to_free_pool(grpc_exec_ctx* exec_ctx, void* ru,
-                                grpc_error* error) {
+static void ru_add_to_free_pool(void* ru, grpc_error* error) {
   grpc_resource_user* resource_user = (grpc_resource_user*)ru;
   if (!rulist_empty(resource_user->resource_quota,
                     GRPC_RULIST_AWAITING_ALLOCATION) &&
       rulist_empty(resource_user->resource_quota,
                    GRPC_RULIST_NON_EMPTY_FREE_POOL)) {
-    rq_step_sched(exec_ctx, resource_user->resource_quota);
+    rq_step_sched(resource_user->resource_quota);
   }
   rulist_add_tail(resource_user, GRPC_RULIST_NON_EMPTY_FREE_POOL);
 }
 
-static bool ru_post_reclaimer(grpc_exec_ctx* exec_ctx,
-                              grpc_resource_user* resource_user,
+static bool ru_post_reclaimer(grpc_resource_user* resource_user,
                               bool destructive) {
   grpc_closure* closure = resource_user->new_reclaimers[destructive];
   GPR_ASSERT(closure != nullptr);
   resource_user->new_reclaimers[destructive] = nullptr;
   GPR_ASSERT(resource_user->reclaimers[destructive] == nullptr);
   if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_CANCELLED);
+    GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CANCELLED);
     return false;
   }
   resource_user->reclaimers[destructive] = closure;
   return true;
 }
 
-static void ru_post_benign_reclaimer(grpc_exec_ctx* exec_ctx, void* ru,
-                                     grpc_error* error) {
+static void ru_post_benign_reclaimer(void* ru, grpc_error* error) {
   grpc_resource_user* resource_user = (grpc_resource_user*)ru;
-  if (!ru_post_reclaimer(exec_ctx, resource_user, false)) return;
+  if (!ru_post_reclaimer(resource_user, false)) return;
   if (!rulist_empty(resource_user->resource_quota,
                     GRPC_RULIST_AWAITING_ALLOCATION) &&
       rulist_empty(resource_user->resource_quota,
                    GRPC_RULIST_NON_EMPTY_FREE_POOL) &&
       rulist_empty(resource_user->resource_quota,
                    GRPC_RULIST_RECLAIMER_BENIGN)) {
-    rq_step_sched(exec_ctx, resource_user->resource_quota);
+    rq_step_sched(resource_user->resource_quota);
   }
   rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
 }
 
-static void ru_post_destructive_reclaimer(grpc_exec_ctx* exec_ctx, void* ru,
-                                          grpc_error* error) {
+static void ru_post_destructive_reclaimer(void* ru, grpc_error* error) {
   grpc_resource_user* resource_user = (grpc_resource_user*)ru;
-  if (!ru_post_reclaimer(exec_ctx, resource_user, true)) return;
+  if (!ru_post_reclaimer(resource_user, true)) return;
   if (!rulist_empty(resource_user->resource_quota,
                     GRPC_RULIST_AWAITING_ALLOCATION) &&
       rulist_empty(resource_user->resource_quota,
@@ -508,51 +497,48 @@
                    GRPC_RULIST_RECLAIMER_BENIGN) &&
       rulist_empty(resource_user->resource_quota,
                    GRPC_RULIST_RECLAIMER_DESTRUCTIVE)) {
-    rq_step_sched(exec_ctx, resource_user->resource_quota);
+    rq_step_sched(resource_user->resource_quota);
   }
   rulist_add_tail(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
 }
 
-static void ru_shutdown(grpc_exec_ctx* exec_ctx, void* ru, grpc_error* error) {
+static void ru_shutdown(void* ru, grpc_error* error) {
   if (grpc_resource_quota_trace.enabled()) {
     gpr_log(GPR_DEBUG, "RU shutdown %p", ru);
   }
   grpc_resource_user* resource_user = (grpc_resource_user*)ru;
-  GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
-                     GRPC_ERROR_CANCELLED);
-  GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
-                     GRPC_ERROR_CANCELLED);
+  gpr_mu_lock(&resource_user->mu);
+  GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED);
+  GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED);
   resource_user->reclaimers[0] = nullptr;
   resource_user->reclaimers[1] = nullptr;
   rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
   rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_DESTRUCTIVE);
   if (resource_user->allocating) {
-    rq_step_sched(exec_ctx, resource_user->resource_quota);
+    rq_step_sched(resource_user->resource_quota);
   }
+  gpr_mu_unlock(&resource_user->mu);
 }
 
-static void ru_destroy(grpc_exec_ctx* exec_ctx, void* ru, grpc_error* error) {
+static void ru_destroy(void* ru, grpc_error* error) {
   grpc_resource_user* resource_user = (grpc_resource_user*)ru;
   GPR_ASSERT(gpr_atm_no_barrier_load(&resource_user->refs) == 0);
   for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
     rulist_remove(resource_user, (grpc_rulist)i);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[0],
-                     GRPC_ERROR_CANCELLED);
-  GRPC_CLOSURE_SCHED(exec_ctx, resource_user->reclaimers[1],
-                     GRPC_ERROR_CANCELLED);
+  GRPC_CLOSURE_SCHED(resource_user->reclaimers[0], GRPC_ERROR_CANCELLED);
+  GRPC_CLOSURE_SCHED(resource_user->reclaimers[1], GRPC_ERROR_CANCELLED);
   if (resource_user->free_pool != 0) {
     resource_user->resource_quota->free_pool += resource_user->free_pool;
-    rq_step_sched(exec_ctx, resource_user->resource_quota);
+    rq_step_sched(resource_user->resource_quota);
   }
-  grpc_resource_quota_unref_internal(exec_ctx, resource_user->resource_quota);
+  grpc_resource_quota_unref_internal(resource_user->resource_quota);
   gpr_mu_destroy(&resource_user->mu);
   gpr_free(resource_user->name);
   gpr_free(resource_user);
 }
 
-static void ru_allocated_slices(grpc_exec_ctx* exec_ctx, void* arg,
-                                grpc_error* error) {
+static void ru_allocated_slices(void* arg, grpc_error* error) {
   grpc_resource_user_slice_allocator* slice_allocator =
       (grpc_resource_user_slice_allocator*)arg;
   if (error == GRPC_ERROR_NONE) {
@@ -562,7 +548,7 @@
                                                  slice_allocator->length));
     }
   }
-  GRPC_CLOSURE_RUN(exec_ctx, &slice_allocator->on_done, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(&slice_allocator->on_done, GRPC_ERROR_REF(error));
 }
 
 /*******************************************************************************
@@ -576,23 +562,22 @@
   grpc_closure closure;
 } rq_resize_args;
 
-static void rq_resize(grpc_exec_ctx* exec_ctx, void* args, grpc_error* error) {
+static void rq_resize(void* args, grpc_error* error) {
   rq_resize_args* a = (rq_resize_args*)args;
   int64_t delta = a->size - a->resource_quota->size;
   a->resource_quota->size += delta;
   a->resource_quota->free_pool += delta;
   rq_update_estimate(a->resource_quota);
-  rq_step_sched(exec_ctx, a->resource_quota);
-  grpc_resource_quota_unref_internal(exec_ctx, a->resource_quota);
+  rq_step_sched(a->resource_quota);
+  grpc_resource_quota_unref_internal(a->resource_quota);
   gpr_free(a);
 }
 
-static void rq_reclamation_done(grpc_exec_ctx* exec_ctx, void* rq,
-                                grpc_error* error) {
+static void rq_reclamation_done(void* rq, grpc_error* error) {
   grpc_resource_quota* resource_quota = (grpc_resource_quota*)rq;
   resource_quota->reclaiming = false;
-  rq_step_sched(exec_ctx, resource_quota);
-  grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+  rq_step_sched(resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
 }
 
 /*******************************************************************************
@@ -628,10 +613,9 @@
   return resource_quota;
 }
 
-void grpc_resource_quota_unref_internal(grpc_exec_ctx* exec_ctx,
-                                        grpc_resource_quota* resource_quota) {
+void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota) {
   if (gpr_unref(&resource_quota->refs)) {
-    GRPC_COMBINER_UNREF(exec_ctx, resource_quota->combiner, "resource_quota");
+    GRPC_COMBINER_UNREF(resource_quota->combiner, "resource_quota");
     gpr_free(resource_quota->name);
     gpr_free(resource_quota);
   }
@@ -639,9 +623,8 @@
 
 /* Public API */
 void grpc_resource_quota_unref(grpc_resource_quota* resource_quota) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_resource_quota_unref_internal(resource_quota);
 }
 
 grpc_resource_quota* grpc_resource_quota_ref_internal(
@@ -665,15 +648,14 @@
 /* Public API */
 void grpc_resource_quota_resize(grpc_resource_quota* resource_quota,
                                 size_t size) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   rq_resize_args* a = (rq_resize_args*)gpr_malloc(sizeof(*a));
   a->resource_quota = grpc_resource_quota_ref_internal(resource_quota);
   a->size = (int64_t)size;
   gpr_atm_no_barrier_store(&resource_quota->last_size,
                            (gpr_atm)GPR_MIN((size_t)GPR_ATM_MAX, size));
   GRPC_CLOSURE_INIT(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
-  GRPC_CLOSURE_SCHED(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_CLOSURE_SCHED(&a->closure, GRPC_ERROR_NONE);
 }
 
 size_t grpc_resource_quota_peek_size(grpc_resource_quota* resource_quota) {
@@ -704,8 +686,8 @@
   return rq;
 }
 
-static void rq_destroy(grpc_exec_ctx* exec_ctx, void* rq) {
-  grpc_resource_quota_unref_internal(exec_ctx, (grpc_resource_quota*)rq);
+static void rq_destroy(void* rq) {
+  grpc_resource_quota_unref_internal((grpc_resource_quota*)rq);
 }
 
 static int rq_cmp(void* a, void* b) { return GPR_ICMP(a, b); }
@@ -773,14 +755,12 @@
   GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&resource_user->refs, amount) != 0);
 }
 
-static void ru_unref_by(grpc_exec_ctx* exec_ctx,
-                        grpc_resource_user* resource_user, gpr_atm amount) {
+static void ru_unref_by(grpc_resource_user* resource_user, gpr_atm amount) {
   GPR_ASSERT(amount > 0);
   gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
   GPR_ASSERT(old >= amount);
   if (old == amount) {
-    GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->destroy_closure,
-                       GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&resource_user->destroy_closure, GRPC_ERROR_NONE);
   }
 }
 
@@ -788,16 +768,13 @@
   ru_ref_by(resource_user, 1);
 }
 
-void grpc_resource_user_unref(grpc_exec_ctx* exec_ctx,
-                              grpc_resource_user* resource_user) {
-  ru_unref_by(exec_ctx, resource_user, 1);
+void grpc_resource_user_unref(grpc_resource_user* resource_user) {
+  ru_unref_by(resource_user, 1);
 }
 
-void grpc_resource_user_shutdown(grpc_exec_ctx* exec_ctx,
-                                 grpc_resource_user* resource_user) {
+void grpc_resource_user_shutdown(grpc_resource_user* resource_user) {
   if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_CREATE(
             ru_shutdown, resource_user,
             grpc_combiner_scheduler(resource_user->resource_quota->combiner)),
@@ -805,8 +782,7 @@
   }
 }
 
-void grpc_resource_user_alloc(grpc_exec_ctx* exec_ctx,
-                              grpc_resource_user* resource_user, size_t size,
+void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
                               grpc_closure* optional_on_done) {
   gpr_mu_lock(&resource_user->mu);
   ru_ref_by(resource_user, (gpr_atm)size);
@@ -822,18 +798,16 @@
                              GRPC_ERROR_NONE);
     if (!resource_user->allocating) {
       resource_user->allocating = true;
-      GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->allocate_closure,
-                         GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(&resource_user->allocate_closure, GRPC_ERROR_NONE);
     }
   } else {
     resource_user->outstanding_allocations -= (int64_t)size;
-    GRPC_CLOSURE_SCHED(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(optional_on_done, GRPC_ERROR_NONE);
   }
   gpr_mu_unlock(&resource_user->mu);
 }
 
-void grpc_resource_user_free(grpc_exec_ctx* exec_ctx,
-                             grpc_resource_user* resource_user, size_t size) {
+void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size) {
   gpr_mu_lock(&resource_user->mu);
   bool was_zero_or_negative = resource_user->free_pool <= 0;
   resource_user->free_pool += (int64_t)size;
@@ -846,32 +820,29 @@
   if (is_bigger_than_zero && was_zero_or_negative &&
       !resource_user->added_to_free_pool) {
     resource_user->added_to_free_pool = true;
-    GRPC_CLOSURE_SCHED(exec_ctx, &resource_user->add_to_free_pool_closure,
+    GRPC_CLOSURE_SCHED(&resource_user->add_to_free_pool_closure,
                        GRPC_ERROR_NONE);
   }
   gpr_mu_unlock(&resource_user->mu);
-  ru_unref_by(exec_ctx, resource_user, (gpr_atm)size);
+  ru_unref_by(resource_user, (gpr_atm)size);
 }
 
-void grpc_resource_user_post_reclaimer(grpc_exec_ctx* exec_ctx,
-                                       grpc_resource_user* resource_user,
+void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
                                        bool destructive,
                                        grpc_closure* closure) {
   GPR_ASSERT(resource_user->new_reclaimers[destructive] == nullptr);
   resource_user->new_reclaimers[destructive] = closure;
-  GRPC_CLOSURE_SCHED(exec_ctx,
-                     &resource_user->post_reclaimer_closure[destructive],
+  GRPC_CLOSURE_SCHED(&resource_user->post_reclaimer_closure[destructive],
                      GRPC_ERROR_NONE);
 }
 
-void grpc_resource_user_finish_reclamation(grpc_exec_ctx* exec_ctx,
-                                           grpc_resource_user* resource_user) {
+void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user) {
   if (grpc_resource_quota_trace.enabled()) {
     gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
             resource_user->resource_quota->name, resource_user->name);
   }
   GRPC_CLOSURE_SCHED(
-      exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure,
+      &resource_user->resource_quota->rq_reclamation_done_closure,
       GRPC_ERROR_NONE);
 }
 
@@ -886,12 +857,11 @@
 }
 
 void grpc_resource_user_alloc_slices(
-    grpc_exec_ctx* exec_ctx,
     grpc_resource_user_slice_allocator* slice_allocator, size_t length,
     size_t count, grpc_slice_buffer* dest) {
   slice_allocator->length = length;
   slice_allocator->count = count;
   slice_allocator->dest = dest;
-  grpc_resource_user_alloc(exec_ctx, slice_allocator->resource_user,
-                           count * length, &slice_allocator->on_allocated);
+  grpc_resource_user_alloc(slice_allocator->resource_user, count * length,
+                           &slice_allocator->on_allocated);
 }
diff --git a/src/core/lib/iomgr/resource_quota.h b/src/core/lib/iomgr/resource_quota.h
index 3af93a8..39e3aab 100644
--- a/src/core/lib/iomgr/resource_quota.h
+++ b/src/core/lib/iomgr/resource_quota.h
@@ -24,10 +24,6 @@
 #include "src/core/lib/debug/trace.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** \file Tracks resource usage against a pool.
 
     The current implementation tracks only memory usage, but in the future
@@ -69,8 +65,7 @@
 
 grpc_resource_quota* grpc_resource_quota_ref_internal(
     grpc_resource_quota* resource_quota);
-void grpc_resource_quota_unref_internal(grpc_exec_ctx* exec_ctx,
-                                        grpc_resource_quota* resource_quota);
+void grpc_resource_quota_unref_internal(grpc_resource_quota* resource_quota);
 grpc_resource_quota* grpc_resource_quota_from_channel_args(
     const grpc_channel_args* channel_args);
 
@@ -93,32 +88,26 @@
     grpc_resource_user* resource_user);
 
 void grpc_resource_user_ref(grpc_resource_user* resource_user);
-void grpc_resource_user_unref(grpc_exec_ctx* exec_ctx,
-                              grpc_resource_user* resource_user);
-void grpc_resource_user_shutdown(grpc_exec_ctx* exec_ctx,
-                                 grpc_resource_user* resource_user);
+void grpc_resource_user_unref(grpc_resource_user* resource_user);
+void grpc_resource_user_shutdown(grpc_resource_user* resource_user);
 
 /* Allocate from the resource user (and its quota).
    If optional_on_done is NULL, then allocate immediately. This may push the
    quota over-limit, at which point reclamation will kick in.
    If optional_on_done is non-NULL, it will be scheduled when the allocation has
    been granted by the quota. */
-void grpc_resource_user_alloc(grpc_exec_ctx* exec_ctx,
-                              grpc_resource_user* resource_user, size_t size,
+void grpc_resource_user_alloc(grpc_resource_user* resource_user, size_t size,
                               grpc_closure* optional_on_done);
 /* Release memory back to the quota */
-void grpc_resource_user_free(grpc_exec_ctx* exec_ctx,
-                             grpc_resource_user* resource_user, size_t size);
+void grpc_resource_user_free(grpc_resource_user* resource_user, size_t size);
 /* Post a memory reclaimer to the resource user. Only one benign and one
    destructive reclaimer can be posted at once. When executed, the reclaimer
    MUST call grpc_resource_user_finish_reclamation before it completes, to
    return control to the resource quota. */
-void grpc_resource_user_post_reclaimer(grpc_exec_ctx* exec_ctx,
-                                       grpc_resource_user* resource_user,
+void grpc_resource_user_post_reclaimer(grpc_resource_user* resource_user,
                                        bool destructive, grpc_closure* closure);
 /* Finish a reclamation step */
-void grpc_resource_user_finish_reclamation(grpc_exec_ctx* exec_ctx,
-                                           grpc_resource_user* resource_user);
+void grpc_resource_user_finish_reclamation(grpc_resource_user* resource_user);
 
 /* Helper to allocate slices from a resource user */
 typedef struct grpc_resource_user_slice_allocator {
@@ -145,17 +134,11 @@
 /* Allocate \a count slices of length \a length into \a dest. Only one request
    can be outstanding at a time. */
 void grpc_resource_user_alloc_slices(
-    grpc_exec_ctx* exec_ctx,
     grpc_resource_user_slice_allocator* slice_allocator, size_t length,
     size_t count, grpc_slice_buffer* dest);
 
 /* Allocate one slice of length \a size synchronously. */
-grpc_slice grpc_resource_user_slice_malloc(grpc_exec_ctx* exec_ctx,
-                                           grpc_resource_user* resource_user,
+grpc_slice grpc_resource_user_slice_malloc(grpc_resource_user* resource_user,
                                            size_t size);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_RESOURCE_QUOTA_H */
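
The comments above spell out the allocation and reclamation contract: grpc_resource_user_alloc either grants immediately (NULL optional_on_done) or schedules the closure once the quota admits the request, grpc_resource_user_free returns memory, and a posted reclaimer must hand control back with grpc_resource_user_finish_reclamation. Below is a sketch of a benign reclaimer honoring that contract; my_cache, my_cache_trim and reclaimer_closure are hypothetical, and the cancellation check mirrors how in-tree reclaimers skip finish_reclamation when shut down:

    // Sketch: a benign reclaimer closure for a hypothetical cache.
    static void my_benign_reclaimer(void* arg, grpc_error* error) {
      my_cache* cache = static_cast<my_cache*>(arg);
      if (error == GRPC_ERROR_NONE) {
        size_t freed = my_cache_trim(cache);              // give memory back
        if (freed > 0) grpc_resource_user_free(cache->resource_user, freed);
      }
      if (error != GRPC_ERROR_CANCELLED) {
        // Required: return control to the quota once this step is done.
        grpc_resource_user_finish_reclamation(cache->resource_user);
      }
    }
    // Registered with:
    //   grpc_resource_user_post_reclaimer(cache->resource_user,
    //                                     false /* destructive */,
    //                                     &cache->reclaimer_closure);
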
diff --git a/src/core/lib/iomgr/sockaddr_utils.h b/src/core/lib/iomgr/sockaddr_utils.h
index 090470d..e3bd51a 100644
--- a/src/core/lib/iomgr/sockaddr_utils.h
+++ b/src/core/lib/iomgr/sockaddr_utils.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/iomgr/resolve_address.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Returns true if addr is an IPv4-mapped IPv6 address within the
    ::ffff:0.0.0.0/96 range, or false otherwise.
 
@@ -81,8 +77,4 @@
 
 int grpc_sockaddr_get_family(const grpc_resolved_address* resolved_addr);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_SOCKADDR_UTILS_H */
diff --git a/src/core/lib/iomgr/socket_factory_posix.cc b/src/core/lib/iomgr/socket_factory_posix.cc
index 40bfecd..bc7d0b1 100644
--- a/src/core/lib/iomgr/socket_factory_posix.cc
+++ b/src/core/lib/iomgr/socket_factory_posix.cc
@@ -72,7 +72,7 @@
   return grpc_socket_factory_ref((grpc_socket_factory*)p);
 }
 
-static void socket_factory_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+static void socket_factory_arg_destroy(void* p) {
   grpc_socket_factory_unref((grpc_socket_factory*)p);
 }
 
diff --git a/src/core/lib/iomgr/socket_factory_posix.h b/src/core/lib/iomgr/socket_factory_posix.h
index e8257b0..af57cc5 100644
--- a/src/core/lib/iomgr/socket_factory_posix.h
+++ b/src/core/lib/iomgr/socket_factory_posix.h
@@ -23,10 +23,6 @@
 #include <grpc/support/sync.h>
 #include "src/core/lib/iomgr/resolve_address.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** The virtual table of grpc_socket_factory */
 typedef struct {
   /** Replacement for socket(2) */
@@ -68,8 +64,4 @@
 grpc_socket_factory* grpc_socket_factory_ref(grpc_socket_factory* factory);
 void grpc_socket_factory_unref(grpc_socket_factory* factory);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_FACTORY_POSIX_H */
diff --git a/src/core/lib/iomgr/socket_mutator.cc b/src/core/lib/iomgr/socket_mutator.cc
index ff6c0c7..9d30e46 100644
--- a/src/core/lib/iomgr/socket_mutator.cc
+++ b/src/core/lib/iomgr/socket_mutator.cc
@@ -63,7 +63,7 @@
   return grpc_socket_mutator_ref((grpc_socket_mutator*)p);
 }
 
-static void socket_mutator_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+static void socket_mutator_arg_destroy(void* p) {
   grpc_socket_mutator_unref((grpc_socket_mutator*)p);
 }
 
diff --git a/src/core/lib/iomgr/socket_mutator.h b/src/core/lib/iomgr/socket_mutator.h
index b4103f7..0a97cf6 100644
--- a/src/core/lib/iomgr/socket_mutator.h
+++ b/src/core/lib/iomgr/socket_mutator.h
@@ -24,10 +24,6 @@
 
 #include <stdbool.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** The virtual table of grpc_socket_mutator */
 typedef struct {
   /** Mutates the socket options of \a fd */
@@ -60,8 +56,4 @@
 grpc_socket_mutator* grpc_socket_mutator_ref(grpc_socket_mutator* mutator);
 void grpc_socket_mutator_unref(grpc_socket_mutator* mutator);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_MUTATOR_H */
diff --git a/src/core/lib/iomgr/socket_utils.h b/src/core/lib/iomgr/socket_utils.h
index 4816ab6..9fd141b 100644
--- a/src/core/lib/iomgr/socket_utils.h
+++ b/src/core/lib/iomgr/socket_utils.h
@@ -21,15 +21,7 @@
 
 #include <stddef.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* A wrapper for inet_ntop on POSIX systems and InetNtop on Windows systems */
 const char* grpc_inet_ntop(int af, const void* src, char* dst, size_t size);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_H */
diff --git a/src/core/lib/iomgr/socket_utils_posix.h b/src/core/lib/iomgr/socket_utils_posix.h
index 7a9c813..77df420 100644
--- a/src/core/lib/iomgr/socket_utils_posix.h
+++ b/src/core/lib/iomgr/socket_utils_posix.h
@@ -29,10 +29,6 @@
 #include "src/core/lib/iomgr/socket_factory_posix.h"
 #include "src/core/lib/iomgr/socket_mutator.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* a wrapper for accept or accept4 */
 int grpc_accept4(int sockfd, grpc_resolved_address* resolved_addr, int nonblock,
                  int cloexec);
@@ -133,8 +129,4 @@
     grpc_socket_factory* factory, const grpc_resolved_address* addr, int type,
     int protocol, grpc_dualstack_mode* dsmode, int* newfd);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_UTILS_POSIX_H */
diff --git a/src/core/lib/iomgr/socket_windows.cc b/src/core/lib/iomgr/socket_windows.cc
index aee80f4..9bb6a75 100644
--- a/src/core/lib/iomgr/socket_windows.cc
+++ b/src/core/lib/iomgr/socket_windows.cc
@@ -109,37 +109,34 @@
 -) The IOCP already completed in the background, and we need to call
 the callback now.
 -) The IOCP hasn't completed yet, and we're queuing it for later. */
-static void socket_notify_on_iocp(grpc_exec_ctx* exec_ctx,
-                                  grpc_winsocket* socket, grpc_closure* closure,
+static void socket_notify_on_iocp(grpc_winsocket* socket, grpc_closure* closure,
                                   grpc_winsocket_callback_info* info) {
   GPR_ASSERT(info->closure == NULL);
   gpr_mu_lock(&socket->state_mu);
   if (info->has_pending_iocp) {
     info->has_pending_iocp = 0;
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
   } else {
     info->closure = closure;
   }
   gpr_mu_unlock(&socket->state_mu);
 }
 
-void grpc_socket_notify_on_write(grpc_exec_ctx* exec_ctx,
-                                 grpc_winsocket* socket,
+void grpc_socket_notify_on_write(grpc_winsocket* socket,
                                  grpc_closure* closure) {
-  socket_notify_on_iocp(exec_ctx, socket, closure, &socket->write_info);
+  socket_notify_on_iocp(socket, closure, &socket->write_info);
 }
 
-void grpc_socket_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
-                                grpc_closure* closure) {
-  socket_notify_on_iocp(exec_ctx, socket, closure, &socket->read_info);
+void grpc_socket_notify_on_read(grpc_winsocket* socket, grpc_closure* closure) {
+  socket_notify_on_iocp(socket, closure, &socket->read_info);
 }
 
-void grpc_socket_become_ready(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
+void grpc_socket_become_ready(grpc_winsocket* socket,
                               grpc_winsocket_callback_info* info) {
   GPR_ASSERT(!info->has_pending_iocp);
   gpr_mu_lock(&socket->state_mu);
   if (info->closure) {
-    GRPC_CLOSURE_SCHED(exec_ctx, info->closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(info->closure, GRPC_ERROR_NONE);
     info->closure = NULL;
   } else {
     info->has_pending_iocp = 1;
diff --git a/src/core/lib/iomgr/socket_windows.h b/src/core/lib/iomgr/socket_windows.h
index c3ad99d..cb28f2b 100644
--- a/src/core/lib/iomgr/socket_windows.h
+++ b/src/core/lib/iomgr/socket_windows.h
@@ -31,10 +31,6 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/iomgr/iomgr_internal.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* This holds the data for an outstanding read or write on a socket.
    The mutex to protect the concurrent access to that data is the one
    inside the winsocket wrapper. */
@@ -102,22 +98,15 @@
 /* Destroy a socket. Should only be called if there's no pending operation. */
 void grpc_winsocket_destroy(grpc_winsocket* socket);
 
-void grpc_socket_notify_on_write(grpc_exec_ctx* exec_ctx,
-                                 grpc_winsocket* winsocket,
+void grpc_socket_notify_on_write(grpc_winsocket* winsocket,
                                  grpc_closure* closure);
 
-void grpc_socket_notify_on_read(grpc_exec_ctx* exec_ctx,
-                                grpc_winsocket* winsocket,
+void grpc_socket_notify_on_read(grpc_winsocket* winsocket,
                                 grpc_closure* closure);
 
-void grpc_socket_become_ready(grpc_exec_ctx* exec_ctx,
-                              grpc_winsocket* winsocket,
+void grpc_socket_become_ready(grpc_winsocket* winsocket,
                               grpc_winsocket_callback_info* ci);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif
 
 #endif /* GRPC_CORE_LIB_IOMGR_SOCKET_WINDOWS_H */
diff --git a/src/core/lib/iomgr/tcp_client.h b/src/core/lib/iomgr/tcp_client.h
index c18d8a9..5f55d30 100644
--- a/src/core/lib/iomgr/tcp_client.h
+++ b/src/core/lib/iomgr/tcp_client.h
@@ -25,24 +25,15 @@
 #include "src/core/lib/iomgr/pollset_set.h"
 #include "src/core/lib/iomgr/resolve_address.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Asynchronously connect to an address (specified as (addr, len)), and call
    cb with arg and the completed connection when done (or call cb with arg and
    NULL on failure).
    interested_parties points to a set of pollsets that would be interested
    in this connection being established (in order to continue their work) */
-void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* on_connect,
-                             grpc_endpoint** endpoint,
+void grpc_tcp_client_connect(grpc_closure* on_connect, grpc_endpoint** endpoint,
                              grpc_pollset_set* interested_parties,
                              const grpc_channel_args* channel_args,
                              const grpc_resolved_address* addr,
                              grpc_millis deadline);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_H */
diff --git a/src/core/lib/iomgr/tcp_client_posix.cc b/src/core/lib/iomgr/tcp_client_posix.cc
index 8a6262b..8cd5f8d 100644
--- a/src/core/lib/iomgr/tcp_client_posix.cc
+++ b/src/core/lib/iomgr/tcp_client_posix.cc
@@ -96,7 +96,7 @@
   return err;
 }
 
-static void tc_on_alarm(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
+static void tc_on_alarm(void* acp, grpc_error* error) {
   int done;
   async_connect* ac = (async_connect*)acp;
   if (grpc_tcp_trace.enabled()) {
@@ -107,26 +107,24 @@
   gpr_mu_lock(&ac->mu);
   if (ac->fd != nullptr) {
     grpc_fd_shutdown(
-        exec_ctx, ac->fd,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect() timed out"));
+        ac->fd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("connect() timed out"));
   }
   done = (--ac->refs == 0);
   gpr_mu_unlock(&ac->mu);
   if (done) {
     gpr_mu_destroy(&ac->mu);
     gpr_free(ac->addr_str);
-    grpc_channel_args_destroy(exec_ctx, ac->channel_args);
+    grpc_channel_args_destroy(ac->channel_args);
     gpr_free(ac);
   }
 }
 
 grpc_endpoint* grpc_tcp_client_create_from_fd(
-    grpc_exec_ctx* exec_ctx, grpc_fd* fd, const grpc_channel_args* channel_args,
-    const char* addr_str) {
-  return grpc_tcp_create(exec_ctx, fd, channel_args, addr_str);
+    grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str) {
+  return grpc_tcp_create(fd, channel_args, addr_str);
 }
 
-static void on_writable(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
+static void on_writable(void* acp, grpc_error* error) {
   async_connect* ac = (async_connect*)acp;
   int so_error = 0;
   socklen_t so_error_size;
@@ -150,7 +148,7 @@
   ac->fd = nullptr;
   gpr_mu_unlock(&ac->mu);
 
-  grpc_timer_cancel(exec_ctx, &ac->alarm);
+  grpc_timer_cancel(&ac->alarm);
 
   gpr_mu_lock(&ac->mu);
   if (error != GRPC_ERROR_NONE) {
@@ -172,9 +170,8 @@
 
   switch (so_error) {
     case 0:
-      grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
-      *ep = grpc_tcp_client_create_from_fd(exec_ctx, fd, ac->channel_args,
-                                           ac->addr_str);
+      grpc_pollset_set_del_fd(ac->interested_parties, fd);
+      *ep = grpc_tcp_client_create_from_fd(fd, ac->channel_args, ac->addr_str);
       fd = nullptr;
       break;
     case ENOBUFS:
@@ -194,7 +191,7 @@
          don't do that! */
       gpr_log(GPR_ERROR, "kernel out of buffers");
       gpr_mu_unlock(&ac->mu);
-      grpc_fd_notify_on_write(exec_ctx, fd, &ac->write_closure);
+      grpc_fd_notify_on_write(fd, &ac->write_closure);
       return;
     case ECONNREFUSED:
       /* This error shouldn't happen for anything other than connect(). */
@@ -209,12 +206,15 @@
 
 finish:
   if (fd != nullptr) {
-    grpc_pollset_set_del_fd(exec_ctx, ac->interested_parties, fd);
-    grpc_fd_orphan(exec_ctx, fd, nullptr, nullptr, false /* already_closed */,
+    grpc_pollset_set_del_fd(ac->interested_parties, fd);
+    grpc_fd_orphan(fd, nullptr, nullptr, false /* already_closed */,
                    "tcp_client_orphan");
     fd = nullptr;
   }
   done = (--ac->refs == 0);
+  // Create a copy of the data from "ac" to be accessed after the unlock, as
+  // "ac" and its contents may be deallocated by the time they are read.
+  const grpc_slice addr_str_slice = grpc_slice_from_copied_string(ac->addr_str);
   gpr_mu_unlock(&ac->mu);
   if (error != GRPC_ERROR_NONE) {
     char* error_descr;
@@ -228,89 +228,93 @@
     gpr_free(error_descr);
     gpr_free(desc);
     error = grpc_error_set_str(error, GRPC_ERROR_STR_TARGET_ADDRESS,
-                               grpc_slice_from_copied_string(ac->addr_str));
+                               addr_str_slice /* takes ownership */);
+  } else {
+    grpc_slice_unref(addr_str_slice);
   }
   if (done) {
+    // This is safe even outside the lock, because "done", the sentinel, is
+    // populated *inside* the lock.
     gpr_mu_destroy(&ac->mu);
     gpr_free(ac->addr_str);
-    grpc_channel_args_destroy(exec_ctx, ac->channel_args);
+    grpc_channel_args_destroy(ac->channel_args);
     gpr_free(ac);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
+  GRPC_CLOSURE_SCHED(closure, error);
 }
 
-static void tcp_client_connect_impl(grpc_exec_ctx* exec_ctx,
-                                    grpc_closure* closure, grpc_endpoint** ep,
-                                    grpc_pollset_set* interested_parties,
-                                    const grpc_channel_args* channel_args,
-                                    const grpc_resolved_address* addr,
-                                    grpc_millis deadline) {
-  int fd;
+grpc_error* grpc_tcp_client_prepare_fd(const grpc_channel_args* channel_args,
+                                       const grpc_resolved_address* addr,
+                                       grpc_resolved_address* mapped_addr,
+                                       grpc_fd** fdobj) {
   grpc_dualstack_mode dsmode;
-  int err;
-  async_connect* ac;
-  grpc_resolved_address addr6_v4mapped;
-  grpc_resolved_address addr4_copy;
-  grpc_fd* fdobj;
+  int fd;
+  grpc_error* error;
   char* name;
   char* addr_str;
-  grpc_error* error;
-
-  *ep = nullptr;
-
-  /* Use dualstack sockets where available. */
-  if (grpc_sockaddr_to_v4mapped(addr, &addr6_v4mapped)) {
-    addr = &addr6_v4mapped;
+  *fdobj = nullptr;
+  /* Use dualstack sockets where available. Set mapped to v6 or v4 mapped to
+     v6. */
+  if (!grpc_sockaddr_to_v4mapped(addr, mapped_addr)) {
+    /* addr is v4 mapped to v6 or v6. */
+    memcpy(mapped_addr, addr, sizeof(*mapped_addr));
   }
-
-  error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
+  error =
+      grpc_create_dualstack_socket(mapped_addr, SOCK_STREAM, 0, &dsmode, &fd);
   if (error != GRPC_ERROR_NONE) {
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
-    return;
+    return error;
   }
   if (dsmode == GRPC_DSMODE_IPV4) {
-    /* If we got an AF_INET socket, map the address back to IPv4. */
-    GPR_ASSERT(grpc_sockaddr_is_v4mapped(addr, &addr4_copy));
-    addr = &addr4_copy;
+    /* Original addr is either v4 or v4 mapped to v6. Set mapped_addr to v4. */
+    if (!grpc_sockaddr_is_v4mapped(addr, mapped_addr)) {
+      memcpy(mapped_addr, addr, sizeof(*mapped_addr));
+    }
   }
-  if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, error);
-    return;
+  if ((error = prepare_socket(mapped_addr, fd, channel_args)) !=
+      GRPC_ERROR_NONE) {
+    return error;
   }
+  addr_str = grpc_sockaddr_to_uri(mapped_addr);
+  gpr_asprintf(&name, "tcp-client:%s", addr_str);
+  *fdobj = grpc_fd_create(fd, name);
+  gpr_free(name);
+  gpr_free(addr_str);
+  return GRPC_ERROR_NONE;
+}
 
+void grpc_tcp_client_create_from_prepared_fd(
+    grpc_pollset_set* interested_parties, grpc_closure* closure, grpc_fd* fdobj,
+    const grpc_channel_args* channel_args, const grpc_resolved_address* addr,
+    grpc_millis deadline, grpc_endpoint** ep) {
+  const int fd = grpc_fd_wrapped_fd(fdobj);
+  int err;
+  async_connect* ac;
   do {
     GPR_ASSERT(addr->len < ~(socklen_t)0);
     err = connect(fd, (const struct sockaddr*)addr->addr, (socklen_t)addr->len);
   } while (err < 0 && errno == EINTR);
-
-  addr_str = grpc_sockaddr_to_uri(addr);
-  gpr_asprintf(&name, "tcp-client:%s", addr_str);
-
-  fdobj = grpc_fd_create(fd, name);
-
   if (err >= 0) {
-    *ep =
-        grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
-    goto done;
+    char* addr_str = grpc_sockaddr_to_uri(addr);
+    *ep = grpc_tcp_client_create_from_fd(fdobj, channel_args, addr_str);
+    gpr_free(addr_str);
+    GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
+    return;
   }
-
   if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
-    grpc_fd_orphan(exec_ctx, fdobj, nullptr, nullptr,
-                   false /* already_closed */, "tcp_client_connect_error");
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
-    goto done;
+    grpc_fd_orphan(fdobj, nullptr, nullptr, false /* already_closed */,
+                   "tcp_client_connect_error");
+    GRPC_CLOSURE_SCHED(closure, GRPC_OS_ERROR(errno, "connect"));
+    return;
   }
 
-  grpc_pollset_set_add_fd(exec_ctx, interested_parties, fdobj);
+  grpc_pollset_set_add_fd(interested_parties, fdobj);
 
   ac = (async_connect*)gpr_malloc(sizeof(async_connect));
   ac->closure = closure;
   ac->ep = ep;
   ac->fd = fdobj;
   ac->interested_parties = interested_parties;
-  ac->addr_str = addr_str;
-  addr_str = nullptr;
+  ac->addr_str = grpc_sockaddr_to_uri(addr);
   gpr_mu_init(&ac->mu);
   ac->refs = 2;
   GRPC_CLOSURE_INIT(&ac->write_closure, on_writable, ac,
@@ -324,32 +328,44 @@
 
   gpr_mu_lock(&ac->mu);
   GRPC_CLOSURE_INIT(&ac->on_alarm, tc_on_alarm, ac, grpc_schedule_on_exec_ctx);
-  grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm);
-  grpc_fd_notify_on_write(exec_ctx, ac->fd, &ac->write_closure);
+  grpc_timer_init(&ac->alarm, deadline, &ac->on_alarm);
+  grpc_fd_notify_on_write(ac->fd, &ac->write_closure);
   gpr_mu_unlock(&ac->mu);
+}
 
-done:
-  gpr_free(name);
-  gpr_free(addr_str);
+static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
+                                    grpc_pollset_set* interested_parties,
+                                    const grpc_channel_args* channel_args,
+                                    const grpc_resolved_address* addr,
+                                    grpc_millis deadline) {
+  grpc_resolved_address mapped_addr;
+  grpc_fd* fdobj = nullptr;
+  grpc_error* error;
+  *ep = nullptr;
+  if ((error = grpc_tcp_client_prepare_fd(channel_args, addr, &mapped_addr,
+                                          &fdobj)) != GRPC_ERROR_NONE) {
+    GRPC_CLOSURE_SCHED(closure, error);
+    return;
+  }
+  grpc_tcp_client_create_from_prepared_fd(interested_parties, closure, fdobj,
+                                          channel_args, &mapped_addr, deadline,
+                                          ep);
 }
 
 // overridden by api_fuzzer.c
-extern "C" {
 void (*grpc_tcp_client_connect_impl)(
-    grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
+    grpc_closure* closure, grpc_endpoint** ep,
     grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
     const grpc_resolved_address* addr,
     grpc_millis deadline) = tcp_client_connect_impl;
-}
 
-void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                             grpc_endpoint** ep,
+void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
                              grpc_pollset_set* interested_parties,
                              const grpc_channel_args* channel_args,
                              const grpc_resolved_address* addr,
                              grpc_millis deadline) {
-  grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
-                               channel_args, addr, deadline);
+  grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args,
+                               addr, deadline);
 }
 
 #endif
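
The addr_str_slice handling in on_writable above is the general snapshot-under-lock fix: copy whatever will be needed after the mutex is released while the lock still guarantees the shared object is alive, because another reference holder may free it the moment the lock drops. Schematically, with hypothetical names standing in for async_connect:

    // Sketch of the snapshot-under-lock pattern; 'ctx', destroy_ctx and
    // report_target_address are hypothetical stand-ins.
    gpr_mu_lock(&ctx->mu);
    bool last_ref = (--ctx->refs == 0);
    // Copy while the lock still guarantees 'ctx' is alive.
    grpc_slice addr_copy = grpc_slice_from_copied_string(ctx->addr_str);
    gpr_mu_unlock(&ctx->mu);
    if (last_ref) destroy_ctx(ctx);
    // 'ctx' must not be dereferenced past this point; use the copy instead.
    report_target_address(addr_copy);  // consumer takes ownership of the slice
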
diff --git a/src/core/lib/iomgr/tcp_client_posix.h b/src/core/lib/iomgr/tcp_client_posix.h
index 13d9178..57e50a6 100644
--- a/src/core/lib/iomgr/tcp_client_posix.h
+++ b/src/core/lib/iomgr/tcp_client_posix.h
@@ -23,16 +23,44 @@
 #include "src/core/lib/iomgr/ev_posix.h"
 #include "src/core/lib/iomgr/tcp_client.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* Create an endpoint from a connected grpc_fd.
 
+   fd: a connected FD. Ownership is taken.
+   channel_args: may contain custom settings for the endpoint
+   addr_str: destination address in printable format
+   Returns: a new endpoint
+*/
 grpc_endpoint* grpc_tcp_client_create_from_fd(
-    grpc_exec_ctx* exec_ctx, grpc_fd* fd, const grpc_channel_args* channel_args,
-    const char* addr_str);
+    grpc_fd* fd, const grpc_channel_args* channel_args, const char* addr_str);
 
-#ifdef __cplusplus
-}
-#endif
+/* Return a configured, unbound, unconnected TCP client grpc_fd.
+
+   channel_args: may contain custom settings for the fd
+   addr: the destination address
+   mapped_addr: out parameter. addr mapped to an address appropriate to the
+     type of socket FD created. For example, if addr is IPv4 and dual stack
+     sockets are available, mapped_addr will be an IPv4-mapped IPv6 address
+   fdobj: out parameter. The new FD
+   Returns: error, if any. Out parameters are not set on error
+*/
+grpc_error* grpc_tcp_client_prepare_fd(const grpc_channel_args* channel_args,
+                                       const grpc_resolved_address* addr,
+                                       grpc_resolved_address* mapped_addr,
+                                       grpc_fd** fdobj);
+
+/* Connect a configured TCP client grpc_fd.
+
+   interested_parties: a set of pollsets that would be interested in this
+     connection being established (in order to continue their work)
+   closure: called when complete. On success, *ep will be set.
+   fdobj: an FD returned from grpc_tcp_client_prepare_fd(). Ownership is taken
+   channel_args: may contain custom settings for the endpoint
+   deadline: connection deadline
+   ep: out parameter. Set before closure is called if successful
+*/
+void grpc_tcp_client_create_from_prepared_fd(
+    grpc_pollset_set* interested_parties, grpc_closure* closure, grpc_fd* fdobj,
+    const grpc_channel_args* channel_args, const grpc_resolved_address* addr,
+    grpc_millis deadline, grpc_endpoint** ep);
 
 #endif /* GRPC_CORE_LIB_IOMGR_TCP_CLIENT_POSIX_H */
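
The new declarations above split connection setup into two steps so other code can reuse them. A sketch of the documented flow, essentially what tcp_client_connect_impl in tcp_client_posix.cc now does; connect_with_prepared_fd is an illustrative name.

/* Prepare an fd for addr, then hand it off (ownership transfers) to
   grpc_tcp_client_create_from_prepared_fd. Out params stay unset on error. */
static void connect_with_prepared_fd(grpc_closure* closure, grpc_endpoint** ep,
                                     grpc_pollset_set* interested_parties,
                                     const grpc_channel_args* channel_args,
                                     const grpc_resolved_address* addr,
                                     grpc_millis deadline) {
  grpc_resolved_address mapped_addr;
  grpc_fd* fdobj = nullptr;
  *ep = nullptr;
  grpc_error* error =
      grpc_tcp_client_prepare_fd(channel_args, addr, &mapped_addr, &fdobj);
  if (error != GRPC_ERROR_NONE) {
    GRPC_CLOSURE_SCHED(closure, error);
    return;
  }
  grpc_tcp_client_create_from_prepared_fd(interested_parties, closure, fdobj,
                                          channel_args, &mapped_addr, deadline,
                                          ep);
}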
diff --git a/src/core/lib/iomgr/tcp_client_uv.cc b/src/core/lib/iomgr/tcp_client_uv.cc
index 7a5727e..4e9c7cc 100644
--- a/src/core/lib/iomgr/tcp_client_uv.cc
+++ b/src/core/lib/iomgr/tcp_client_uv.cc
@@ -46,17 +46,15 @@
   grpc_resource_quota* resource_quota;
 } grpc_uv_tcp_connect;
 
-static void uv_tcp_connect_cleanup(grpc_exec_ctx* exec_ctx,
-                                   grpc_uv_tcp_connect* connect) {
-  grpc_resource_quota_unref_internal(exec_ctx, connect->resource_quota);
+static void uv_tcp_connect_cleanup(grpc_uv_tcp_connect* connect) {
+  grpc_resource_quota_unref_internal(connect->resource_quota);
   gpr_free(connect->addr_name);
   gpr_free(connect);
 }
 
 static void tcp_close_callback(uv_handle_t* handle) { gpr_free(handle); }
 
-static void uv_tc_on_alarm(grpc_exec_ctx* exec_ctx, void* acp,
-                           grpc_error* error) {
+static void uv_tc_on_alarm(void* acp, grpc_error* error) {
   int done;
   grpc_uv_tcp_connect* connect = (grpc_uv_tcp_connect*)acp;
   if (grpc_tcp_trace.enabled()) {
@@ -72,17 +70,17 @@
   }
   done = (--connect->refs == 0);
   if (done) {
-    uv_tcp_connect_cleanup(exec_ctx, connect);
+    uv_tcp_connect_cleanup(connect);
   }
 }
 
 static void uv_tc_on_connect(uv_connect_t* req, int status) {
   grpc_uv_tcp_connect* connect = (grpc_uv_tcp_connect*)req->data;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_error* error = GRPC_ERROR_NONE;
   int done;
   grpc_closure* closure = connect->closure;
-  grpc_timer_cancel(&exec_ctx, &connect->alarm);
+  grpc_timer_cancel(&connect->alarm);
   if (status == 0) {
     *connect->endpoint = grpc_tcp_create(
         connect->tcp_handle, connect->resource_quota, connect->addr_name);
@@ -107,15 +105,13 @@
   }
   done = (--connect->refs == 0);
   if (done) {
-    grpc_exec_ctx_flush(&exec_ctx);
-    uv_tcp_connect_cleanup(&exec_ctx, connect);
+    grpc_core::ExecCtx::Get()->Flush();
+    uv_tcp_connect_cleanup(connect);
   }
-  GRPC_CLOSURE_SCHED(&exec_ctx, closure, error);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_CLOSURE_SCHED(closure, error);
 }
 
-static void tcp_client_connect_impl(grpc_exec_ctx* exec_ctx,
-                                    grpc_closure* closure, grpc_endpoint** ep,
+static void tcp_client_connect_impl(grpc_closure* closure, grpc_endpoint** ep,
                                     grpc_pollset_set* interested_parties,
                                     const grpc_channel_args* channel_args,
                                     const grpc_resolved_address* resolved_addr,
@@ -130,7 +126,7 @@
   if (channel_args != NULL) {
     for (size_t i = 0; i < channel_args->num_args; i++) {
       if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
-        grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+        grpc_resource_quota_unref_internal(resource_quota);
         resource_quota = grpc_resource_quota_ref_internal(
             (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
       }
@@ -157,26 +153,23 @@
                  (const struct sockaddr*)resolved_addr->addr, uv_tc_on_connect);
   GRPC_CLOSURE_INIT(&connect->on_alarm, uv_tc_on_alarm, connect,
                     grpc_schedule_on_exec_ctx);
-  grpc_timer_init(exec_ctx, &connect->alarm, deadline, &connect->on_alarm);
+  grpc_timer_init(&connect->alarm, deadline, &connect->on_alarm);
 }
 
 // overridden by api_fuzzer.c
-extern "C" {
 void (*grpc_tcp_client_connect_impl)(
-    grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
+    grpc_closure* closure, grpc_endpoint** ep,
     grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
     const grpc_resolved_address* addr,
     grpc_millis deadline) = tcp_client_connect_impl;
-}
 
-void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                             grpc_endpoint** ep,
+void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
                              grpc_pollset_set* interested_parties,
                              const grpc_channel_args* channel_args,
                              const grpc_resolved_address* addr,
                              grpc_millis deadline) {
-  grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
-                               channel_args, addr, deadline);
+  grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args,
+                               addr, deadline);
 }
 
 #endif /* GRPC_UV */
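
The libuv file shows the other half of the ExecCtx change: callbacks entering gRPC from the uv event loop construct a scoped grpc_core::ExecCtx instead of GRPC_EXEC_CTX_INIT / grpc_exec_ctx_finish. A stripped-down sketch of that shape; on_uv_event and the use of handle->data to carry the closure are assumptions for illustration only.

/* Pattern used by uv_tc_on_connect above: scoped ExecCtx on entry from the
   uv loop, closures scheduled without an exec_ctx argument, and pending work
   drained by Flush() or at scope exit. */
static void on_uv_event(uv_handle_t* handle) {
  grpc_core::ExecCtx exec_ctx;
  grpc_closure* done = static_cast<grpc_closure*>(handle->data);  // assumed storage
  GRPC_CLOSURE_SCHED(done, GRPC_ERROR_NONE);
  grpc_core::ExecCtx::Get()->Flush();  // optional explicit drain, as above
}  // ~ExecCtx runs anything still pending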
diff --git a/src/core/lib/iomgr/tcp_client_windows.cc b/src/core/lib/iomgr/tcp_client_windows.cc
index 8a87e92..97aa923 100644
--- a/src/core/lib/iomgr/tcp_client_windows.cc
+++ b/src/core/lib/iomgr/tcp_client_windows.cc
@@ -52,13 +52,12 @@
   grpc_channel_args* channel_args;
 } async_connect;
 
-static void async_connect_unlock_and_cleanup(grpc_exec_ctx* exec_ctx,
-                                             async_connect* ac,
+static void async_connect_unlock_and_cleanup(async_connect* ac,
                                              grpc_winsocket* socket) {
   int done = (--ac->refs == 0);
   gpr_mu_unlock(&ac->mu);
   if (done) {
-    grpc_channel_args_destroy(exec_ctx, ac->channel_args);
+    grpc_channel_args_destroy(ac->channel_args);
     gpr_mu_destroy(&ac->mu);
     gpr_free(ac->addr_name);
     gpr_free(ac);
@@ -66,7 +65,7 @@
   if (socket != NULL) grpc_winsocket_destroy(socket);
 }
 
-static void on_alarm(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
+static void on_alarm(void* acp, grpc_error* error) {
   async_connect* ac = (async_connect*)acp;
   gpr_mu_lock(&ac->mu);
   grpc_winsocket* socket = ac->socket;
@@ -74,10 +73,10 @@
   if (socket != NULL) {
     grpc_winsocket_shutdown(socket);
   }
-  async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
+  async_connect_unlock_and_cleanup(ac, socket);
 }
 
-static void on_connect(grpc_exec_ctx* exec_ctx, void* acp, grpc_error* error) {
+static void on_connect(void* acp, grpc_error* error) {
   async_connect* ac = (async_connect*)acp;
   grpc_endpoint** ep = ac->endpoint;
   GPR_ASSERT(*ep == NULL);
@@ -90,7 +89,7 @@
   ac->socket = NULL;
   gpr_mu_unlock(&ac->mu);
 
-  grpc_timer_cancel(exec_ctx, &ac->alarm);
+  grpc_timer_cancel(&ac->alarm);
 
   gpr_mu_lock(&ac->mu);
 
@@ -106,8 +105,7 @@
         error = GRPC_WSA_ERROR(WSAGetLastError(), "ConnectEx");
         closesocket(socket->socket);
       } else {
-        *ep =
-            grpc_tcp_create(exec_ctx, socket, ac->channel_args, ac->addr_name);
+        *ep = grpc_tcp_create(socket, ac->channel_args, ac->addr_name);
         socket = NULL;
       }
     } else {
@@ -115,18 +113,20 @@
     }
   }
 
-  async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
+  async_connect_unlock_and_cleanup(ac, socket);
   /* If the connection was aborted, the callback was already called when
      the deadline was met. */
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(on_done, error);
 }
 
 /* Tries to issue one async connection, then schedules both an IOCP
    notification request for the connection, and one timeout alert. */
-static void tcp_client_connect_impl(
-    grpc_exec_ctx* exec_ctx, grpc_closure* on_done, grpc_endpoint** endpoint,
-    grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
-    const grpc_resolved_address* addr, grpc_millis deadline) {
+static void tcp_client_connect_impl(grpc_closure* on_done,
+                                    grpc_endpoint** endpoint,
+                                    grpc_pollset_set* interested_parties,
+                                    const grpc_channel_args* channel_args,
+                                    const grpc_resolved_address* addr,
+                                    grpc_millis deadline) {
   SOCKET sock = INVALID_SOCKET;
   BOOL success;
   int status;
@@ -206,8 +206,8 @@
   GRPC_CLOSURE_INIT(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
 
   GRPC_CLOSURE_INIT(&ac->on_alarm, on_alarm, ac, grpc_schedule_on_exec_ctx);
-  grpc_timer_init(exec_ctx, &ac->alarm, deadline, &ac->on_alarm);
-  grpc_socket_notify_on_write(exec_ctx, socket, &ac->on_connect);
+  grpc_timer_init(&ac->alarm, deadline, &ac->on_alarm);
+  grpc_socket_notify_on_write(socket, &ac->on_connect);
   return;
 
 failure:
@@ -223,26 +223,23 @@
   } else if (sock != INVALID_SOCKET) {
     closesocket(sock);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, final_error);
+  GRPC_CLOSURE_SCHED(on_done, final_error);
 }
 
 // overridden by api_fuzzer.c
-extern "C" {
 void (*grpc_tcp_client_connect_impl)(
-    grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
+    grpc_closure* closure, grpc_endpoint** ep,
     grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
     const grpc_resolved_address* addr,
     grpc_millis deadline) = tcp_client_connect_impl;
-}
 
-void grpc_tcp_client_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                             grpc_endpoint** ep,
+void grpc_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
                              grpc_pollset_set* interested_parties,
                              const grpc_channel_args* channel_args,
                              const grpc_resolved_address* addr,
                              grpc_millis deadline) {
-  grpc_tcp_client_connect_impl(exec_ctx, closure, ep, interested_parties,
-                               channel_args, addr, deadline);
+  grpc_tcp_client_connect_impl(closure, ep, interested_parties, channel_args,
+                               addr, deadline);
 }
 
 #endif /* GRPC_WINSOCK_SOCKET */
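
All three platform files keep grpc_tcp_client_connect_impl as a plain function pointer (now without the extern "C" wrapper) precisely so tests such as api_fuzzer.c can swap the connector. A hypothetical test hook showing the substitution; fake_tcp_client_connect and install_fake_connector are illustrative names.

/* Replace the connector with a stub that fails immediately; the signature
   matches the exec_ctx-free pointer declared above. */
static void fake_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
                                    grpc_pollset_set* interested_parties,
                                    const grpc_channel_args* channel_args,
                                    const grpc_resolved_address* addr,
                                    grpc_millis deadline) {
  *ep = nullptr;
  GRPC_CLOSURE_SCHED(closure,
                     GRPC_ERROR_CREATE_FROM_STATIC_STRING("fake connect"));
}

static void install_fake_connector() {
  grpc_tcp_client_connect_impl = fake_tcp_client_connect;
}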
diff --git a/src/core/lib/iomgr/tcp_posix.cc b/src/core/lib/iomgr/tcp_posix.cc
index d09cfca..816acf2 100644
--- a/src/core/lib/iomgr/tcp_posix.cc
+++ b/src/core/lib/iomgr/tcp_posix.cc
@@ -108,36 +108,31 @@
 static gpr_atm g_uncovered_notifications_pending;
 static gpr_atm g_backup_poller; /* backup_poller* */
 
-static void tcp_handle_read(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
-                            grpc_error* error);
-static void tcp_handle_write(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
-                             grpc_error* error);
-static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx* exec_ctx,
-                                                 void* arg /* grpc_tcp */,
+static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
+static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
+static void tcp_drop_uncovered_then_handle_write(void* arg /* grpc_tcp */,
                                                  grpc_error* error);
 
-static void done_poller(grpc_exec_ctx* exec_ctx, void* bp,
-                        grpc_error* error_ignored) {
+static void done_poller(void* bp, grpc_error* error_ignored) {
   backup_poller* p = (backup_poller*)bp;
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p destroy", p);
   }
-  grpc_pollset_destroy(exec_ctx, BACKUP_POLLER_POLLSET(p));
+  grpc_pollset_destroy(BACKUP_POLLER_POLLSET(p));
   gpr_free(p);
 }
 
-static void run_poller(grpc_exec_ctx* exec_ctx, void* bp,
-                       grpc_error* error_ignored) {
+static void run_poller(void* bp, grpc_error* error_ignored) {
   backup_poller* p = (backup_poller*)bp;
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p run", p);
   }
   gpr_mu_lock(p->pollset_mu);
-  grpc_millis deadline = grpc_exec_ctx_now(exec_ctx) + 13 * GPR_MS_PER_SEC;
-  GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS(exec_ctx);
+  grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 13 * GPR_MS_PER_SEC;
+  GRPC_STATS_INC_TCP_BACKUP_POLLER_POLLS();
   GRPC_LOG_IF_ERROR(
       "backup_poller:pollset_work",
-      grpc_pollset_work(exec_ctx, BACKUP_POLLER_POLLSET(p), nullptr, deadline));
+      grpc_pollset_work(BACKUP_POLLER_POLLSET(p), nullptr, deadline));
   gpr_mu_unlock(p->pollset_mu);
   /* last "uncovered" notification is the ref that keeps us polling, if we get
    * there try a cas to release it */
@@ -152,18 +147,18 @@
     if (grpc_tcp_trace.enabled()) {
       gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p shutdown", p);
     }
-    grpc_pollset_shutdown(exec_ctx, BACKUP_POLLER_POLLSET(p),
+    grpc_pollset_shutdown(BACKUP_POLLER_POLLSET(p),
                           GRPC_CLOSURE_INIT(&p->run_poller, done_poller, p,
                                             grpc_schedule_on_exec_ctx));
   } else {
     if (grpc_tcp_trace.enabled()) {
       gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p reschedule", p);
     }
-    GRPC_CLOSURE_SCHED(exec_ctx, &p->run_poller, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&p->run_poller, GRPC_ERROR_NONE);
   }
 }
 
-static void drop_uncovered(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void drop_uncovered(grpc_tcp* tcp) {
   backup_poller* p = (backup_poller*)gpr_atm_acq_load(&g_backup_poller);
   gpr_atm old_count =
       gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, -1);
@@ -174,7 +169,7 @@
   GPR_ASSERT(old_count != 1);
 }
 
-static void cover_self(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void cover_self(grpc_tcp* tcp) {
   backup_poller* p;
   gpr_atm old_count =
       gpr_atm_no_barrier_fetch_add(&g_uncovered_notifications_pending, 2);
@@ -183,7 +178,7 @@
             2 + (int)old_count);
   }
   if (old_count == 0) {
-    GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED(exec_ctx);
+    GRPC_STATS_INC_TCP_BACKUP_POLLERS_CREATED();
     p = (backup_poller*)gpr_zalloc(sizeof(*p) + grpc_pollset_size());
     if (grpc_tcp_trace.enabled()) {
       gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p create", p);
@@ -191,7 +186,6 @@
     grpc_pollset_init(BACKUP_POLLER_POLLSET(p), &p->pollset_mu);
     gpr_atm_rel_store(&g_backup_poller, (gpr_atm)p);
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_INIT(&p->run_poller, run_poller, p,
                           grpc_executor_scheduler(GRPC_EXECUTOR_LONG)),
         GRPC_ERROR_NONE);
@@ -204,39 +198,38 @@
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_DEBUG, "BACKUP_POLLER:%p add %p", p, tcp);
   }
-  grpc_pollset_add_fd(exec_ctx, BACKUP_POLLER_POLLSET(p), tcp->em_fd);
+  grpc_pollset_add_fd(BACKUP_POLLER_POLLSET(p), tcp->em_fd);
   if (old_count != 0) {
-    drop_uncovered(exec_ctx, tcp);
+    drop_uncovered(tcp);
   }
 }
 
-static void notify_on_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void notify_on_read(grpc_tcp* tcp) {
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_DEBUG, "TCP:%p notify_on_read", tcp);
   }
   GRPC_CLOSURE_INIT(&tcp->read_done_closure, tcp_handle_read, tcp,
                     grpc_schedule_on_exec_ctx);
-  grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_done_closure);
+  grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_done_closure);
 }
 
-static void notify_on_write(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void notify_on_write(grpc_tcp* tcp) {
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_DEBUG, "TCP:%p notify_on_write", tcp);
   }
-  cover_self(exec_ctx, tcp);
+  cover_self(tcp);
   GRPC_CLOSURE_INIT(&tcp->write_done_closure,
                     tcp_drop_uncovered_then_handle_write, tcp,
                     grpc_schedule_on_exec_ctx);
-  grpc_fd_notify_on_write(exec_ctx, tcp->em_fd, &tcp->write_done_closure);
+  grpc_fd_notify_on_write(tcp->em_fd, &tcp->write_done_closure);
 }
 
-static void tcp_drop_uncovered_then_handle_write(grpc_exec_ctx* exec_ctx,
-                                                 void* arg, grpc_error* error) {
+static void tcp_drop_uncovered_then_handle_write(void* arg, grpc_error* error) {
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_DEBUG, "TCP:%p got_write: %s", arg, grpc_error_string(error));
   }
-  drop_uncovered(exec_ctx, (grpc_tcp*)arg);
-  tcp_handle_write(exec_ctx, arg, error);
+  drop_uncovered((grpc_tcp*)arg);
+  tcp_handle_write(arg, error);
 }
 
 static void add_to_estimate(grpc_tcp* tcp, size_t bytes) {
@@ -277,38 +270,38 @@
 
 static grpc_error* tcp_annotate_error(grpc_error* src_error, grpc_tcp* tcp) {
   return grpc_error_set_str(
-      grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
+      grpc_error_set_int(
+          grpc_error_set_int(src_error, GRPC_ERROR_INT_FD, tcp->fd),
+          /* All tcp errors are marked with UNAVAILABLE so that the application may
+           * choose to retry. */
+          GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
       GRPC_ERROR_STR_TARGET_ADDRESS,
       grpc_slice_from_copied_string(tcp->peer_string));
 }
 
-static void tcp_handle_read(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
-                            grpc_error* error);
-static void tcp_handle_write(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
-                             grpc_error* error);
+static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error);
+static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error);
 
-static void tcp_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                         grpc_error* why) {
+static void tcp_shutdown(grpc_endpoint* ep, grpc_error* why) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
-  grpc_fd_shutdown(exec_ctx, tcp->em_fd, why);
-  grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
+  grpc_fd_shutdown(tcp->em_fd, why);
+  grpc_resource_user_shutdown(tcp->resource_user);
 }
 
-static void tcp_free(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
-  grpc_fd_orphan(exec_ctx, tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
+static void tcp_free(grpc_tcp* tcp) {
+  grpc_fd_orphan(tcp->em_fd, tcp->release_fd_cb, tcp->release_fd,
                  false /* already_closed */, "tcp_unref_orphan");
-  grpc_slice_buffer_destroy_internal(exec_ctx, &tcp->last_read_buffer);
-  grpc_resource_user_unref(exec_ctx, tcp->resource_user);
+  grpc_slice_buffer_destroy_internal(&tcp->last_read_buffer);
+  grpc_resource_user_unref(tcp->resource_user);
   gpr_free(tcp->peer_string);
   gpr_free(tcp);
 }
 
 #ifndef NDEBUG
-#define TCP_UNREF(cl, tcp, reason) \
-  tcp_unref((cl), (tcp), (reason), __FILE__, __LINE__)
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
 #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
-                      const char* reason, const char* file, int line) {
+static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
+                      int line) {
   if (grpc_tcp_trace.enabled()) {
     gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
     gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -316,7 +309,7 @@
             val - 1);
   }
   if (gpr_unref(&tcp->refcount)) {
-    tcp_free(exec_ctx, tcp);
+    tcp_free(tcp);
   }
 }
 
@@ -331,26 +324,25 @@
   gpr_ref(&tcp->refcount);
 }
 #else
-#define TCP_UNREF(cl, tcp, reason) tcp_unref((cl), (tcp))
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
 #define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void tcp_unref(grpc_tcp* tcp) {
   if (gpr_unref(&tcp->refcount)) {
-    tcp_free(exec_ctx, tcp);
+    tcp_free(tcp);
   }
 }
 
 static void tcp_ref(grpc_tcp* tcp) { gpr_ref(&tcp->refcount); }
 #endif
 
-static void tcp_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+static void tcp_destroy(grpc_endpoint* ep) {
   grpc_network_status_unregister_endpoint(ep);
   grpc_tcp* tcp = (grpc_tcp*)ep;
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &tcp->last_read_buffer);
-  TCP_UNREF(exec_ctx, tcp, "destroy");
+  grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
+  TCP_UNREF(tcp, "destroy");
 }
 
-static void call_read_cb(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
-                         grpc_error* error) {
+static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
   grpc_closure* cb = tcp->read_cb;
 
   if (grpc_tcp_trace.enabled()) {
@@ -369,11 +361,11 @@
 
   tcp->read_cb = nullptr;
   tcp->incoming_buffer = nullptr;
-  GRPC_CLOSURE_RUN(exec_ctx, cb, error);
+  GRPC_CLOSURE_RUN(cb, error);
 }
 
 #define MAX_READ_IOVEC 4
-static void tcp_do_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void tcp_do_read(grpc_tcp* tcp) {
   struct msghdr msg;
   struct iovec iov[MAX_READ_IOVEC];
   ssize_t read_bytes;
@@ -396,12 +388,12 @@
   msg.msg_controllen = 0;
   msg.msg_flags = 0;
 
-  GRPC_STATS_INC_TCP_READ_OFFER(exec_ctx, tcp->incoming_buffer->length);
-  GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(exec_ctx, tcp->incoming_buffer->count);
+  GRPC_STATS_INC_TCP_READ_OFFER(tcp->incoming_buffer->length);
+  GRPC_STATS_INC_TCP_READ_OFFER_IOV_SIZE(tcp->incoming_buffer->count);
 
   GPR_TIMER_BEGIN("recvmsg", 0);
   do {
-    GRPC_STATS_INC_SYSCALL_READ(exec_ctx);
+    GRPC_STATS_INC_SYSCALL_READ();
     read_bytes = recvmsg(tcp->fd, &msg, 0);
   } while (read_bytes < 0 && errno == EINTR);
   GPR_TIMER_END("recvmsg", read_bytes >= 0);
@@ -412,24 +404,22 @@
     if (errno == EAGAIN) {
       finish_estimate(tcp);
       /* We've consumed the edge, request a new one */
-      notify_on_read(exec_ctx, tcp);
+      notify_on_read(tcp);
     } else {
-      grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
-                                                 tcp->incoming_buffer);
-      call_read_cb(exec_ctx, tcp,
+      grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
+      call_read_cb(tcp,
                    tcp_annotate_error(GRPC_OS_ERROR(errno, "recvmsg"), tcp));
-      TCP_UNREF(exec_ctx, tcp, "read");
+      TCP_UNREF(tcp, "read");
     }
   } else if (read_bytes == 0) {
     /* 0 read size ==> end of stream */
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
+    grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
     call_read_cb(
-        exec_ctx, tcp,
-        tcp_annotate_error(
-            GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
-    TCP_UNREF(exec_ctx, tcp, "read");
+        tcp, tcp_annotate_error(
+                 GRPC_ERROR_CREATE_FROM_STATIC_STRING("Socket closed"), tcp));
+    TCP_UNREF(tcp, "read");
   } else {
-    GRPC_STATS_INC_TCP_READ_SIZE(exec_ctx, read_bytes);
+    GRPC_STATS_INC_TCP_READ_SIZE(read_bytes);
     add_to_estimate(tcp, (size_t)read_bytes);
     GPR_ASSERT((size_t)read_bytes <= tcp->incoming_buffer->length);
     if ((size_t)read_bytes < tcp->incoming_buffer->length) {
@@ -439,50 +429,47 @@
           &tcp->last_read_buffer);
     }
     GPR_ASSERT((size_t)read_bytes == tcp->incoming_buffer->length);
-    call_read_cb(exec_ctx, tcp, GRPC_ERROR_NONE);
-    TCP_UNREF(exec_ctx, tcp, "read");
+    call_read_cb(tcp, GRPC_ERROR_NONE);
+    TCP_UNREF(tcp, "read");
   }
 
   GPR_TIMER_END("tcp_continue_read", 0);
 }
 
-static void tcp_read_allocation_done(grpc_exec_ctx* exec_ctx, void* tcpp,
-                                     grpc_error* error) {
+static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
   grpc_tcp* tcp = (grpc_tcp*)tcpp;
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_DEBUG, "TCP:%p read_allocation_done: %s", tcp,
             grpc_error_string(error));
   }
   if (error != GRPC_ERROR_NONE) {
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
-                                               &tcp->last_read_buffer);
-    call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
-    TCP_UNREF(exec_ctx, tcp, "read");
+    grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
+    grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
+    call_read_cb(tcp, GRPC_ERROR_REF(error));
+    TCP_UNREF(tcp, "read");
   } else {
-    tcp_do_read(exec_ctx, tcp);
+    tcp_do_read(tcp);
   }
 }
 
-static void tcp_continue_read(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void tcp_continue_read(grpc_tcp* tcp) {
   size_t target_read_size = get_target_read_size(tcp);
   if (tcp->incoming_buffer->length < target_read_size &&
       tcp->incoming_buffer->count < MAX_READ_IOVEC) {
     if (grpc_tcp_trace.enabled()) {
       gpr_log(GPR_DEBUG, "TCP:%p alloc_slices", tcp);
     }
-    grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
-                                    target_read_size, 1, tcp->incoming_buffer);
+    grpc_resource_user_alloc_slices(&tcp->slice_allocator, target_read_size, 1,
+                                    tcp->incoming_buffer);
   } else {
     if (grpc_tcp_trace.enabled()) {
       gpr_log(GPR_DEBUG, "TCP:%p do_read", tcp);
     }
-    tcp_do_read(exec_ctx, tcp);
+    tcp_do_read(tcp);
   }
 }
 
-static void tcp_handle_read(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
-                            grpc_error* error) {
+static void tcp_handle_read(void* arg /* grpc_tcp */, grpc_error* error) {
   grpc_tcp* tcp = (grpc_tcp*)arg;
   GPR_ASSERT(!tcp->finished_edge);
   if (grpc_tcp_trace.enabled()) {
@@ -490,37 +477,35 @@
   }
 
   if (error != GRPC_ERROR_NONE) {
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->incoming_buffer);
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
-                                               &tcp->last_read_buffer);
-    call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
-    TCP_UNREF(exec_ctx, tcp, "read");
+    grpc_slice_buffer_reset_and_unref_internal(tcp->incoming_buffer);
+    grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
+    call_read_cb(tcp, GRPC_ERROR_REF(error));
+    TCP_UNREF(tcp, "read");
   } else {
-    tcp_continue_read(exec_ctx, tcp);
+    tcp_continue_read(tcp);
   }
 }
 
-static void tcp_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                     grpc_slice_buffer* incoming_buffer, grpc_closure* cb) {
+static void tcp_read(grpc_endpoint* ep, grpc_slice_buffer* incoming_buffer,
+                     grpc_closure* cb) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
   GPR_ASSERT(tcp->read_cb == nullptr);
   tcp->read_cb = cb;
   tcp->incoming_buffer = incoming_buffer;
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, incoming_buffer);
+  grpc_slice_buffer_reset_and_unref_internal(incoming_buffer);
   grpc_slice_buffer_swap(incoming_buffer, &tcp->last_read_buffer);
   TCP_REF(tcp, "read");
   if (tcp->finished_edge) {
     tcp->finished_edge = false;
-    notify_on_read(exec_ctx, tcp);
+    notify_on_read(tcp);
   } else {
-    GRPC_CLOSURE_SCHED(exec_ctx, &tcp->read_done_closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&tcp->read_done_closure, GRPC_ERROR_NONE);
   }
 }
 
 /* returns true if done, false if pending; if returning true, *error is set */
 #define MAX_WRITE_IOVEC 1000
-static bool tcp_flush(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
-                      grpc_error** error) {
+static bool tcp_flush(grpc_tcp* tcp, grpc_error** error) {
   struct msghdr msg;
   struct iovec iov[MAX_WRITE_IOVEC];
   msg_iovlen_type iov_size;
@@ -562,13 +547,13 @@
     msg.msg_controllen = 0;
     msg.msg_flags = 0;
 
-    GRPC_STATS_INC_TCP_WRITE_SIZE(exec_ctx, sending_length);
-    GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(exec_ctx, iov_size);
+    GRPC_STATS_INC_TCP_WRITE_SIZE(sending_length);
+    GRPC_STATS_INC_TCP_WRITE_IOV_SIZE(iov_size);
 
     GPR_TIMER_BEGIN("sendmsg", 1);
     do {
       /* TODO(klempner): Cork if this is a partial write */
-      GRPC_STATS_INC_SYSCALL_WRITE(exec_ctx);
+      GRPC_STATS_INC_SYSCALL_WRITE();
       sent_length = sendmsg(tcp->fd, &msg, SENDMSG_FLAGS);
     } while (sent_length < 0 && errno == EINTR);
     GPR_TIMER_END("sendmsg", 0);
@@ -580,20 +565,16 @@
         // point
         for (size_t idx = 0; idx < unwind_slice_idx; ++idx) {
           grpc_slice_unref_internal(
-              exec_ctx, grpc_slice_buffer_take_first(tcp->outgoing_buffer));
+              grpc_slice_buffer_take_first(tcp->outgoing_buffer));
         }
         return false;
       } else if (errno == EPIPE) {
-        *error = grpc_error_set_int(GRPC_OS_ERROR(errno, "sendmsg"),
-                                    GRPC_ERROR_INT_GRPC_STATUS,
-                                    GRPC_STATUS_UNAVAILABLE);
-        grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
-                                                   tcp->outgoing_buffer);
+        *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
+        grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
         return true;
       } else {
         *error = tcp_annotate_error(GRPC_OS_ERROR(errno, "sendmsg"), tcp);
-        grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
-                                                   tcp->outgoing_buffer);
+        grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
         return true;
       }
     }
@@ -616,31 +597,29 @@
 
     if (outgoing_slice_idx == tcp->outgoing_buffer->count) {
       *error = GRPC_ERROR_NONE;
-      grpc_slice_buffer_reset_and_unref_internal(exec_ctx,
-                                                 tcp->outgoing_buffer);
+      grpc_slice_buffer_reset_and_unref_internal(tcp->outgoing_buffer);
       return true;
     }
   }
 }
 
-static void tcp_handle_write(grpc_exec_ctx* exec_ctx, void* arg /* grpc_tcp */,
-                             grpc_error* error) {
+static void tcp_handle_write(void* arg /* grpc_tcp */, grpc_error* error) {
   grpc_tcp* tcp = (grpc_tcp*)arg;
   grpc_closure* cb;
 
   if (error != GRPC_ERROR_NONE) {
     cb = tcp->write_cb;
     tcp->write_cb = nullptr;
-    cb->cb(exec_ctx, cb->cb_arg, error);
-    TCP_UNREF(exec_ctx, tcp, "write");
+    cb->cb(cb->cb_arg, error);
+    TCP_UNREF(tcp, "write");
     return;
   }
 
-  if (!tcp_flush(exec_ctx, tcp, &error)) {
+  if (!tcp_flush(tcp, &error)) {
     if (grpc_tcp_trace.enabled()) {
       gpr_log(GPR_DEBUG, "write: delayed");
     }
-    notify_on_write(exec_ctx, tcp);
+    notify_on_write(tcp);
   } else {
     cb = tcp->write_cb;
     tcp->write_cb = nullptr;
@@ -649,13 +628,13 @@
       gpr_log(GPR_DEBUG, "write: %s", str);
     }
 
-    GRPC_CLOSURE_RUN(exec_ctx, cb, error);
-    TCP_UNREF(exec_ctx, tcp, "write");
+    GRPC_CLOSURE_RUN(cb, error);
+    TCP_UNREF(tcp, "write");
   }
 }
 
-static void tcp_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                      grpc_slice_buffer* buf, grpc_closure* cb) {
+static void tcp_write(grpc_endpoint* ep, grpc_slice_buffer* buf,
+                      grpc_closure* cb) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
   grpc_error* error = GRPC_ERROR_NONE;
 
@@ -676,51 +655,48 @@
   if (buf->length == 0) {
     GPR_TIMER_END("tcp_write", 0);
     GRPC_CLOSURE_SCHED(
-        exec_ctx, cb,
-        grpc_fd_is_shutdown(tcp->em_fd)
-            ? tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"),
-                                 tcp)
-            : GRPC_ERROR_NONE);
+        cb, grpc_fd_is_shutdown(tcp->em_fd)
+                ? tcp_annotate_error(
+                      GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp)
+                : GRPC_ERROR_NONE);
     return;
   }
   tcp->outgoing_buffer = buf;
   tcp->outgoing_byte_idx = 0;
 
-  if (!tcp_flush(exec_ctx, tcp, &error)) {
+  if (!tcp_flush(tcp, &error)) {
     TCP_REF(tcp, "write");
     tcp->write_cb = cb;
     if (grpc_tcp_trace.enabled()) {
       gpr_log(GPR_DEBUG, "write: delayed");
     }
-    notify_on_write(exec_ctx, tcp);
+    notify_on_write(tcp);
   } else {
     if (grpc_tcp_trace.enabled()) {
       const char* str = grpc_error_string(error);
       gpr_log(GPR_DEBUG, "write: %s", str);
     }
-    GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
+    GRPC_CLOSURE_SCHED(cb, error);
   }
 
   GPR_TIMER_END("tcp_write", 0);
 }
 
-static void tcp_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                               grpc_pollset* pollset) {
+static void tcp_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
-  grpc_pollset_add_fd(exec_ctx, pollset, tcp->em_fd);
+  grpc_pollset_add_fd(pollset, tcp->em_fd);
 }
 
-static void tcp_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+static void tcp_add_to_pollset_set(grpc_endpoint* ep,
                                    grpc_pollset_set* pollset_set) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
-  grpc_pollset_set_add_fd(exec_ctx, pollset_set, tcp->em_fd);
+  grpc_pollset_set_add_fd(pollset_set, tcp->em_fd);
 }
 
-static void tcp_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_endpoint* ep,
+static void tcp_delete_from_pollset_set(grpc_endpoint* ep,
                                         grpc_pollset_set* pollset_set) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
-  grpc_pollset_set_del_fd(exec_ctx, pollset_set, tcp->em_fd);
+  grpc_pollset_set_del_fd(pollset_set, tcp->em_fd);
 }
 
 static char* tcp_get_peer(grpc_endpoint* ep) {
@@ -751,7 +727,7 @@
 
 #define MAX_CHUNK_SIZE 32 * 1024 * 1024
 
-grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_fd* em_fd,
+grpc_endpoint* grpc_tcp_create(grpc_fd* em_fd,
                                const grpc_channel_args* channel_args,
                                const char* peer_string) {
   int tcp_read_chunk_size = GRPC_TCP_DEFAULT_READ_SLICE_SIZE;
@@ -780,7 +756,7 @@
             grpc_channel_arg_get_integer(&channel_args->args[i], options);
       } else if (0 ==
                  strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
-        grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+        grpc_resource_quota_unref_internal(resource_quota);
         resource_quota = grpc_resource_quota_ref_internal(
             (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
       }
@@ -817,7 +793,7 @@
       &tcp->slice_allocator, tcp->resource_user, tcp_read_allocation_done, tcp);
   /* Tell network status tracker about new endpoint */
   grpc_network_status_register_endpoint(&tcp->base);
-  grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
 
   return &tcp->base;
 }
@@ -828,15 +804,15 @@
   return grpc_fd_wrapped_fd(tcp->em_fd);
 }
 
-void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                                     int* fd, grpc_closure* done) {
+void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
+                                     grpc_closure* done) {
   grpc_network_status_unregister_endpoint(ep);
   grpc_tcp* tcp = (grpc_tcp*)ep;
   GPR_ASSERT(ep->vtable == &vtable);
   tcp->release_fd = fd;
   tcp->release_fd_cb = done;
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &tcp->last_read_buffer);
-  TCP_UNREF(exec_ctx, tcp, "destroy");
+  grpc_slice_buffer_reset_and_unref_internal(&tcp->last_read_buffer);
+  TCP_UNREF(tcp, "destroy");
 }
 
 #endif
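
Beyond the mechanical exec_ctx removal, tcp_annotate_error() now stamps every TCP error with GRPC_STATUS_UNAVAILABLE (in addition to the fd and target address), and the EPIPE branch of tcp_flush() reuses that helper instead of building its own error. A small sketch of the decoration pattern; annotate is an illustrative stand-in for the static helper above.

/* Nest grpc_error_set_int for the fd and the retryable status, then attach
   the peer address, exactly as tcp_annotate_error does. */
static grpc_error* annotate(grpc_error* src, int fd, const char* peer) {
  return grpc_error_set_str(
      grpc_error_set_int(
          grpc_error_set_int(src, GRPC_ERROR_INT_FD, fd),
          GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_UNAVAILABLE),
      GRPC_ERROR_STR_TARGET_ADDRESS, grpc_slice_from_copied_string(peer));
}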
diff --git a/src/core/lib/iomgr/tcp_posix.h b/src/core/lib/iomgr/tcp_posix.h
index ba85146..4529c02 100644
--- a/src/core/lib/iomgr/tcp_posix.h
+++ b/src/core/lib/iomgr/tcp_posix.h
@@ -33,16 +33,11 @@
 #include "src/core/lib/iomgr/endpoint.h"
 #include "src/core/lib/iomgr/ev_posix.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern grpc_core::TraceFlag grpc_tcp_trace;
 
 /* Create a tcp endpoint given a file descriptor and a read slice size.
    Takes ownership of fd. */
-grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               const grpc_channel_args* args,
+grpc_endpoint* grpc_tcp_create(grpc_fd* fd, const grpc_channel_args* args,
                                const char* peer_string);
 
 /* Return the tcp endpoint's fd, or -1 if this is not available. Does not
@@ -54,11 +49,7 @@
 /* Destroy the tcp endpoint without closing its fd. *fd will be set and done
  * will be called when the endpoint is destroyed.
  * Requires: ep must be a tcp endpoint and fd must not be NULL. */
-void grpc_tcp_destroy_and_release_fd(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                                     int* fd, grpc_closure* done);
-
-#ifdef __cplusplus
-}
-#endif
+void grpc_tcp_destroy_and_release_fd(grpc_endpoint* ep, int* fd,
+                                     grpc_closure* done);
 
 #endif /* GRPC_CORE_LIB_IOMGR_TCP_POSIX_H */
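
With the header trimmed to the exec_ctx-free signatures, the endpoint lifecycle is: grpc_tcp_create() takes ownership of a connected grpc_fd*, and grpc_tcp_destroy_and_release_fd() hands the raw fd back through an out parameter plus a completion closure. A usage sketch; wrap_fd and release_fd are illustrative wrappers, not part of this patch.

/* Wrap an already-connected fd in a TCP endpoint, and later reclaim the raw
   fd without closing it. */
static grpc_endpoint* wrap_fd(grpc_fd* em_fd, const grpc_channel_args* args,
                              const char* peer_string) {
  return grpc_tcp_create(em_fd, args, peer_string);  // takes ownership of em_fd
}

static void release_fd(grpc_endpoint* ep, int* raw_fd, grpc_closure* done) {
  // *raw_fd is written and `done` is scheduled once the endpoint is destroyed.
  grpc_tcp_destroy_and_release_fd(ep, raw_fd, done);
}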
diff --git a/src/core/lib/iomgr/tcp_server.h b/src/core/lib/iomgr/tcp_server.h
index ef98319..038c765 100644
--- a/src/core/lib/iomgr/tcp_server.h
+++ b/src/core/lib/iomgr/tcp_server.h
@@ -25,10 +25,6 @@
 #include "src/core/lib/iomgr/endpoint.h"
 #include "src/core/lib/iomgr/resolve_address.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Forward decl of grpc_tcp_server */
 typedef struct grpc_tcp_server grpc_tcp_server;
 
@@ -43,22 +39,20 @@
 
 /* Called for newly connected TCP connections.
    Takes ownership of acceptor. */
-typedef void (*grpc_tcp_server_cb)(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_endpoint* ep,
+typedef void (*grpc_tcp_server_cb)(void* arg, grpc_endpoint* ep,
                                    grpc_pollset* accepting_pollset,
                                    grpc_tcp_server_acceptor* acceptor);
 
 /* Create a server, initially not bound to any ports. The caller owns one ref.
    If shutdown_complete is not NULL, it will be used by
    grpc_tcp_server_unref() when the ref count reaches zero. */
-grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
-                                   grpc_closure* shutdown_complete,
+grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
                                    const grpc_channel_args* args,
                                    grpc_tcp_server** server);
 
 /* Start listening to bound ports */
-void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* server,
-                           grpc_pollset** pollsets, size_t pollset_count,
+void grpc_tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
+                           size_t pollset_count,
                            grpc_tcp_server_cb on_accept_cb, void* cb_arg);
 
 /* Add a port to the server, returning the newly allocated port on success, or
@@ -96,14 +90,9 @@
 
 /* If the refcount drops to zero, enqueue calls on exec_ctx to
    shutdown_listeners and delete s. */
-void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s);
+void grpc_tcp_server_unref(grpc_tcp_server* s);
 
 /* Shutdown the fds of listeners. */
-void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
-                                        grpc_tcp_server* s);
-
-#ifdef __cplusplus
-}
-#endif
+void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s);
 
 #endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_H */
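
The server API keeps the same lifecycle, just without exec_ctx threading: create with an optional shutdown closure, start with a callback matching grpc_tcp_server_cb, then shut down the listeners and unref. A lifecycle sketch using only the declarations above; on_accept and run_server are illustrative, a scoped ExecCtx is assumed at the call site, and port binding (grpc_tcp_server_add_port, whose declaration is not shown in this hunk) is elided.

/* Accept callback matching the new grpc_tcp_server_cb typedef; it takes
   ownership of `acceptor`. */
static void on_accept(void* arg, grpc_endpoint* ep,
                      grpc_pollset* accepting_pollset,
                      grpc_tcp_server_acceptor* acceptor) {
  (void)arg;
  (void)ep;  // a real server hands the endpoint to a transport here
  (void)accepting_pollset;
  gpr_free(acceptor);
}

static void run_server(const grpc_channel_args* args, grpc_pollset** pollsets,
                       size_t pollset_count) {
  grpc_tcp_server* server = nullptr;
  grpc_error* error = grpc_tcp_server_create(nullptr /* shutdown_complete */,
                                             args, &server);
  if (error != GRPC_ERROR_NONE) {
    return;  // a real caller would log and release `error`
  }
  /* ... bind listening ports here (elided) ... */
  grpc_tcp_server_start(server, pollsets, pollset_count, on_accept, nullptr);
  /* ... later ... */
  grpc_tcp_server_shutdown_listeners(server);
  grpc_tcp_server_unref(server);  // destroys once the ref count reaches zero
}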
diff --git a/src/core/lib/iomgr/tcp_server_posix.cc b/src/core/lib/iomgr/tcp_server_posix.cc
index 6fed13c..99e1c6c 100644
--- a/src/core/lib/iomgr/tcp_server_posix.cc
+++ b/src/core/lib/iomgr/tcp_server_posix.cc
@@ -68,8 +68,7 @@
 #endif
 }
 
-grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
-                                   grpc_closure* shutdown_complete,
+grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
                                    const grpc_channel_args* args,
                                    grpc_tcp_server** server) {
   gpr_once_init(&check_init, init);
@@ -116,12 +115,12 @@
   return GRPC_ERROR_NONE;
 }
 
-static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+static void finish_shutdown(grpc_tcp_server* s) {
   gpr_mu_lock(&s->mu);
   GPR_ASSERT(s->shutdown);
   gpr_mu_unlock(&s->mu);
   if (s->shutdown_complete != nullptr) {
-    GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
   }
 
   gpr_mu_destroy(&s->mu);
@@ -131,19 +130,18 @@
     s->head = sp->next;
     gpr_free(sp);
   }
-  grpc_channel_args_destroy(exec_ctx, s->channel_args);
+  grpc_channel_args_destroy(s->channel_args);
 
   gpr_free(s);
 }
 
-static void destroyed_port(grpc_exec_ctx* exec_ctx, void* server,
-                           grpc_error* error) {
+static void destroyed_port(void* server, grpc_error* error) {
   grpc_tcp_server* s = (grpc_tcp_server*)server;
   gpr_mu_lock(&s->mu);
   s->destroyed_ports++;
   if (s->destroyed_ports == s->nports) {
     gpr_mu_unlock(&s->mu);
-    finish_shutdown(exec_ctx, s);
+    finish_shutdown(s);
   } else {
     GPR_ASSERT(s->destroyed_ports < s->nports);
     gpr_mu_unlock(&s->mu);
@@ -153,7 +151,7 @@
 /* called when all listening endpoints have been shutdown, so no further
    events will be received on them - at this point it's safe to destroy
    things */
-static void deactivated_all_ports(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+static void deactivated_all_ports(grpc_tcp_server* s) {
   /* delete ALL the things */
   gpr_mu_lock(&s->mu);
 
@@ -165,17 +163,17 @@
       grpc_unlink_if_unix_domain_socket(&sp->addr);
       GRPC_CLOSURE_INIT(&sp->destroyed_closure, destroyed_port, s,
                         grpc_schedule_on_exec_ctx);
-      grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, nullptr,
+      grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, nullptr,
                      false /* already_closed */, "tcp_listener_shutdown");
     }
     gpr_mu_unlock(&s->mu);
   } else {
     gpr_mu_unlock(&s->mu);
-    finish_shutdown(exec_ctx, s);
+    finish_shutdown(s);
   }
 }
 
-static void tcp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+static void tcp_server_destroy(grpc_tcp_server* s) {
   gpr_mu_lock(&s->mu);
 
   GPR_ASSERT(!s->shutdown);
@@ -186,18 +184,17 @@
     grpc_tcp_listener* sp;
     for (sp = s->head; sp; sp = sp->next) {
       grpc_fd_shutdown(
-          exec_ctx, sp->emfd,
-          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server destroyed"));
+          sp->emfd, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server destroyed"));
     }
     gpr_mu_unlock(&s->mu);
   } else {
     gpr_mu_unlock(&s->mu);
-    deactivated_all_ports(exec_ctx, s);
+    deactivated_all_ports(s);
   }
 }
 
 /* event manager callback when reads are ready */
-static void on_read(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* err) {
+static void on_read(void* arg, grpc_error* err) {
   grpc_tcp_listener* sp = (grpc_tcp_listener*)arg;
   grpc_pollset* read_notifier_pollset;
   if (err != GRPC_ERROR_NONE) {
@@ -223,7 +220,7 @@
         case EINTR:
           continue;
         case EAGAIN:
-          grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+          grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
           return;
         default:
           gpr_mu_lock(&sp->server->mu);
@@ -249,7 +246,7 @@
 
     grpc_fd* fdobj = grpc_fd_create(fd, name);
 
-    grpc_pollset_add_fd(exec_ctx, read_notifier_pollset, fdobj);
+    grpc_pollset_add_fd(read_notifier_pollset, fdobj);
 
     // Create acceptor.
     grpc_tcp_server_acceptor* acceptor =
@@ -259,8 +256,8 @@
     acceptor->fd_index = sp->fd_index;
 
     sp->server->on_accept_cb(
-        exec_ctx, sp->server->on_accept_cb_arg,
-        grpc_tcp_create(exec_ctx, fdobj, sp->server->channel_args, addr_str),
+        sp->server->on_accept_cb_arg,
+        grpc_tcp_create(fdobj, sp->server->channel_args, addr_str),
         read_notifier_pollset, acceptor);
 
     gpr_free(name);
@@ -273,7 +270,7 @@
   gpr_mu_lock(&sp->server->mu);
   if (0 == --sp->server->active_ports && sp->server->shutdown) {
     gpr_mu_unlock(&sp->server->mu);
-    deactivated_all_ports(exec_ctx, sp->server);
+    deactivated_all_ports(sp->server);
   } else {
     gpr_mu_unlock(&sp->server->mu);
   }
@@ -483,8 +480,8 @@
   return -1;
 }
 
-void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s,
-                           grpc_pollset** pollsets, size_t pollset_count,
+void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollsets,
+                           size_t pollset_count,
                            grpc_tcp_server_cb on_accept_cb,
                            void* on_accept_cb_arg) {
   size_t i;
@@ -504,20 +501,20 @@
       GPR_ASSERT(GRPC_LOG_IF_ERROR(
           "clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
       for (i = 0; i < pollset_count; i++) {
-        grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
+        grpc_pollset_add_fd(pollsets[i], sp->emfd);
         GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
                           grpc_schedule_on_exec_ctx);
-        grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+        grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
         s->active_ports++;
         sp = sp->next;
       }
     } else {
       for (i = 0; i < pollset_count; i++) {
-        grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
+        grpc_pollset_add_fd(pollsets[i], sp->emfd);
       }
       GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
                         grpc_schedule_on_exec_ctx);
-      grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+      grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
       s->active_ports++;
       sp = sp->next;
     }
@@ -538,25 +535,24 @@
   gpr_mu_unlock(&s->mu);
 }
 
-void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+void grpc_tcp_server_unref(grpc_tcp_server* s) {
   if (gpr_unref(&s->refs)) {
-    grpc_tcp_server_shutdown_listeners(exec_ctx, s);
+    grpc_tcp_server_shutdown_listeners(s);
     gpr_mu_lock(&s->mu);
-    GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting);
+    GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting);
     gpr_mu_unlock(&s->mu);
-    tcp_server_destroy(exec_ctx, s);
+    tcp_server_destroy(s);
   }
 }
 
-void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
-                                        grpc_tcp_server* s) {
+void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {
   gpr_mu_lock(&s->mu);
   s->shutdown_listeners = true;
   /* shutdown all fd's */
   if (s->active_ports) {
     grpc_tcp_listener* sp;
     for (sp = s->head; sp; sp = sp->next) {
-      grpc_fd_shutdown(exec_ctx, sp->emfd,
+      grpc_fd_shutdown(sp->emfd,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown"));
     }
   }
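
The accept path in on_read() above keeps the listener non-blocking: EINTR retries the syscall, EAGAIN re-arms grpc_fd_notify_on_read() and waits for the next edge, and anything else stops the loop. A condensed control-flow sketch; accept_loop and do_accept are hypothetical (the real code calls the platform accept wrapper, then wraps the new fd, adds it to the read-notifier pollset, and invokes on_accept_cb).

#include <errno.h>

static int do_accept(grpc_tcp_listener* sp);  // hypothetical accept wrapper

static void accept_loop(grpc_tcp_listener* sp) {
  for (;;) {
    int fd = do_accept(sp);  // returns -1 on error with errno set
    if (fd < 0) {
      switch (errno) {
        case EINTR:
          continue;  // retry the interrupted syscall
        case EAGAIN:
          grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
          return;  // re-armed; wait for the next readable edge
        default:
          return;  // the real code logs and marks the listener shut down
      }
    }
    /* wrap fd, add it to the read-notifier pollset, call on_accept_cb ... */
  }
}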
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix.h b/src/core/lib/iomgr/tcp_server_utils_posix.h
index 608fba3..6046f25 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix.h
+++ b/src/core/lib/iomgr/tcp_server_utils_posix.h
@@ -24,10 +24,6 @@
 #include "src/core/lib/iomgr/socket_utils_posix.h"
 #include "src/core/lib/iomgr/tcp_server.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* one listening port */
 typedef struct grpc_tcp_listener {
   int fd;
@@ -121,8 +117,4 @@
 /* Return true if the platform supports ifaddrs */
 bool grpc_tcp_server_have_ifaddrs(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_TCP_SERVER_UTILS_POSIX_H */
diff --git a/src/core/lib/iomgr/tcp_server_utils_posix_common.cc b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
index 72443cc..5139760 100644
--- a/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
+++ b/src/core/lib/iomgr/tcp_server_utils_posix_common.cc
@@ -55,7 +55,7 @@
   if (fgets(buf, sizeof buf, fp)) {
     char* end;
     long i = strtol(buf, &end, 10);
-    if (i > 0 && i <= INT_MAX && end && *end == 0) {
+    if (i > 0 && i <= INT_MAX && end && *end == '\n') {
       n = (int)i;
     }
   }
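
The one-line change above fixes the terminator check: the buffer comes from fgets(), which keeps the trailing newline, so after strtol() consumes the digits a fully parsed line ends in '\n', not '\0'. A standalone sketch of the same validation; parse_proc_line is an illustrative name.

#include <limits.h>
#include <stdlib.h>

/* Accept only a positive int followed immediately by the newline that
   fgets() leaves in the buffer, e.g. "32768\n". */
static int parse_proc_line(const char* buf, int fallback) {
  char* end;
  long i = strtol(buf, &end, 10);
  if (i > 0 && i <= INT_MAX && end && *end == '\n') {
    return (int)i;
  }
  return fallback;
}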
diff --git a/src/core/lib/iomgr/tcp_server_uv.cc b/src/core/lib/iomgr/tcp_server_uv.cc
index ffadf0b..1ac4919 100644
--- a/src/core/lib/iomgr/tcp_server_uv.cc
+++ b/src/core/lib/iomgr/tcp_server_uv.cc
@@ -73,8 +73,7 @@
   grpc_resource_quota* resource_quota;
 };
 
-grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
-                                   grpc_closure* shutdown_complete,
+grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
                                    const grpc_channel_args* args,
                                    grpc_tcp_server** server) {
   grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
@@ -82,11 +81,11 @@
   for (size_t i = 0; i < (args == NULL ? 0 : args->num_args); i++) {
     if (0 == strcmp(GRPC_ARG_RESOURCE_QUOTA, args->args[i].key)) {
       if (args->args[i].type == GRPC_ARG_POINTER) {
-        grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
+        grpc_resource_quota_unref_internal(s->resource_quota);
         s->resource_quota = grpc_resource_quota_ref_internal(
             (grpc_resource_quota*)args->args[i].value.pointer.p);
       } else {
-        grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
+        grpc_resource_quota_unref_internal(s->resource_quota);
         gpr_free(s);
         return GRPC_ERROR_CREATE_FROM_STATIC_STRING(
             GRPC_ARG_RESOURCE_QUOTA " must be a pointer to a buffer pool");
@@ -119,10 +118,10 @@
                            GRPC_ERROR_NONE);
 }
 
-static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+static void finish_shutdown(grpc_tcp_server* s) {
   GPR_ASSERT(s->shutdown);
   if (s->shutdown_complete != NULL) {
-    GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
   }
 
   while (s->head) {
@@ -132,18 +131,17 @@
     gpr_free(sp->handle);
     gpr_free(sp);
   }
-  grpc_resource_quota_unref_internal(exec_ctx, s->resource_quota);
+  grpc_resource_quota_unref_internal(s->resource_quota);
   gpr_free(s);
 }
 
 static void handle_close_callback(uv_handle_t* handle) {
   grpc_tcp_listener* sp = (grpc_tcp_listener*)handle->data;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   sp->server->open_ports--;
   if (sp->server->open_ports == 0 && sp->server->shutdown) {
-    finish_shutdown(&exec_ctx, sp->server);
+    finish_shutdown(sp->server);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void close_listener(grpc_tcp_listener* sp) {
@@ -153,7 +151,7 @@
   }
 }
 
-static void tcp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+static void tcp_server_destroy(grpc_tcp_server* s) {
   int immediately_done = 0;
   grpc_tcp_listener* sp;
 
@@ -168,28 +166,22 @@
   }
 
   if (immediately_done) {
-    finish_shutdown(exec_ctx, s);
+    finish_shutdown(s);
   }
 }
 
-void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+void grpc_tcp_server_unref(grpc_tcp_server* s) {
   GRPC_UV_ASSERT_SAME_THREAD();
   if (gpr_unref(&s->refs)) {
     /* Complete shutdown_starting work before destroying. */
-    grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
-    GRPC_CLOSURE_LIST_SCHED(&local_exec_ctx, &s->shutdown_starting);
-    if (exec_ctx == NULL) {
-      grpc_exec_ctx_flush(&local_exec_ctx);
-      tcp_server_destroy(&local_exec_ctx, s);
-      grpc_exec_ctx_finish(&local_exec_ctx);
-    } else {
-      grpc_exec_ctx_finish(&local_exec_ctx);
-      tcp_server_destroy(exec_ctx, s);
-    }
+    grpc_core::ExecCtx exec_ctx;
+    GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting);
+    grpc_core::ExecCtx::Get()->Flush();
+    tcp_server_destroy(s);
   }
 }
 
-static void finish_accept(grpc_exec_ctx* exec_ctx, grpc_tcp_listener* sp) {
+static void finish_accept(grpc_tcp_listener* sp) {
   grpc_tcp_server_acceptor* acceptor =
       (grpc_tcp_server_acceptor*)gpr_malloc(sizeof(*acceptor));
   uv_tcp_t* client = NULL;
@@ -225,14 +217,13 @@
   acceptor->from_server = sp->server;
   acceptor->port_index = sp->port_index;
   acceptor->fd_index = 0;
-  sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
-                           acceptor);
+  sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, NULL, acceptor);
   gpr_free(peer_name_string);
 }
 
 static void on_connect(uv_stream_t* server, int status) {
   grpc_tcp_listener* sp = (grpc_tcp_listener*)server->data;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   if (status < 0) {
     switch (status) {
@@ -253,11 +244,10 @@
 
   // Create acceptor.
   if (sp->server->on_accept_cb) {
-    finish_accept(&exec_ctx, sp);
+    finish_accept(sp);
   } else {
     sp->has_pending_connection = true;
   }
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static grpc_error* add_addr_to_server(grpc_tcp_server* s,
@@ -454,8 +444,8 @@
   return error;
 }
 
-void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* server,
-                           grpc_pollset** pollsets, size_t pollset_count,
+void grpc_tcp_server_start(grpc_tcp_server* server, grpc_pollset** pollsets,
+                           size_t pollset_count,
                            grpc_tcp_server_cb on_accept_cb, void* cb_arg) {
   grpc_tcp_listener* sp;
   (void)pollsets;
@@ -470,13 +460,12 @@
   server->on_accept_cb_arg = cb_arg;
   for (sp = server->head; sp; sp = sp->next) {
     if (sp->has_pending_connection) {
-      finish_accept(exec_ctx, sp);
+      finish_accept(sp);
       sp->has_pending_connection = false;
     }
   }
 }
 
-void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
-                                        grpc_tcp_server* s) {}
+void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {}
 
 #endif /* GRPC_UV */
diff --git a/src/core/lib/iomgr/tcp_server_windows.cc b/src/core/lib/iomgr/tcp_server_windows.cc
index f538194..8a30dfd 100644
--- a/src/core/lib/iomgr/tcp_server_windows.cc
+++ b/src/core/lib/iomgr/tcp_server_windows.cc
@@ -94,8 +94,7 @@
 
 /* Public function. Allocates the proper data structures to hold a
    grpc_tcp_server. */
-grpc_error* grpc_tcp_server_create(grpc_exec_ctx* exec_ctx,
-                                   grpc_closure* shutdown_complete,
+grpc_error* grpc_tcp_server_create(grpc_closure* shutdown_complete,
                                    const grpc_channel_args* args,
                                    grpc_tcp_server** server) {
   grpc_tcp_server* s = (grpc_tcp_server*)gpr_malloc(sizeof(grpc_tcp_server));
@@ -114,8 +113,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_server(grpc_exec_ctx* exec_ctx, void* arg,
-                           grpc_error* error) {
+static void destroy_server(void* arg, grpc_error* error) {
   grpc_tcp_server* s = (grpc_tcp_server*)arg;
 
   /* Now that the accepts have been aborted, we can destroy the sockets.
@@ -128,18 +126,16 @@
     grpc_winsocket_destroy(sp->socket);
     gpr_free(sp);
   }
-  grpc_channel_args_destroy(exec_ctx, s->channel_args);
+  grpc_channel_args_destroy(s->channel_args);
   gpr_free(s);
 }
 
-static void finish_shutdown_locked(grpc_exec_ctx* exec_ctx,
-                                   grpc_tcp_server* s) {
+static void finish_shutdown_locked(grpc_tcp_server* s) {
   if (s->shutdown_complete != NULL) {
-    GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
   }
 
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       GRPC_CLOSURE_CREATE(destroy_server, s, grpc_schedule_on_exec_ctx),
       GRPC_ERROR_NONE);
 }
@@ -157,14 +153,14 @@
   gpr_mu_unlock(&s->mu);
 }
 
-static void tcp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+static void tcp_server_destroy(grpc_tcp_server* s) {
   grpc_tcp_listener* sp;
   gpr_mu_lock(&s->mu);
 
   /* First, shutdown all fd's. This will queue abortion calls for all
      of the pending accepts due to the normal operation mechanism. */
   if (s->active_ports == 0) {
-    finish_shutdown_locked(exec_ctx, s);
+    finish_shutdown_locked(s);
   } else {
     for (sp = s->head; sp; sp = sp->next) {
       sp->shutting_down = 1;
@@ -174,13 +170,13 @@
   gpr_mu_unlock(&s->mu);
 }
 
-void grpc_tcp_server_unref(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s) {
+void grpc_tcp_server_unref(grpc_tcp_server* s) {
   if (gpr_unref(&s->refs)) {
-    grpc_tcp_server_shutdown_listeners(exec_ctx, s);
+    grpc_tcp_server_shutdown_listeners(s);
     gpr_mu_lock(&s->mu);
-    GRPC_CLOSURE_LIST_SCHED(exec_ctx, &s->shutdown_starting);
+    GRPC_CLOSURE_LIST_SCHED(&s->shutdown_starting);
     gpr_mu_unlock(&s->mu);
-    tcp_server_destroy(exec_ctx, s);
+    tcp_server_destroy(s);
   }
 }
 
@@ -234,19 +230,17 @@
   return error;
 }
 
-static void decrement_active_ports_and_notify_locked(grpc_exec_ctx* exec_ctx,
-                                                     grpc_tcp_listener* sp) {
+static void decrement_active_ports_and_notify_locked(grpc_tcp_listener* sp) {
   sp->shutting_down = 0;
   GPR_ASSERT(sp->server->active_ports > 0);
   if (0 == --sp->server->active_ports) {
-    finish_shutdown_locked(exec_ctx, sp->server);
+    finish_shutdown_locked(sp->server);
   }
 }
 
 /* In order to do an async accept, we need to create a socket first which
    will be the one assigned to the new incoming connection. */
-static grpc_error* start_accept_locked(grpc_exec_ctx* exec_ctx,
-                                       grpc_tcp_listener* port) {
+static grpc_error* start_accept_locked(grpc_tcp_listener* port) {
   SOCKET sock = INVALID_SOCKET;
   BOOL success;
   DWORD addrlen = sizeof(struct sockaddr_in6) + 16;
@@ -285,7 +279,7 @@
   /* We're ready to do the accept. Calling grpc_socket_notify_on_read may
      immediately process an accept that happened in the meantime. */
   port->new_socket = sock;
-  grpc_socket_notify_on_read(exec_ctx, port->socket, &port->on_accept);
+  grpc_socket_notify_on_read(port->socket, &port->on_accept);
   port->outstanding_calls++;
   return error;
 
@@ -296,7 +290,7 @@
 }
 
 /* Event manager callback when reads are ready. */
-static void on_accept(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_accept(void* arg, grpc_error* error) {
   grpc_tcp_listener* sp = (grpc_tcp_listener*)arg;
   SOCKET sock = sp->new_socket;
   grpc_winsocket_callback_info* info = &sp->socket->read_info;
@@ -357,7 +351,7 @@
         gpr_free(utf8_message);
       }
       gpr_asprintf(&fd_name, "tcp_server:%s", peer_name_string);
-      ep = grpc_tcp_create(exec_ctx, grpc_winsocket_create(sock, fd_name),
+      ep = grpc_tcp_create(grpc_winsocket_create(sock, fd_name),
                            sp->server->channel_args, peer_name_string);
       gpr_free(fd_name);
       gpr_free(peer_name_string);
@@ -375,17 +369,15 @@
     acceptor->from_server = sp->server;
     acceptor->port_index = sp->port_index;
     acceptor->fd_index = 0;
-    sp->server->on_accept_cb(exec_ctx, sp->server->on_accept_cb_arg, ep, NULL,
-                             acceptor);
+    sp->server->on_accept_cb(sp->server->on_accept_cb_arg, ep, NULL, acceptor);
   }
   /* As we were notified from the IOCP of one and exactly one accept,
      the former socket we created has now either been destroyed or assigned
      to the new connection. We need to create a new one for the next
      connection. */
-  GPR_ASSERT(
-      GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
+  GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(sp)));
   if (0 == --sp->outstanding_calls) {
-    decrement_active_ports_and_notify_locked(exec_ctx, sp);
+    decrement_active_ports_and_notify_locked(sp);
   }
   gpr_mu_unlock(&sp->server->mu);
 }
@@ -522,8 +514,8 @@
   return error;
 }
 
-void grpc_tcp_server_start(grpc_exec_ctx* exec_ctx, grpc_tcp_server* s,
-                           grpc_pollset** pollset, size_t pollset_count,
+void grpc_tcp_server_start(grpc_tcp_server* s, grpc_pollset** pollset,
+                           size_t pollset_count,
                            grpc_tcp_server_cb on_accept_cb,
                            void* on_accept_cb_arg) {
   grpc_tcp_listener* sp;
@@ -534,14 +526,12 @@
   s->on_accept_cb = on_accept_cb;
   s->on_accept_cb_arg = on_accept_cb_arg;
   for (sp = s->head; sp; sp = sp->next) {
-    GPR_ASSERT(
-        GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(exec_ctx, sp)));
+    GPR_ASSERT(GRPC_LOG_IF_ERROR("start_accept", start_accept_locked(sp)));
     s->active_ports++;
   }
   gpr_mu_unlock(&s->mu);
 }
 
-void grpc_tcp_server_shutdown_listeners(grpc_exec_ctx* exec_ctx,
-                                        grpc_tcp_server* s) {}
+void grpc_tcp_server_shutdown_listeners(grpc_tcp_server* s) {}
 
 #endif /* GRPC_WINSOCK_SOCKET */
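The finish_shutdown_locked() change above keeps the same deferred-destruction idiom, just without the context argument: a heap-allocated closure is created and handed to the scheduler in one expression. A small sketch of that shape, with example_cleanup and example_schedule_cleanup as hypothetical names:

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

// Hypothetical cleanup callback; real code would free server state here.
static void example_cleanup(void* arg, grpc_error* error) {
  (void)arg;
  (void)error;
}

static void example_schedule_cleanup(void* arg) {
  // GRPC_CLOSURE_CREATE allocates the closure, GRPC_CLOSURE_SCHED queues it
  // on the thread-local ExecCtx (which must be live on this thread);
  // neither macro takes exec_ctx any more.
  GRPC_CLOSURE_SCHED(
      GRPC_CLOSURE_CREATE(example_cleanup, arg, grpc_schedule_on_exec_ctx),
      GRPC_ERROR_NONE);
}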
diff --git a/src/core/lib/iomgr/tcp_uv.cc b/src/core/lib/iomgr/tcp_uv.cc
index 327bd3d..c227f5a 100644
--- a/src/core/lib/iomgr/tcp_uv.cc
+++ b/src/core/lib/iomgr/tcp_uv.cc
@@ -77,18 +77,17 @@
 }
 
 static void tcp_free(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
-  grpc_resource_user_unref(exec_ctx, tcp->resource_user);
+  grpc_resource_user_unref(tcp->resource_user);
   gpr_free(tcp->handle);
   gpr_free(tcp->peer_string);
   gpr_free(tcp);
 }
 
 #ifndef NDEBUG
-#define TCP_UNREF(exec_ctx, tcp, reason) \
-  tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
 #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
-                      const char* reason, const char* file, int line) {
+static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
+                      int line) {
   if (grpc_tcp_trace.enabled()) {
     gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
     gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -96,7 +95,7 @@
             val - 1);
   }
   if (gpr_unref(&tcp->refcount)) {
-    tcp_free(exec_ctx, tcp);
+    tcp_free(tcp);
   }
 }
 
@@ -111,11 +110,11 @@
   gpr_ref(&tcp->refcount);
 }
 #else
-#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
 #define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void tcp_unref(grpc_tcp* tcp) {
   if (gpr_unref(&tcp->refcount)) {
-    tcp_free(exec_ctx, tcp);
+    tcp_free(tcp);
   }
 }
 
@@ -123,15 +122,14 @@
 #endif
 
 static void uv_close_callback(uv_handle_t* handle) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_tcp* tcp = (grpc_tcp*)handle->data;
-  TCP_UNREF(&exec_ctx, tcp, "destroy");
-  grpc_exec_ctx_finish(&exec_ctx);
+  TCP_UNREF(tcp, "destroy");
 }
 
 static void alloc_uv_buf(uv_handle_t* handle, size_t suggested_size,
                          uv_buf_t* buf) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_tcp* tcp = (grpc_tcp*)handle->data;
   (void)suggested_size;
   /* Before calling uv_read_start, we allocate a buffer with exactly one slice
@@ -139,11 +137,9 @@
    * allocation was successful. So slices[0] should always exist here */
   buf->base = (char*)GRPC_SLICE_START_PTR(tcp->read_slices->slices[0]);
   buf->len = GRPC_SLICE_LENGTH(tcp->read_slices->slices[0]);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void call_read_cb(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
-                         grpc_error* error) {
+static void call_read_cb(grpc_tcp* tcp, grpc_error* error) {
   grpc_closure* cb = tcp->read_cb;
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_DEBUG, "TCP:%p call_cb %p %p:%p", tcp, cb, cb->cb, cb->cb_arg);
@@ -160,26 +156,26 @@
   }
   tcp->read_slices = NULL;
   tcp->read_cb = NULL;
-  GRPC_CLOSURE_RUN(exec_ctx, cb, error);
+  GRPC_CLOSURE_RUN(cb, error);
 }
 
 static void read_callback(uv_stream_t* stream, ssize_t nread,
                           const uv_buf_t* buf) {
   grpc_error* error;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_tcp* tcp = (grpc_tcp*)stream->data;
   grpc_slice_buffer garbage;
   if (nread == 0) {
     // Nothing happened. Wait for the next callback
     return;
   }
-  TCP_UNREF(&exec_ctx, tcp, "read");
+  TCP_UNREF(tcp, "read");
   // TODO(murgatroid99): figure out what the return value here means
   uv_read_stop(stream);
   if (nread == UV_EOF) {
     error =
         tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING("EOF"), tcp);
-    grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, tcp->read_slices);
+    grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
   } else if (nread > 0) {
     // Successful read
     error = GRPC_ERROR_NONE;
@@ -189,20 +185,18 @@
       grpc_slice_buffer_init(&garbage);
       grpc_slice_buffer_trim_end(
           tcp->read_slices, tcp->read_slices->length - (size_t)nread, &garbage);
-      grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, &garbage);
+      grpc_slice_buffer_reset_and_unref_internal(&garbage);
     }
   } else {
     // nread < 0: Error
     error = tcp_annotate_error(
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("TCP Read failed"), tcp);
-    grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, tcp->read_slices);
+    grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
   }
-  call_read_cb(&exec_ctx, tcp, error);
-  grpc_exec_ctx_finish(&exec_ctx);
+  call_read_cb(tcp, error);
 }
 
-static void tcp_read_allocation_done(grpc_exec_ctx* exec_ctx, void* tcpp,
-                                     grpc_error* error) {
+static void tcp_read_allocation_done(void* tcpp, grpc_error* error) {
   int status;
   grpc_tcp* tcp = (grpc_tcp*)tcpp;
   if (grpc_tcp_trace.enabled()) {
@@ -222,9 +216,9 @@
     }
   }
   if (error != GRPC_ERROR_NONE) {
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, tcp->read_slices);
-    call_read_cb(exec_ctx, tcp, GRPC_ERROR_REF(error));
-    TCP_UNREF(exec_ctx, tcp, "read");
+    grpc_slice_buffer_reset_and_unref_internal(tcp->read_slices);
+    call_read_cb(tcp, GRPC_ERROR_REF(error));
+    TCP_UNREF(tcp, "read");
   }
   if (grpc_tcp_trace.enabled()) {
     const char* str = grpc_error_string(error);
@@ -232,16 +226,16 @@
   }
 }
 
-static void uv_endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                             grpc_slice_buffer* read_slices, grpc_closure* cb) {
+static void uv_endpoint_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
+                             grpc_closure* cb) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
   GRPC_UV_ASSERT_SAME_THREAD();
   GPR_ASSERT(tcp->read_cb == NULL);
   tcp->read_cb = cb;
   tcp->read_slices = read_slices;
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices);
+  grpc_slice_buffer_reset_and_unref_internal(read_slices);
   TCP_REF(tcp, "read");
-  grpc_resource_user_alloc_slices(exec_ctx, &tcp->slice_allocator,
+  grpc_resource_user_alloc_slices(&tcp->slice_allocator,
                                   GRPC_TCP_DEFAULT_READ_SLICE_SIZE, 1,
                                   tcp->read_slices);
 }
@@ -249,10 +243,10 @@
 static void write_callback(uv_write_t* req, int status) {
   grpc_tcp* tcp = (grpc_tcp*)req->data;
   grpc_error* error;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_closure* cb = tcp->write_cb;
   tcp->write_cb = NULL;
-  TCP_UNREF(&exec_ctx, tcp, "write");
+  TCP_UNREF(tcp, "write");
   if (status == 0) {
     error = GRPC_ERROR_NONE;
   } else {
@@ -264,11 +258,10 @@
     gpr_log(GPR_DEBUG, "write complete on %p: error=%s", tcp, str);
   }
   gpr_free(tcp->write_buffers);
-  GRPC_CLOSURE_SCHED(&exec_ctx, cb, error);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_CLOSURE_SCHED(cb, error);
 }
 
-static void uv_endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+static void uv_endpoint_write(grpc_endpoint* ep,
                               grpc_slice_buffer* write_slices,
                               grpc_closure* cb) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
@@ -291,7 +284,7 @@
   }
 
   if (tcp->shutting_down) {
-    GRPC_CLOSURE_SCHED(exec_ctx, cb,
+    GRPC_CLOSURE_SCHED(cb,
                        tcp_annotate_error(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                                               "TCP socket is shutting down"),
                                           tcp));
@@ -304,7 +297,7 @@
   if (tcp->write_slices->count == 0) {
     // No slices means we don't have to do anything,
     // and libuv doesn't like empty writes
-    GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
     return;
   }
 
@@ -325,37 +318,31 @@
            write_callback);
 }
 
-static void uv_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                              grpc_pollset* pollset) {
+static void uv_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
   // No-op. We're ignoring pollsets currently
-  (void)exec_ctx;
   (void)ep;
   (void)pollset;
   grpc_tcp* tcp = (grpc_tcp*)ep;
   tcp->pollset = pollset;
 }
 
-static void uv_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+static void uv_add_to_pollset_set(grpc_endpoint* ep,
                                   grpc_pollset_set* pollset) {
   // No-op. We're ignoring pollsets currently
-  (void)exec_ctx;
   (void)ep;
   (void)pollset;
 }
 
-static void uv_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                       grpc_endpoint* ep,
+static void uv_delete_from_pollset_set(grpc_endpoint* ep,
                                        grpc_pollset_set* pollset) {
   // No-op. We're ignoring pollsets currently
-  (void)exec_ctx;
   (void)ep;
   (void)pollset;
 }
 
 static void shutdown_callback(uv_shutdown_t* req, int status) {}
 
-static void uv_endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                                 grpc_error* why) {
+static void uv_endpoint_shutdown(grpc_endpoint* ep, grpc_error* why) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
   if (!tcp->shutting_down) {
     if (grpc_tcp_trace.enabled()) {
@@ -365,12 +352,12 @@
     tcp->shutting_down = true;
     uv_shutdown_t* req = &tcp->shutdown_req;
     uv_shutdown(req, (uv_stream_t*)tcp->handle, shutdown_callback);
-    grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
+    grpc_resource_user_shutdown(tcp->resource_user);
   }
   GRPC_ERROR_UNREF(why);
 }
 
-static void uv_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+static void uv_destroy(grpc_endpoint* ep) {
   grpc_network_status_unregister_endpoint(ep);
   grpc_tcp* tcp = (grpc_tcp*)ep;
   uv_close((uv_handle_t*)tcp->handle, uv_close_callback);
@@ -403,7 +390,7 @@
                                grpc_resource_quota* resource_quota,
                                char* peer_string) {
   grpc_tcp* tcp = (grpc_tcp*)gpr_malloc(sizeof(grpc_tcp));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   if (grpc_tcp_trace.enabled()) {
     gpr_log(GPR_DEBUG, "Creating TCP endpoint %p", tcp);
@@ -430,7 +417,6 @@
   uv_unref((uv_handle_t*)handle);
 #endif
 
-  grpc_exec_ctx_finish(&exec_ctx);
   return &tcp->base;
 }
 
diff --git a/src/core/lib/iomgr/tcp_uv.h b/src/core/lib/iomgr/tcp_uv.h
index 4b4da36..fd6d190 100644
--- a/src/core/lib/iomgr/tcp_uv.h
+++ b/src/core/lib/iomgr/tcp_uv.h
@@ -42,18 +42,10 @@
 
 #define GRPC_TCP_DEFAULT_READ_SLICE_SIZE 8192
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 grpc_endpoint* grpc_tcp_create(uv_tcp_t* handle,
                                grpc_resource_quota* resource_quota,
                                char* peer_string);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_UV */
 
 #endif /* GRPC_CORE_LIB_IOMGR_TCP_UV_H */
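The TCP_REF/TCP_UNREF macros in tcp_uv.cc above and tcp_windows.cc below keep their debug/release split, now minus the context argument. A generic sketch of that pattern, built around a hypothetical example_widget type rather than anything in gRPC:

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>

typedef struct {
  gpr_refcount refs;
} example_widget;  // hypothetical type, not part of gRPC

static void example_widget_free(example_widget* w) { gpr_free(w); }

#ifndef NDEBUG
#define EXAMPLE_WIDGET_UNREF(w, reason) \
  example_widget_unref((w), (reason), __FILE__, __LINE__)
static void example_widget_unref(example_widget* w, const char* reason,
                                 const char* file, int line) {
  // Debug builds keep the reason/file/line trace; there is simply no
  // exec_ctx to forward any more.
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "WIDGET unref %p for %s", w,
          reason);
  if (gpr_unref(&w->refs)) example_widget_free(w);
}
#else
#define EXAMPLE_WIDGET_UNREF(w, reason) example_widget_unref((w))
static void example_widget_unref(example_widget* w) {
  if (gpr_unref(&w->refs)) example_widget_free(w);
}
#endif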
diff --git a/src/core/lib/iomgr/tcp_windows.cc b/src/core/lib/iomgr/tcp_windows.cc
index 33868cd..6d091b7 100644
--- a/src/core/lib/iomgr/tcp_windows.cc
+++ b/src/core/lib/iomgr/tcp_windows.cc
@@ -109,21 +109,20 @@
   char* peer_string;
 } grpc_tcp;
 
-static void tcp_free(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void tcp_free(grpc_tcp* tcp) {
   grpc_winsocket_destroy(tcp->socket);
   gpr_mu_destroy(&tcp->mu);
   gpr_free(tcp->peer_string);
-  grpc_resource_user_unref(exec_ctx, tcp->resource_user);
+  grpc_resource_user_unref(tcp->resource_user);
   if (tcp->shutting_down) GRPC_ERROR_UNREF(tcp->shutdown_error);
   gpr_free(tcp);
 }
 
 #ifndef NDEBUG
-#define TCP_UNREF(exec_ctx, tcp, reason) \
-  tcp_unref((exec_ctx), (tcp), (reason), __FILE__, __LINE__)
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp), (reason), __FILE__, __LINE__)
 #define TCP_REF(tcp, reason) tcp_ref((tcp), (reason), __FILE__, __LINE__)
-static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp,
-                      const char* reason, const char* file, int line) {
+static void tcp_unref(grpc_tcp* tcp, const char* reason, const char* file,
+                      int line) {
   if (grpc_tcp_trace.enabled()) {
     gpr_atm val = gpr_atm_no_barrier_load(&tcp->refcount.count);
     gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -131,7 +130,7 @@
             val - 1);
   }
   if (gpr_unref(&tcp->refcount)) {
-    tcp_free(exec_ctx, tcp);
+    tcp_free(tcp);
   }
 }
 
@@ -146,11 +145,11 @@
   gpr_ref(&tcp->refcount);
 }
 #else
-#define TCP_UNREF(exec_ctx, tcp, reason) tcp_unref((exec_ctx), (tcp))
+#define TCP_UNREF(tcp, reason) tcp_unref((tcp))
 #define TCP_REF(tcp, reason) tcp_ref((tcp))
-static void tcp_unref(grpc_exec_ctx* exec_ctx, grpc_tcp* tcp) {
+static void tcp_unref(grpc_tcp* tcp) {
   if (gpr_unref(&tcp->refcount)) {
-    tcp_free(exec_ctx, tcp);
+    tcp_free(tcp);
   }
 }
 
@@ -158,7 +157,7 @@
 #endif
 
 /* Asynchronous callback from the IOCP, or the background thread. */
-static void on_read(grpc_exec_ctx* exec_ctx, void* tcpp, grpc_error* error) {
+static void on_read(void* tcpp, grpc_error* error) {
   grpc_tcp* tcp = (grpc_tcp*)tcpp;
   grpc_closure* cb = tcp->read_cb;
   grpc_winsocket* socket = tcp->socket;
@@ -172,13 +171,13 @@
       char* utf8_message = gpr_format_message(info->wsa_error);
       error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(utf8_message);
       gpr_free(utf8_message);
-      grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
+      grpc_slice_unref_internal(tcp->read_slice);
     } else {
       if (info->bytes_transfered != 0 && !tcp->shutting_down) {
         sub = grpc_slice_sub_no_ref(tcp->read_slice, 0, info->bytes_transfered);
         grpc_slice_buffer_add(tcp->read_slices, sub);
       } else {
-        grpc_slice_unref_internal(exec_ctx, tcp->read_slice);
+        grpc_slice_unref_internal(tcp->read_slice);
         error = tcp->shutting_down
                     ? GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                           "TCP stream shutting down", &tcp->shutdown_error, 1)
@@ -188,12 +187,12 @@
   }
 
   tcp->read_cb = NULL;
-  TCP_UNREF(exec_ctx, tcp, "read");
-  GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
+  TCP_UNREF(tcp, "read");
+  GRPC_CLOSURE_SCHED(cb, error);
 }
 
-static void win_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                     grpc_slice_buffer* read_slices, grpc_closure* cb) {
+static void win_read(grpc_endpoint* ep, grpc_slice_buffer* read_slices,
+                     grpc_closure* cb) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
   grpc_winsocket* handle = tcp->socket;
   grpc_winsocket_callback_info* info = &handle->read_info;
@@ -204,15 +203,14 @@
 
   if (tcp->shutting_down) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx, cb,
-        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-            "TCP socket is shutting down", &tcp->shutdown_error, 1));
+        cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                "TCP socket is shutting down", &tcp->shutdown_error, 1));
     return;
   }
 
   tcp->read_cb = cb;
   tcp->read_slices = read_slices;
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, read_slices);
+  grpc_slice_buffer_reset_and_unref_internal(read_slices);
 
   tcp->read_slice = GRPC_SLICE_MALLOC(8192);
 
@@ -230,7 +228,7 @@
   /* Did we get data immediately ? Yay. */
   if (info->wsa_error != WSAEWOULDBLOCK) {
     info->bytes_transfered = bytes_read;
-    GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&tcp->on_read, GRPC_ERROR_NONE);
     return;
   }
 
@@ -243,17 +241,17 @@
     int wsa_error = WSAGetLastError();
     if (wsa_error != WSA_IO_PENDING) {
       info->wsa_error = wsa_error;
-      GRPC_CLOSURE_SCHED(exec_ctx, &tcp->on_read,
+      GRPC_CLOSURE_SCHED(&tcp->on_read,
                          GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
       return;
     }
   }
 
-  grpc_socket_notify_on_read(exec_ctx, tcp->socket, &tcp->on_read);
+  grpc_socket_notify_on_read(tcp->socket, &tcp->on_read);
 }
 
 /* Asynchronous callback from the IOCP, or the background thread. */
-static void on_write(grpc_exec_ctx* exec_ctx, void* tcpp, grpc_error* error) {
+static void on_write(void* tcpp, grpc_error* error) {
   grpc_tcp* tcp = (grpc_tcp*)tcpp;
   grpc_winsocket* handle = tcp->socket;
   grpc_winsocket_callback_info* info = &handle->write_info;
@@ -274,13 +272,13 @@
     }
   }
 
-  TCP_UNREF(exec_ctx, tcp, "write");
-  GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
+  TCP_UNREF(tcp, "write");
+  GRPC_CLOSURE_SCHED(cb, error);
 }
 
 /* Initiates a write. */
-static void win_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                      grpc_slice_buffer* slices, grpc_closure* cb) {
+static void win_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                      grpc_closure* cb) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
   grpc_winsocket* socket = tcp->socket;
   grpc_winsocket_callback_info* info = &socket->write_info;
@@ -294,9 +292,8 @@
 
   if (tcp->shutting_down) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx, cb,
-        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-            "TCP socket is shutting down", &tcp->shutdown_error, 1));
+        cb, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                "TCP socket is shutting down", &tcp->shutdown_error, 1));
     return;
   }
 
@@ -327,7 +324,7 @@
     grpc_error* error = status == 0
                             ? GRPC_ERROR_NONE
                             : GRPC_WSA_ERROR(info->wsa_error, "WSASend");
-    GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
+    GRPC_CLOSURE_SCHED(cb, error);
     if (allocated) gpr_free(allocated);
     return;
   }
@@ -344,35 +341,32 @@
   if (status != 0) {
     int wsa_error = WSAGetLastError();
     if (wsa_error != WSA_IO_PENDING) {
-      TCP_UNREF(exec_ctx, tcp, "write");
-      GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
+      TCP_UNREF(tcp, "write");
+      GRPC_CLOSURE_SCHED(cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
       return;
     }
   }
 
   /* As all is now set up, we can ask for the IOCP notification. It may
      trigger the callback immediately, but no matter. */
-  grpc_socket_notify_on_write(exec_ctx, socket, &tcp->on_write);
+  grpc_socket_notify_on_write(socket, &tcp->on_write);
 }
 
-static void win_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                               grpc_pollset* ps) {
+static void win_add_to_pollset(grpc_endpoint* ep, grpc_pollset* ps) {
   grpc_tcp* tcp;
   (void)ps;
   tcp = (grpc_tcp*)ep;
   grpc_iocp_add_socket(tcp->socket);
 }
 
-static void win_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                                   grpc_pollset_set* pss) {
+static void win_add_to_pollset_set(grpc_endpoint* ep, grpc_pollset_set* pss) {
   grpc_tcp* tcp;
   (void)pss;
   tcp = (grpc_tcp*)ep;
   grpc_iocp_add_socket(tcp->socket);
 }
 
-static void win_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_endpoint* ep,
+static void win_delete_from_pollset_set(grpc_endpoint* ep,
                                         grpc_pollset_set* pss) {}
 
 /* Initiates a shutdown of the TCP endpoint. This will queue abort callbacks
@@ -381,8 +375,7 @@
    we're not going to protect against these. However the IO Completion Port
    callback will happen from another thread, so we need to protect against
    concurrent access of the data structure in that regard. */
-static void win_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                         grpc_error* why) {
+static void win_shutdown(grpc_endpoint* ep, grpc_error* why) {
   grpc_tcp* tcp = (grpc_tcp*)ep;
   gpr_mu_lock(&tcp->mu);
   /* At that point, what may happen is that we're already inside the IOCP
@@ -395,13 +388,13 @@
   }
   grpc_winsocket_shutdown(tcp->socket);
   gpr_mu_unlock(&tcp->mu);
-  grpc_resource_user_shutdown(exec_ctx, tcp->resource_user);
+  grpc_resource_user_shutdown(tcp->resource_user);
 }
 
-static void win_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+static void win_destroy(grpc_endpoint* ep) {
   grpc_network_status_unregister_endpoint(ep);
   grpc_tcp* tcp = (grpc_tcp*)ep;
-  TCP_UNREF(exec_ctx, tcp, "destroy");
+  TCP_UNREF(tcp, "destroy");
 }
 
 static char* win_get_peer(grpc_endpoint* ep) {
@@ -427,14 +420,14 @@
                                       win_get_peer,
                                       win_get_fd};
 
-grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
+grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
                                grpc_channel_args* channel_args,
                                const char* peer_string) {
   grpc_resource_quota* resource_quota = grpc_resource_quota_create(NULL);
   if (channel_args != NULL) {
     for (size_t i = 0; i < channel_args->num_args; i++) {
       if (0 == strcmp(channel_args->args[i].key, GRPC_ARG_RESOURCE_QUOTA)) {
-        grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+        grpc_resource_quota_unref_internal(resource_quota);
         resource_quota = grpc_resource_quota_ref_internal(
             (grpc_resource_quota*)channel_args->args[i].value.pointer.p);
       }
@@ -452,7 +445,7 @@
   tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
   /* Tell network status tracking code about the new endpoint */
   grpc_network_status_register_endpoint(&tcp->base);
-  grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
 
   return &tcp->base;
 }
diff --git a/src/core/lib/iomgr/tcp_windows.h b/src/core/lib/iomgr/tcp_windows.h
index 9c7ccdf..8578a35 100644
--- a/src/core/lib/iomgr/tcp_windows.h
+++ b/src/core/lib/iomgr/tcp_windows.h
@@ -35,23 +35,15 @@
 #include "src/core/lib/iomgr/endpoint.h"
 #include "src/core/lib/iomgr/socket_windows.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Create a tcp endpoint given a winsock handle.
  * Takes ownership of the handle.
  */
-grpc_endpoint* grpc_tcp_create(grpc_exec_ctx* exec_ctx, grpc_winsocket* socket,
+grpc_endpoint* grpc_tcp_create(grpc_winsocket* socket,
                                grpc_channel_args* channel_args,
                                const char* peer_string);
 
 grpc_error* grpc_tcp_prepare_socket(SOCKET sock);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif
 
 #endif /* GRPC_CORE_LIB_IOMGR_TCP_WINDOWS_H */
diff --git a/src/core/lib/iomgr/time_averaged_stats.h b/src/core/lib/iomgr/time_averaged_stats.h
index d38ed27..8745f7f 100644
--- a/src/core/lib/iomgr/time_averaged_stats.h
+++ b/src/core/lib/iomgr/time_averaged_stats.h
@@ -19,10 +19,6 @@
 #ifndef GRPC_CORE_LIB_IOMGR_TIME_AVERAGED_STATS_H
 #define GRPC_CORE_LIB_IOMGR_TIME_AVERAGED_STATS_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* This tracks a time-decaying weighted average.  It works by collecting
    batches of samples and then mixing their average into a time-decaying
    weighted mean.  It is designed for batch operations where we do many adds
@@ -74,8 +70,4 @@
    value. */
 double grpc_time_averaged_stats_update_average(grpc_time_averaged_stats* stats);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_TIME_AVERAGED_STATS_H */
diff --git a/src/core/lib/iomgr/timer.h b/src/core/lib/iomgr/timer.h
index cd8334e..8204985 100644
--- a/src/core/lib/iomgr/timer.h
+++ b/src/core/lib/iomgr/timer.h
@@ -32,10 +32,6 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/iomgr/iomgr.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_timer grpc_timer;
 
 /* Initialize *timer. When expired or canceled, closure will be called with
@@ -44,8 +40,8 @@
    application code should check the error to determine how it was invoked. The
    application callback is also responsible for maintaining information about
    when to free up any user-level state. */
-void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
-                     grpc_millis deadline, grpc_closure* closure);
+void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
+                     grpc_closure* closure);
 
 /* Initialize *timer without setting it. This can later be passed through
    the regular init or cancel */
@@ -77,7 +73,7 @@
    matches this aim.
 
    Requires: cancel() must happen after init() on a given timer */
-void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer);
+void grpc_timer_cancel(grpc_timer* timer);
 
 /* iomgr internal api for dealing with timers */
 
@@ -94,10 +90,9 @@
    *next is never guaranteed to be updated on any given execution; however,
    with high probability at least one thread in the system will see an update
    at any time slice. */
-grpc_timer_check_result grpc_timer_check(grpc_exec_ctx* exec_ctx,
-                                         grpc_millis* next);
-void grpc_timer_list_init(grpc_exec_ctx* exec_ctx);
-void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx);
+grpc_timer_check_result grpc_timer_check(grpc_millis* next);
+void grpc_timer_list_init();
+void grpc_timer_list_shutdown();
 
 /* Consume a kick issued by grpc_kick_poller */
 void grpc_timer_consume_kick(void);
@@ -106,8 +101,4 @@
 
 void grpc_kick_poller(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_TIMER_H */
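With the signatures above, arming a timer needs only an absolute grpc_millis deadline and a closure; the execution context is implicit. A minimal usage sketch under those assumptions; example_on_alarm and example_arm_alarm are hypothetical names:

#include "src/core/lib/iomgr/timer.h"

// Hypothetical callback: receives GRPC_ERROR_NONE on expiry and
// GRPC_ERROR_CANCELLED if grpc_timer_cancel() won the race.
static void example_on_alarm(void* arg, grpc_error* error) {
  (void)arg;
  (void)error;
}

static void example_arm_alarm(grpc_timer* timer, grpc_closure* closure) {
  grpc_core::ExecCtx exec_ctx;  // callers supply the context implicitly now
  GRPC_CLOSURE_INIT(closure, example_on_alarm, nullptr,
                    grpc_schedule_on_exec_ctx);
  // Fire ~100ms from "now" as cached by the thread-local ExecCtx.
  grpc_timer_init(timer, grpc_core::ExecCtx::Get()->Now() + 100, closure);
}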
diff --git a/src/core/lib/iomgr/timer_generic.cc b/src/core/lib/iomgr/timer_generic.cc
index fa95c43..103144e 100644
--- a/src/core/lib/iomgr/timer_generic.cc
+++ b/src/core/lib/iomgr/timer_generic.cc
@@ -225,8 +225,7 @@
   return a + b;
 }
 
-static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx* exec_ctx,
-                                                       gpr_atm now,
+static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
                                                        gpr_atm* next,
                                                        grpc_error* error);
 
@@ -236,7 +235,7 @@
              : grpc_timer_heap_top(&shard->heap)->deadline;
 }
 
-void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {
+void grpc_timer_list_init() {
   uint32_t i;
 
   g_num_shards = GPR_MIN(1, 2 * gpr_cpu_num_cores());
@@ -247,7 +246,7 @@
   g_shared_mutables.initialized = true;
   g_shared_mutables.checker_mu = GPR_SPINLOCK_INITIALIZER;
   gpr_mu_init(&g_shared_mutables.mu);
-  g_shared_mutables.min_timer = grpc_exec_ctx_now(exec_ctx);
+  g_shared_mutables.min_timer = grpc_core::ExecCtx::Get()->Now();
   gpr_tls_init(&g_last_seen_min_timer);
   gpr_tls_set(&g_last_seen_min_timer, 0);
 
@@ -267,10 +266,10 @@
   INIT_TIMER_HASH_TABLE();
 }
 
-void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx) {
+void grpc_timer_list_shutdown() {
   size_t i;
   run_some_expired_timers(
-      exec_ctx, GPR_ATM_MAX, nullptr,
+      GPR_ATM_MAX, nullptr,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Timer list shutdown"));
   for (i = 0; i < g_num_shards; i++) {
     timer_shard* shard = &g_shards[i];
@@ -323,8 +322,8 @@
 
 void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = false; }
 
-void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
-                     grpc_millis deadline, grpc_closure* closure) {
+void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
+                     grpc_closure* closure) {
   int is_first_timer = 0;
   timer_shard* shard = &g_shards[GPR_HASH_POINTER(timer, g_num_shards)];
   timer->closure = closure;
@@ -337,12 +336,12 @@
   if (grpc_timer_trace.enabled()) {
     gpr_log(GPR_DEBUG,
             "TIMER %p: SET %" PRIdPTR " now %" PRIdPTR " call %p[%p]", timer,
-            deadline, grpc_exec_ctx_now(exec_ctx), closure, closure->cb);
+            deadline, grpc_core::ExecCtx::Get()->Now(), closure, closure->cb);
   }
 
   if (!g_shared_mutables.initialized) {
     timer->pending = false;
-    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure,
+    GRPC_CLOSURE_SCHED(timer->closure,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                            "Attempt to create timer before initialization"));
     return;
@@ -350,10 +349,10 @@
 
   gpr_mu_lock(&shard->mu);
   timer->pending = true;
-  grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+  grpc_millis now = grpc_core::ExecCtx::Get()->Now();
   if (deadline <= now) {
     timer->pending = false;
-    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
     gpr_mu_unlock(&shard->mu);
     /* early out */
     return;
@@ -414,7 +413,7 @@
   gpr_tls_set(&g_last_seen_min_timer, 0);
 }
 
-void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer) {
+void grpc_timer_cancel(grpc_timer* timer) {
   if (!g_shared_mutables.initialized) {
     /* must have already been cancelled, also the shard mutex is invalid */
     return;
@@ -430,7 +429,7 @@
   if (timer->pending) {
     REMOVE_FROM_HASH_TABLE(timer);
 
-    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
+    GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED);
     timer->pending = false;
     if (timer->heap_index == INVALID_HEAP_INDEX) {
       list_remove(timer);
@@ -516,15 +515,14 @@
 }
 
 /* REQUIRES: shard->mu unlocked */
-static size_t pop_timers(grpc_exec_ctx* exec_ctx, timer_shard* shard,
-                         gpr_atm now, gpr_atm* new_min_deadline,
-                         grpc_error* error) {
+static size_t pop_timers(timer_shard* shard, gpr_atm now,
+                         gpr_atm* new_min_deadline, grpc_error* error) {
   size_t n = 0;
   grpc_timer* timer;
   gpr_mu_lock(&shard->mu);
   while ((timer = pop_one(shard, now))) {
     REMOVE_FROM_HASH_TABLE(timer);
-    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_REF(error));
     n++;
   }
   *new_min_deadline = compute_min_deadline(shard);
@@ -536,8 +534,7 @@
   return n;
 }
 
-static grpc_timer_check_result run_some_expired_timers(grpc_exec_ctx* exec_ctx,
-                                                       gpr_atm now,
+static grpc_timer_check_result run_some_expired_timers(gpr_atm now,
                                                        gpr_atm* next,
                                                        grpc_error* error) {
   grpc_timer_check_result result = GRPC_TIMERS_NOT_CHECKED;
@@ -566,8 +563,7 @@
       /* For efficiency, we pop as many available timers as we can from the
          shard.  This may violate perfect timer deadline ordering, but that
          shouldn't be a big deal because we don't make ordering guarantees. */
-      if (pop_timers(exec_ctx, g_shard_queue[0], now, &new_min_deadline,
-                     error) > 0) {
+      if (pop_timers(g_shard_queue[0], now, &new_min_deadline, error) > 0) {
         result = GRPC_TIMERS_FIRED;
       }
 
@@ -604,10 +600,9 @@
   return result;
 }
 
-grpc_timer_check_result grpc_timer_check(grpc_exec_ctx* exec_ctx,
-                                         grpc_millis* next) {
+grpc_timer_check_result grpc_timer_check(grpc_millis* next) {
   // prelude
-  grpc_millis now = grpc_exec_ctx_now(exec_ctx);
+  grpc_millis now = grpc_core::ExecCtx::Get()->Now();
 
   /* fetch from a thread-local first: this avoids contention on a globally
      mutable cacheline in the common case */
@@ -646,7 +641,7 @@
   }
   // actual code
   grpc_timer_check_result r =
-      run_some_expired_timers(exec_ctx, now, next, shutdown_error);
+      run_some_expired_timers(now, next, shutdown_error);
   // tracing
   if (grpc_timer_check_trace.enabled()) {
     char* next_str;
diff --git a/src/core/lib/iomgr/timer_heap.h b/src/core/lib/iomgr/timer_heap.h
index ae56e5a..436eef5 100644
--- a/src/core/lib/iomgr/timer_heap.h
+++ b/src/core/lib/iomgr/timer_heap.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/iomgr/timer.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   grpc_timer** timers;
   uint32_t timer_count;
@@ -43,8 +39,4 @@
 
 int grpc_timer_heap_is_empty(grpc_timer_heap* heap);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_TIMER_HEAP_H */
diff --git a/src/core/lib/iomgr/timer_manager.cc b/src/core/lib/iomgr/timer_manager.cc
index dac74ae..7fb068f 100644
--- a/src/core/lib/iomgr/timer_manager.cc
+++ b/src/core/lib/iomgr/timer_manager.cc
@@ -93,18 +93,17 @@
   // to leak through g_completed_threads and be freed in gc_completed_threads()
   // before "&ct->t" is written to, causing a use-after-free.
   gpr_mu_lock(&g_mu);
-  gpr_thd_new(&ct->t, timer_thread, ct, &opt);
+  gpr_thd_new(&ct->t, "grpc_global_timer", timer_thread, ct, &opt);
   gpr_mu_unlock(&g_mu);
 }
 
 void grpc_timer_manager_tick() {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_millis next = GRPC_MILLIS_INF_FUTURE;
-  grpc_timer_check(&exec_ctx, &next);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_timer_check(&next);
 }
 
-static void run_some_timers(grpc_exec_ctx* exec_ctx) {
+static void run_some_timers() {
   // if there's something to execute...
   gpr_mu_lock(&g_mu);
   // remove a waiter from the pool, and start another thread if necessary
@@ -126,7 +125,7 @@
   if (grpc_timer_check_trace.enabled()) {
     gpr_log(GPR_DEBUG, "flush exec_ctx");
   }
-  grpc_exec_ctx_flush(exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_mu_lock(&g_mu);
   // garbage collect any threads hanging out that are dead
   gc_completed_threads();
@@ -138,7 +137,7 @@
 // wait until 'next' (or forever if there is already a timed waiter in the pool)
 // returns true if the thread should continue executing (false if it should
 // shutdown)
-static bool wait_until(grpc_exec_ctx* exec_ctx, grpc_millis next) {
+static bool wait_until(grpc_millis next) {
   gpr_mu_lock(&g_mu);
   // if we're not threaded anymore, leave
   if (!g_threaded) {
@@ -179,7 +178,7 @@
         g_timed_waiter_deadline = next;
 
         if (grpc_timer_check_trace.enabled()) {
-          grpc_millis wait_time = next - grpc_exec_ctx_now(exec_ctx);
+          grpc_millis wait_time = next - grpc_core::ExecCtx::Get()->Now();
           gpr_log(GPR_DEBUG, "sleep for a %" PRIdPTR " milliseconds",
                   wait_time);
         }
@@ -193,7 +192,7 @@
     }
 
     gpr_cv_wait(&g_cv_wait, &g_mu,
-                grpc_millis_to_timespec(next, GPR_CLOCK_REALTIME));
+                grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC));
 
     if (grpc_timer_check_trace.enabled()) {
       gpr_log(GPR_DEBUG, "wait ended: was_timed:%d kicked:%d",
@@ -220,15 +219,15 @@
   return true;
 }
 
-static void timer_main_loop(grpc_exec_ctx* exec_ctx) {
+static void timer_main_loop() {
   for (;;) {
     grpc_millis next = GRPC_MILLIS_INF_FUTURE;
-    grpc_exec_ctx_invalidate_now(exec_ctx);
+    grpc_core::ExecCtx::Get()->InvalidateNow();
 
     // check timer state, updates next to the next time to run a check
-    switch (grpc_timer_check(exec_ctx, &next)) {
+    switch (grpc_timer_check(&next)) {
       case GRPC_TIMERS_FIRED:
-        run_some_timers(exec_ctx);
+        run_some_timers();
         break;
       case GRPC_TIMERS_NOT_CHECKED:
         /* This case only happens under contention, meaning more than one timer
@@ -246,7 +245,7 @@
         next = GRPC_MILLIS_INF_FUTURE;
       /* fall through */
       case GRPC_TIMERS_CHECKED_AND_EMPTY:
-        if (!wait_until(exec_ctx, next)) {
+        if (!wait_until(next)) {
           return;
         }
         break;
@@ -274,10 +273,9 @@
 static void timer_thread(void* completed_thread_ptr) {
   // this thread's exec_ctx: we try to run things through to completion here
   // since it's easy to spin up new threads
-  grpc_exec_ctx exec_ctx =
-      GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, nullptr);
-  timer_main_loop(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx(0);
+  timer_main_loop();
+
   timer_thread_cleanup((completed_thread*)completed_thread_ptr);
 }
 
@@ -319,7 +317,7 @@
       gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
     }
     while (g_thread_count > 0) {
-      gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+      gpr_cv_wait(&g_cv_shutdown, &g_mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
       if (grpc_timer_check_trace.enabled()) {
         gpr_log(GPR_DEBUG, "num timer threads: %d", g_thread_count);
       }
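Two smaller changes ride along in timer_manager.cc: timer threads now get a name ("grpc_global_timer") via gpr_thd_new, and condition-variable deadlines are converted against GPR_CLOCK_MONOTONIC rather than GPR_CLOCK_REALTIME, presumably so the sleeps are unaffected by wall-clock adjustments. A sketch of the wait shape; example_wait_until is a hypothetical helper:

#include <grpc/support/sync.h>
#include <grpc/support/time.h>
#include "src/core/lib/iomgr/exec_ctx.h"

// Convert a grpc_millis deadline against the monotonic clock before
// sleeping on the condition variable, mirroring the call above.
static void example_wait_until(gpr_cv* cv, gpr_mu* mu, grpc_millis next) {
  gpr_cv_wait(cv, mu, grpc_millis_to_timespec(next, GPR_CLOCK_MONOTONIC));
}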
diff --git a/src/core/lib/iomgr/timer_manager.h b/src/core/lib/iomgr/timer_manager.h
index 72960d6..0ba5029 100644
--- a/src/core/lib/iomgr/timer_manager.h
+++ b/src/core/lib/iomgr/timer_manager.h
@@ -21,10 +21,6 @@
 
 #include <stdbool.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Timer Manager tries to keep one thread waiting for the next timeout at all
    times */
 
@@ -38,8 +34,4 @@
  * disabled */
 void grpc_timer_manager_tick(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_TIMER_MANAGER_H */
diff --git a/src/core/lib/iomgr/timer_uv.cc b/src/core/lib/iomgr/timer_uv.cc
index fac2026..5d238da 100644
--- a/src/core/lib/iomgr/timer_uv.cc
+++ b/src/core/lib/iomgr/timer_uv.cc
@@ -42,28 +42,27 @@
 
 void run_expired_timer(uv_timer_t* handle) {
   grpc_timer* timer = (grpc_timer*)handle->data;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GRPC_UV_ASSERT_SAME_THREAD();
   GPR_ASSERT(timer->pending);
   timer->pending = 0;
-  GRPC_CLOSURE_SCHED(&exec_ctx, timer->closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
   stop_uv_timer(handle);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-void grpc_timer_init(grpc_exec_ctx* exec_ctx, grpc_timer* timer,
-                     grpc_millis deadline, grpc_closure* closure) {
+void grpc_timer_init(grpc_timer* timer, grpc_millis deadline,
+                     grpc_closure* closure) {
   uint64_t timeout;
   uv_timer_t* uv_timer;
   GRPC_UV_ASSERT_SAME_THREAD();
   timer->closure = closure;
-  if (deadline <= grpc_exec_ctx_now(exec_ctx)) {
+  if (deadline <= grpc_core::ExecCtx::Get()->Now()) {
     timer->pending = 0;
-    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_NONE);
     return;
   }
   timer->pending = 1;
-  timeout = (uint64_t)(deadline - grpc_exec_ctx_now(exec_ctx));
+  timeout = (uint64_t)(deadline - grpc_core::ExecCtx::Get()->Now());
   uv_timer = (uv_timer_t*)gpr_malloc(sizeof(uv_timer_t));
   uv_timer_init(uv_default_loop(), uv_timer);
   uv_timer->data = timer;
@@ -77,22 +76,21 @@
 
 void grpc_timer_init_unset(grpc_timer* timer) { timer->pending = 0; }
 
-void grpc_timer_cancel(grpc_exec_ctx* exec_ctx, grpc_timer* timer) {
+void grpc_timer_cancel(grpc_timer* timer) {
   GRPC_UV_ASSERT_SAME_THREAD();
   if (timer->pending) {
     timer->pending = 0;
-    GRPC_CLOSURE_SCHED(exec_ctx, timer->closure, GRPC_ERROR_CANCELLED);
+    GRPC_CLOSURE_SCHED(timer->closure, GRPC_ERROR_CANCELLED);
     stop_uv_timer((uv_timer_t*)timer->uv_timer);
   }
 }
 
-grpc_timer_check_result grpc_timer_check(grpc_exec_ctx* exec_ctx,
-                                         grpc_millis* next) {
+grpc_timer_check_result grpc_timer_check(grpc_millis* next) {
   return GRPC_TIMERS_NOT_CHECKED;
 }
 
-void grpc_timer_list_init(grpc_exec_ctx* exec_ctx) {}
-void grpc_timer_list_shutdown(grpc_exec_ctx* exec_ctx) {}
+void grpc_timer_list_init() {}
+void grpc_timer_list_shutdown() {}
 
 void grpc_timer_consume_kick(void) {}
 
diff --git a/src/core/lib/iomgr/udp_server.cc b/src/core/lib/iomgr/udp_server.cc
index 68ab935..4a97f33 100644
--- a/src/core/lib/iomgr/udp_server.cc
+++ b/src/core/lib/iomgr/udp_server.cc
@@ -47,6 +47,7 @@
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/iomgr/ev_posix.h"
+#include "src/core/lib/iomgr/executor.h"
 #include "src/core/lib/iomgr/resolve_address.h"
 #include "src/core/lib/iomgr/sockaddr.h"
 #include "src/core/lib/iomgr/sockaddr_utils.h"
@@ -71,14 +72,23 @@
   grpc_udp_server_read_cb read_cb;
   grpc_udp_server_write_cb write_cb;
   grpc_udp_server_orphan_cb orphan_cb;
+  grpc_udp_server_start_cb start_cb;
+  // To be scheduled on another thread to actually read/write.
+  grpc_closure do_read_closure;
+  grpc_closure do_write_closure;
+  grpc_closure notify_on_write_closure;
+  // True if orphan_cb is triggered.
   bool orphan_notified;
+  // True if grpc_fd_notify_on_write() is called after on_write() call.
+  bool notify_on_write_armed;
+  // True if fd has been shutdown.
+  bool already_shutdown;
 
   struct grpc_udp_listener* next;
 };
 
 struct shutdown_fd_args {
-  grpc_fd* fd;
+  grpc_udp_listener* sp;
   gpr_mu* server_mu;
 };
 
@@ -141,26 +151,35 @@
   return s;
 }
 
-static void shutdown_fd(grpc_exec_ctx* exec_ctx, void* args,
-                        grpc_error* error) {
+static void shutdown_fd(void* args, grpc_error* error) {
   struct shutdown_fd_args* shutdown_args = (struct shutdown_fd_args*)args;
+  grpc_udp_listener* sp = shutdown_args->sp;
+  gpr_log(GPR_DEBUG, "shutdown fd %d", sp->fd);
   gpr_mu_lock(shutdown_args->server_mu);
-  grpc_fd_shutdown(exec_ctx, shutdown_args->fd, GRPC_ERROR_REF(error));
+  grpc_fd_shutdown(sp->emfd, GRPC_ERROR_REF(error));
+  sp->already_shutdown = true;
+  if (!sp->notify_on_write_armed) {
+    // Re-arm write notification to notify listener with error. This is
+    // necessary to decrement active_ports.
+    sp->notify_on_write_armed = true;
+    grpc_fd_notify_on_write(sp->emfd, &sp->write_closure);
+  }
   gpr_mu_unlock(shutdown_args->server_mu);
   gpr_free(shutdown_args);
 }
 
-static void dummy_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void dummy_cb(void* arg, grpc_error* error) {
   // No-op.
 }
 
-static void finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_udp_server* s) {
+static void finish_shutdown(grpc_udp_server* s) {
   if (s->shutdown_complete != nullptr) {
-    GRPC_CLOSURE_SCHED(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(s->shutdown_complete, GRPC_ERROR_NONE);
   }
 
   gpr_mu_destroy(&s->mu);
 
+  gpr_log(GPR_DEBUG, "Destroy all listeners.");
   while (s->head) {
     grpc_udp_listener* sp = s->head;
     s->head = sp->next;
@@ -174,14 +193,13 @@
   gpr_free(s);
 }
 
-static void destroyed_port(grpc_exec_ctx* exec_ctx, void* server,
-                           grpc_error* error) {
+static void destroyed_port(void* server, grpc_error* error) {
   grpc_udp_server* s = (grpc_udp_server*)server;
   gpr_mu_lock(&s->mu);
   s->destroyed_ports++;
   if (s->destroyed_ports == s->nports) {
     gpr_mu_unlock(&s->mu);
-    finish_shutdown(exec_ctx, s);
+    finish_shutdown(s);
   } else {
     gpr_mu_unlock(&s->mu);
   }
@@ -190,7 +208,7 @@
 /* called when all listening endpoints have been shutdown, so no further
    events will be received on them - at this point it's safe to destroy
    things */
-static void deactivated_all_ports(grpc_exec_ctx* exec_ctx, grpc_udp_server* s) {
+static void deactivated_all_ports(grpc_udp_server* s) {
   /* delete ALL the things */
   gpr_mu_lock(&s->mu);
 
@@ -207,24 +225,23 @@
         /* Call the orphan_cb to signal that the FD is about to be closed and
          * should no longer be used. Because at this point, all listening ports
          * have been shutdown already, no need to shutdown again.*/
-        GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, dummy_cb, sp->emfd,
+        GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, dummy_cb, sp,
                           grpc_schedule_on_exec_ctx);
         GPR_ASSERT(sp->orphan_cb);
-        sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
-                      sp->server->user_data);
+        gpr_log(GPR_DEBUG, "Orphan fd %d", sp->fd);
+        sp->orphan_cb(sp->emfd, &sp->orphan_fd_closure, sp->server->user_data);
       }
-      grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, nullptr,
+      grpc_fd_orphan(sp->emfd, &sp->destroyed_closure, nullptr,
                      false /* already_closed */, "udp_listener_shutdown");
     }
     gpr_mu_unlock(&s->mu);
   } else {
     gpr_mu_unlock(&s->mu);
-    finish_shutdown(exec_ctx, s);
+    finish_shutdown(s);
   }
 }
 
-void grpc_udp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_udp_server* s,
-                             grpc_closure* on_done) {
+void grpc_udp_server_destroy(grpc_udp_server* s, grpc_closure* on_done) {
   grpc_udp_listener* sp;
   gpr_mu_lock(&s->mu);
 
@@ -233,24 +250,24 @@
 
   s->shutdown_complete = on_done;
 
+  gpr_log(GPR_DEBUG, "start to destroy udp_server");
   /* shutdown all fd's */
   if (s->active_ports) {
     for (sp = s->head; sp; sp = sp->next) {
       GPR_ASSERT(sp->orphan_cb);
       struct shutdown_fd_args* args =
           (struct shutdown_fd_args*)gpr_malloc(sizeof(*args));
-      args->fd = sp->emfd;
+      args->sp = sp;
       args->server_mu = &s->mu;
       GRPC_CLOSURE_INIT(&sp->orphan_fd_closure, shutdown_fd, args,
                         grpc_schedule_on_exec_ctx);
-      sp->orphan_cb(exec_ctx, sp->emfd, &sp->orphan_fd_closure,
-                    sp->server->user_data);
+      sp->orphan_cb(sp->emfd, &sp->orphan_fd_closure, sp->server->user_data);
       sp->orphan_notified = true;
     }
     gpr_mu_unlock(&s->mu);
   } else {
     gpr_mu_unlock(&s->mu);
-    deactivated_all_ports(exec_ctx, s);
+    deactivated_all_ports(s);
   }
 }
 
@@ -329,55 +346,112 @@
   return -1;
 }
 
+static void do_read(void* arg, grpc_error* error) {
+  grpc_udp_listener* sp = reinterpret_cast<grpc_udp_listener*>(arg);
+  GPR_ASSERT(sp->read_cb && error == GRPC_ERROR_NONE);
+  /* TODO: the reason we hold server->mu here is merely to prevent fd
+   * shutdown while we are reading. However, it blocks do_write(). Switch to
+   * read lock if available. */
+  gpr_mu_lock(&sp->server->mu);
+  /* Tell the registered callback that data is available to read. */
+  if (!sp->already_shutdown && sp->read_cb(sp->emfd)) {
+    /* There may be more packets to read. Schedule read_more_cb_ closure to run
+     * after finishing this event loop. */
+    GRPC_CLOSURE_SCHED(&sp->do_read_closure, GRPC_ERROR_NONE);
+  } else {
+    /* Finish reading all the packets, re-arm the notification event so we can
+     * get another chance to read. Or fd already shutdown, re-arm to get a
+     * notification with shutdown error. */
+    grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+  }
+  gpr_mu_unlock(&sp->server->mu);
+}
+
 /* event manager callback when reads are ready */
-static void on_read(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_read(void* arg, grpc_error* error) {
   grpc_udp_listener* sp = (grpc_udp_listener*)arg;
 
   gpr_mu_lock(&sp->server->mu);
   if (error != GRPC_ERROR_NONE) {
     if (0 == --sp->server->active_ports && sp->server->shutdown) {
       gpr_mu_unlock(&sp->server->mu);
-      deactivated_all_ports(exec_ctx, sp->server);
+      deactivated_all_ports(sp->server);
     } else {
       gpr_mu_unlock(&sp->server->mu);
     }
     return;
   }
-
-  /* Tell the registered callback that data is available to read. */
+  /* Read once. If there is more data to read, offload the work to another
+   * thread to finish. */
   GPR_ASSERT(sp->read_cb);
-  sp->read_cb(exec_ctx, sp->emfd, sp->server->user_data);
-
-  /* Re-arm the notification event so we get another chance to read. */
-  grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+  if (sp->read_cb(sp->emfd)) {
+    /* There may be more packets to read. Schedule read_more_cb_ closure to run
+     * after finishing this event loop. */
+    GRPC_CLOSURE_INIT(&sp->do_read_closure, do_read, arg,
+                      grpc_executor_scheduler(GRPC_EXECUTOR_LONG));
+    GRPC_CLOSURE_SCHED(&sp->do_read_closure, GRPC_ERROR_NONE);
+  } else {
+    /* Done reading all the packets; re-arm the notification event so we get
+     * another chance to read. Or, if the fd is already shut down, re-arm to
+     * get a notification carrying the shutdown error. */
+    grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
+  }
   gpr_mu_unlock(&sp->server->mu);
 }
 
-static void on_write(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+// Wrapper of grpc_fd_notify_on_write() with a grpc_closure callback interface.
+void fd_notify_on_write_wrapper(void* arg, grpc_error* error) {
+  grpc_udp_listener* sp = reinterpret_cast<grpc_udp_listener*>(arg);
+  gpr_mu_lock(&sp->server->mu);
+  if (!sp->notify_on_write_armed) {
+    grpc_fd_notify_on_write(sp->emfd, &sp->write_closure);
+    sp->notify_on_write_armed = true;
+  }
+  gpr_mu_unlock(&sp->server->mu);
+}
+
+static void do_write(void* arg, grpc_error* error) {
+  grpc_udp_listener* sp = reinterpret_cast<grpc_udp_listener*>(arg);
+  gpr_mu_lock(&sp->server->mu);
+  if (sp->already_shutdown) {
+    // If the fd has been shut down, stop writing and re-arm the notification.
+    grpc_fd_notify_on_write(sp->emfd, &sp->write_closure);
+  } else {
+    sp->notify_on_write_armed = false;
+    /* Tell the registered callback that the socket is writeable. */
+    GPR_ASSERT(sp->write_cb && error == GRPC_ERROR_NONE);
+    GRPC_CLOSURE_INIT(&sp->notify_on_write_closure, fd_notify_on_write_wrapper,
+                      arg, grpc_schedule_on_exec_ctx);
+    sp->write_cb(sp->emfd, sp->server->user_data, &sp->notify_on_write_closure);
+  }
+  gpr_mu_unlock(&sp->server->mu);
+}
+
+static void on_write(void* arg, grpc_error* error) {
   grpc_udp_listener* sp = (grpc_udp_listener*)arg;
 
-  gpr_mu_lock(&(sp->server->mu));
+  gpr_mu_lock(&sp->server->mu);
   if (error != GRPC_ERROR_NONE) {
     if (0 == --sp->server->active_ports && sp->server->shutdown) {
       gpr_mu_unlock(&sp->server->mu);
-      deactivated_all_ports(exec_ctx, sp->server);
+      deactivated_all_ports(sp->server);
     } else {
       gpr_mu_unlock(&sp->server->mu);
     }
     return;
   }
 
-  /* Tell the registered callback that the socket is writeable. */
-  GPR_ASSERT(sp->write_cb);
-  sp->write_cb(exec_ctx, sp->emfd, sp->server->user_data);
+  /* Schedule actual write in another thread. */
+  GRPC_CLOSURE_INIT(&sp->do_write_closure, do_write, arg,
+                    grpc_executor_scheduler(GRPC_EXECUTOR_LONG));
 
-  /* Re-arm the notification event so we get another chance to write. */
-  grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
+  GRPC_CLOSURE_SCHED(&sp->do_write_closure, GRPC_ERROR_NONE);
   gpr_mu_unlock(&sp->server->mu);
 }
 
 static int add_socket_to_server(grpc_udp_server* s, int fd,
                                 const grpc_resolved_address* addr,
+                                grpc_udp_server_start_cb start_cb,
                                 grpc_udp_server_read_cb read_cb,
                                 grpc_udp_server_write_cb write_cb,
                                 grpc_udp_server_orphan_cb orphan_cb) {
@@ -408,7 +482,9 @@
     sp->read_cb = read_cb;
     sp->write_cb = write_cb;
     sp->orphan_cb = orphan_cb;
+    sp->start_cb = start_cb;
     sp->orphan_notified = false;
+    sp->already_shutdown = false;
     GPR_ASSERT(sp->emfd);
     gpr_mu_unlock(&s->mu);
     gpr_free(name);
@@ -419,6 +495,7 @@
 
 int grpc_udp_server_add_port(grpc_udp_server* s,
                              const grpc_resolved_address* addr,
+                             grpc_udp_server_start_cb start_cb,
                              grpc_udp_server_read_cb read_cb,
                              grpc_udp_server_write_cb write_cb,
                              grpc_udp_server_orphan_cb orphan_cb) {
@@ -468,8 +545,8 @@
     // TODO(rjshade): Test and propagate the returned grpc_error*:
     GRPC_ERROR_UNREF(grpc_create_dualstack_socket_using_factory(
         s->socket_factory, addr, SOCK_DGRAM, IPPROTO_UDP, &dsmode, &fd));
-    allocated_port1 =
-        add_socket_to_server(s, fd, addr, read_cb, write_cb, orphan_cb);
+    allocated_port1 = add_socket_to_server(s, fd, addr, start_cb, read_cb,
+                                           write_cb, orphan_cb);
     if (fd >= 0 && dsmode == GRPC_DSMODE_DUALSTACK) {
       goto done;
     }
@@ -492,7 +569,7 @@
     addr = &addr4_copy;
   }
   allocated_port2 =
-      add_socket_to_server(s, fd, addr, read_cb, write_cb, orphan_cb);
+      add_socket_to_server(s, fd, addr, start_cb, read_cb, write_cb, orphan_cb);
 
 done:
   gpr_free(allocated_addr);
@@ -512,9 +589,9 @@
   return sp->fd;
 }
 
-void grpc_udp_server_start(grpc_exec_ctx* exec_ctx, grpc_udp_server* s,
-                           grpc_pollset** pollsets, size_t pollset_count,
-                           void* user_data) {
+void grpc_udp_server_start(grpc_udp_server* s, grpc_pollset** pollsets,
+                           size_t pollset_count, void* user_data) {
+  gpr_log(GPR_DEBUG, "grpc_udp_server_start");
   size_t i;
   gpr_mu_lock(&s->mu);
   grpc_udp_listener* sp;
@@ -524,16 +601,18 @@
 
   sp = s->head;
   while (sp != nullptr) {
+    sp->start_cb(sp->emfd, sp->server->user_data);
     for (i = 0; i < pollset_count; i++) {
-      grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
+      grpc_pollset_add_fd(pollsets[i], sp->emfd);
     }
     GRPC_CLOSURE_INIT(&sp->read_closure, on_read, sp,
                       grpc_schedule_on_exec_ctx);
-    grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
+    grpc_fd_notify_on_read(sp->emfd, &sp->read_closure);
 
     GRPC_CLOSURE_INIT(&sp->write_closure, on_write, sp,
                       grpc_schedule_on_exec_ctx);
-    grpc_fd_notify_on_write(exec_ctx, sp->emfd, &sp->write_closure);
+    sp->notify_on_write_armed = true;
+    grpc_fd_notify_on_write(sp->emfd, &sp->write_closure);
 
     /* Registered for both read and write callbacks: increment active_ports
      * twice to account for this, and delay free-ing of memory until both
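
Note: the rewritten on_write() no longer invokes the user's write callback inline on the
polling thread; it wraps do_write() in a closure scheduled on the long-running executor,
and do_write() hands the callback a closure (fd_notify_on_write_wrapper) for re-arming
write notifications. A minimal sketch of the same offload pattern, using only the closure
and executor calls that appear above; offload_work and my_state are hypothetical names:

  typedef struct {
    grpc_closure offload_closure;
    /* ... state needed by the deferred work ... */
  } my_state;

  static void offload_work(void* arg, grpc_error* error) {
    my_state* st = reinterpret_cast<my_state*>(arg);
    /* Runs on an executor thread, not on the polling thread. */
    (void)st;
  }

  static void on_event(my_state* st) {
    /* Defer the heavy work instead of running it inline in the poller callback. */
    GRPC_CLOSURE_INIT(&st->offload_closure, offload_work, st,
                      grpc_executor_scheduler(GRPC_EXECUTOR_LONG));
    GRPC_CLOSURE_SCHED(&st->offload_closure, GRPC_ERROR_NONE);
  }
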
diff --git a/src/core/lib/iomgr/udp_server.h b/src/core/lib/iomgr/udp_server.h
index bca0f04..a469ab9 100644
--- a/src/core/lib/iomgr/udp_server.h
+++ b/src/core/lib/iomgr/udp_server.h
@@ -23,10 +23,6 @@
 #include "src/core/lib/iomgr/ev_posix.h"
 #include "src/core/lib/iomgr/resolve_address.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Forward decl of struct grpc_server */
 /* This is not typedef'ed to avoid a typedef-redefinition error */
 struct grpc_server;
@@ -34,17 +30,20 @@
 /* Forward decl of grpc_udp_server */
 typedef struct grpc_udp_server grpc_udp_server;
 
-/* Called when data is available to read from the socket. */
-typedef void (*grpc_udp_server_read_cb)(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
-                                        void* user_data);
+/* Called when the grpc server starts listening on the grpc_fd. */
+typedef void (*grpc_udp_server_start_cb)(grpc_fd* emfd, void* user_data);
 
-/* Called when the socket is writeable. */
-typedef void (*grpc_udp_server_write_cb)(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
-                                         void* user_data);
+/* Called when data is available to read from the socket.
+ * Returns true if there is more data to read from the fd. */
+typedef bool (*grpc_udp_server_read_cb)(grpc_fd* emfd);
+
+/* Called when the socket is writeable. The given closure should be scheduled
+ * the next time the socket would block on a write. */
+typedef void (*grpc_udp_server_write_cb)(grpc_fd* emfd, void* user_data,
+                                         grpc_closure* notify_on_write_closure);
 
 /* Called when the grpc_fd is about to be orphaned (and the FD closed). */
-typedef void (*grpc_udp_server_orphan_cb)(grpc_exec_ctx* exec_ctx,
-                                          grpc_fd* emfd,
+typedef void (*grpc_udp_server_orphan_cb)(grpc_fd* emfd,
                                           grpc_closure* shutdown_fd_callback,
                                           void* user_data);
 
@@ -52,9 +51,8 @@
 grpc_udp_server* grpc_udp_server_create(const grpc_channel_args* args);
 
 /* Start listening to bound ports. user_data is passed to callbacks. */
-void grpc_udp_server_start(grpc_exec_ctx* exec_ctx, grpc_udp_server* udp_server,
-                           grpc_pollset** pollsets, size_t pollset_count,
-                           void* user_data);
+void grpc_udp_server_start(grpc_udp_server* udp_server, grpc_pollset** pollsets,
+                           size_t pollset_count, void* user_data);
 
 int grpc_udp_server_get_fd(grpc_udp_server* s, unsigned port_index);
 
@@ -70,15 +68,11 @@
                   all of the multiple socket port matching logic in one place */
 int grpc_udp_server_add_port(grpc_udp_server* s,
                              const grpc_resolved_address* addr,
+                             grpc_udp_server_start_cb start_cb,
                              grpc_udp_server_read_cb read_cb,
                              grpc_udp_server_write_cb write_cb,
                              grpc_udp_server_orphan_cb orphan_cb);
 
-void grpc_udp_server_destroy(grpc_exec_ctx* exec_ctx, grpc_udp_server* server,
-                             grpc_closure* on_done);
-
-#ifdef __cplusplus
-}
-#endif
+void grpc_udp_server_destroy(grpc_udp_server* server, grpc_closure* on_done);
 
 #endif /* GRPC_CORE_LIB_IOMGR_UDP_SERVER_H */
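
Note: for reference, a sketch of a listener built on the revised callback API. The
callback shapes follow the typedefs above; the function names and I/O details are
hypothetical placeholders, not part of this change:

  static void my_start_cb(grpc_fd* emfd, void* user_data) {
    /* Per-listener setup once grpc_udp_server_start() begins polling this fd. */
  }

  static bool my_read_cb(grpc_fd* emfd) {
    /* Drain the socket here (e.g. recvmsg in a loop). Returning true tells the
       server there may be more data, so it keeps reading before re-arming the
       read notification. */
    return false;
  }

  static void my_write_cb(grpc_fd* emfd, void* user_data,
                          grpc_closure* notify_on_write_closure) {
    /* A real callback would write until the socket would block; scheduling the
       provided closure asks the server to re-arm the write notification. This
       sketch re-arms immediately. */
    GRPC_CLOSURE_SCHED(notify_on_write_closure, GRPC_ERROR_NONE);
  }

  static void my_orphan_cb(grpc_fd* emfd, grpc_closure* shutdown_fd_callback,
                           void* user_data) {
    /* Release per-fd state; scheduling shutdown_fd_callback once that is done
       (assumption) lets the fd be closed. */
    GRPC_CLOSURE_SCHED(shutdown_fd_callback, GRPC_ERROR_NONE);
  }

  /* Registration uses the extended signature:
     grpc_udp_server_add_port(s, &addr, my_start_cb, my_read_cb, my_write_cb,
                              my_orphan_cb); */
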
diff --git a/src/core/lib/iomgr/unix_sockets_posix.h b/src/core/lib/iomgr/unix_sockets_posix.h
index be3c33d..1c079e6 100644
--- a/src/core/lib/iomgr/unix_sockets_posix.h
+++ b/src/core/lib/iomgr/unix_sockets_posix.h
@@ -25,10 +25,6 @@
 
 #include "src/core/lib/iomgr/resolve_address.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void grpc_create_socketpair_if_unix(int sv[2]);
 
 grpc_error* grpc_resolve_unix_domain_address(
@@ -42,8 +38,4 @@
 char* grpc_sockaddr_to_uri_unix_if_possible(
     const grpc_resolved_address* resolved_addr);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_UNIX_SOCKETS_POSIX_H */
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.cc b/src/core/lib/iomgr/wakeup_fd_cv.cc
index 5c1f16d..c785114 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.cc
+++ b/src/core/lib/iomgr/wakeup_fd_cv.cc
@@ -34,7 +34,7 @@
 
 #define MAX_TABLE_RESIZE 256
 
-extern cv_fd_table g_cvfds;
+extern grpc_cv_fd_table g_cvfds;
 
 static grpc_error* cv_fd_init(grpc_wakeup_fd* fd_info) {
   unsigned int i, newsize;
@@ -42,8 +42,8 @@
   gpr_mu_lock(&g_cvfds.mu);
   if (!g_cvfds.free_fds) {
     newsize = GPR_MIN(g_cvfds.size * 2, g_cvfds.size + MAX_TABLE_RESIZE);
-    g_cvfds.cvfds =
-        (fd_node*)gpr_realloc(g_cvfds.cvfds, sizeof(fd_node) * newsize);
+    g_cvfds.cvfds = (grpc_fd_node*)gpr_realloc(g_cvfds.cvfds,
+                                               sizeof(grpc_fd_node) * newsize);
     for (i = g_cvfds.size; i < newsize; i++) {
       g_cvfds.cvfds[i].is_set = 0;
       g_cvfds.cvfds[i].cvs = nullptr;
@@ -64,7 +64,7 @@
 }
 
 static grpc_error* cv_fd_wakeup(grpc_wakeup_fd* fd_info) {
-  cv_node* cvn;
+  grpc_cv_node* cvn;
   gpr_mu_lock(&g_cvfds.mu);
   g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].is_set = 1;
   cvn = g_cvfds.cvfds[GRPC_FD_TO_IDX(fd_info->read_fd)].cvs;
diff --git a/src/core/lib/iomgr/wakeup_fd_cv.h b/src/core/lib/iomgr/wakeup_fd_cv.h
index dcd7bdb..399620a 100644
--- a/src/core/lib/iomgr/wakeup_fd_cv.h
+++ b/src/core/lib/iomgr/wakeup_fd_cv.h
@@ -40,36 +40,28 @@
 #define GRPC_FD_TO_IDX(fd) (-(fd)-1)
 #define GRPC_IDX_TO_FD(idx) (-(idx)-1)
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct cv_node {
+typedef struct grpc_cv_node {
   gpr_cv* cv;
-  struct cv_node* next;
-  struct cv_node* prev;
-} cv_node;
+  struct grpc_cv_node* next;
+  struct grpc_cv_node* prev;
+} grpc_cv_node;
 
-typedef struct fd_node {
+typedef struct grpc_fd_node {
   int is_set;
-  cv_node* cvs;
-  struct fd_node* next_free;
-} fd_node;
+  grpc_cv_node* cvs;
+  struct grpc_fd_node* next_free;
+} grpc_fd_node;
 
-typedef struct cv_fd_table {
+typedef struct grpc_cv_fd_table {
   gpr_mu mu;
   gpr_refcount pollcount;
   gpr_cv shutdown_cv;
-  fd_node* cvfds;
-  fd_node* free_fds;
+  grpc_fd_node* cvfds;
+  grpc_fd_node* free_fds;
   unsigned int size;
   grpc_poll_function_type poll;
-} cv_fd_table;
+} grpc_cv_fd_table;
 
 extern const grpc_wakeup_fd_vtable grpc_cv_wakeup_fd_vtable;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_CV_H */
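
Note: the renamed table keeps the existing encoding of condition-variable wakeup fds as
negative descriptors; GRPC_FD_TO_IDX / GRPC_IDX_TO_FD map between them. For example,
following directly from the macro definitions above:

  GRPC_FD_TO_IDX(-1) == 0   and   GRPC_IDX_TO_FD(0) == -1
  GRPC_FD_TO_IDX(-5) == 4   and   GRPC_IDX_TO_FD(4) == -5
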
diff --git a/src/core/lib/iomgr/wakeup_fd_pipe.h b/src/core/lib/iomgr/wakeup_fd_pipe.h
index 9bbb5e2..326a0c4 100644
--- a/src/core/lib/iomgr/wakeup_fd_pipe.h
+++ b/src/core/lib/iomgr/wakeup_fd_pipe.h
@@ -21,14 +21,6 @@
 
 #include "src/core/lib/iomgr/wakeup_fd_posix.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_wakeup_fd_vtable grpc_pipe_wakeup_fd_vtable;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_PIPE_H */
diff --git a/src/core/lib/iomgr/wakeup_fd_posix.h b/src/core/lib/iomgr/wakeup_fd_posix.h
index ae7849f..a9584d0 100644
--- a/src/core/lib/iomgr/wakeup_fd_posix.h
+++ b/src/core/lib/iomgr/wakeup_fd_posix.h
@@ -49,10 +49,6 @@
 
 #include "src/core/lib/iomgr/error.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void grpc_wakeup_fd_global_init(void);
 void grpc_wakeup_fd_global_destroy(void);
 
@@ -95,8 +91,4 @@
  * wakeup_fd_nospecial.c if no such implementation exists. */
 extern const grpc_wakeup_fd_vtable grpc_specialized_wakeup_fd_vtable;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_IOMGR_WAKEUP_FD_POSIX_H */
diff --git a/src/core/lib/json/json.h b/src/core/lib/json/json.h
index c9fdec4..bbd4302 100644
--- a/src/core/lib/json/json.h
+++ b/src/core/lib/json/json.h
@@ -23,10 +23,6 @@
 
 #include "src/core/lib/json/json_common.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* A tree-like structure to hold json values. The key and value pointers
  * are not owned by it.
  */
@@ -74,8 +70,4 @@
 grpc_json* grpc_json_create(grpc_json_type type);
 void grpc_json_destroy(grpc_json* json);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_JSON_JSON_H */
diff --git a/src/core/lib/json/json_reader.h b/src/core/lib/json/json_reader.h
index 2636d2b..03185cb 100644
--- a/src/core/lib/json/json_reader.h
+++ b/src/core/lib/json/json_reader.h
@@ -22,10 +22,6 @@
 #include <grpc/support/port_platform.h>
 #include "src/core/lib/json/json_common.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum {
   GRPC_JSON_STATE_OBJECT_KEY_BEGIN,
   GRPC_JSON_STATE_OBJECT_KEY_STRING,
@@ -146,8 +142,4 @@
  */
 int grpc_json_reader_is_complete(grpc_json_reader* reader);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_JSON_JSON_READER_H */
diff --git a/src/core/lib/json/json_writer.h b/src/core/lib/json/json_writer.h
index 93eeb20..a4f2d4d 100644
--- a/src/core/lib/json/json_writer.h
+++ b/src/core/lib/json/json_writer.h
@@ -35,10 +35,6 @@
 
 #include "src/core/lib/json/json_common.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_json_writer_vtable {
   /* Adds a character to the output stream. */
   void (*output_char)(void* userdata, char);
@@ -83,8 +79,4 @@
 void grpc_json_writer_value_string(grpc_json_writer* writer,
                                    const char* string);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_JSON_JSON_WRITER_H */
diff --git a/src/core/lib/profiling/basic_timers.cc b/src/core/lib/profiling/basic_timers.cc
index 3ec6280..87dd4ab 100644
--- a/src/core/lib/profiling/basic_timers.cc
+++ b/src/core/lib/profiling/basic_timers.cc
@@ -203,7 +203,8 @@
 static void init_output() {
   gpr_thd_options options = gpr_thd_options_default();
   gpr_thd_options_set_joinable(&options);
-  GPR_ASSERT(gpr_thd_new(&g_writing_thread, writing_thread, NULL, &options));
+  GPR_ASSERT(gpr_thd_new(&g_writing_thread, "timer_output_thread",
+                         writing_thread, NULL, &options));
   atexit(finish_writing);
 }
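
Note: gpr_thd_new() now takes a thread-name argument between the thread id and the thread
function, as the updated call above shows. A sketch of the new call shape at another call
site (worker_fn and the name string are hypothetical):

  static void worker_fn(void* arg) { /* thread body */ }

  gpr_thd_id tid;
  gpr_thd_options options = gpr_thd_options_default();
  gpr_thd_options_set_joinable(&options);
  GPR_ASSERT(gpr_thd_new(&tid, "my_worker", worker_fn, nullptr, &options));
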
 
diff --git a/src/core/lib/profiling/timers.h b/src/core/lib/profiling/timers.h
index 8b6c254..9f11f77 100644
--- a/src/core/lib/profiling/timers.h
+++ b/src/core/lib/profiling/timers.h
@@ -19,10 +19,6 @@
 #ifndef GRPC_CORE_LIB_PROFILING_TIMERS_H
 #define GRPC_CORE_LIB_PROFILING_TIMERS_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void gpr_timers_global_init(void);
 void gpr_timers_global_destroy(void);
 
@@ -84,9 +80,6 @@
 
 #endif /* at least one profiler requested. */
 
-#ifdef __cplusplus
-}
-
 #if (defined(GRPC_STAP_PROFILER) + defined(GRPC_BASIC_PROFILER) + \
      defined(GRPC_CUSTOM_PROFILER))
 namespace grpc {
@@ -111,6 +104,5 @@
   do {                                  \
   } while (false)
 #endif
-#endif
 
 #endif /* GRPC_CORE_LIB_PROFILING_TIMERS_H */
diff --git a/src/core/lib/security/context/security_context.cc b/src/core/lib/security/context/security_context.cc
index 19c6148..0371027 100644
--- a/src/core/lib/security/context/security_context.cc
+++ b/src/core/lib/security/context/security_context.cc
@@ -36,7 +36,7 @@
 
 grpc_call_error grpc_call_set_credentials(grpc_call* call,
                                           grpc_call_credentials* creds) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_client_security_context* ctx = nullptr;
   GRPC_API_TRACE("grpc_call_set_credentials(call=%p, creds=%p)", 2,
                  (call, creds));
@@ -52,10 +52,10 @@
     grpc_call_context_set(call, GRPC_CONTEXT_SECURITY, ctx,
                           grpc_client_security_context_destroy);
   } else {
-    grpc_call_credentials_unref(&exec_ctx, ctx->creds);
+    grpc_call_credentials_unref(ctx->creds);
     ctx->creds = grpc_call_credentials_ref(creds);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return GRPC_CALL_OK;
 }
 
@@ -85,15 +85,14 @@
 }
 
 void grpc_client_security_context_destroy(void* ctx) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_client_security_context* c = (grpc_client_security_context*)ctx;
-  grpc_call_credentials_unref(&exec_ctx, c->creds);
+  grpc_call_credentials_unref(c->creds);
   GRPC_AUTH_CONTEXT_UNREF(c->auth_context, "client_security_context");
   if (c->extension.instance != nullptr && c->extension.destroy != nullptr) {
     c->extension.destroy(c->extension.instance);
   }
   gpr_free(ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 /* --- grpc_server_security_context --- */
@@ -141,7 +140,7 @@
   }
 #else
 grpc_auth_context* grpc_auth_context_ref(grpc_auth_context* ctx) {
-  if (ctx == NULL) return NULL;
+  if (ctx == nullptr) return nullptr;
 #endif
   gpr_ref(&ctx->refcount);
   return ctx;
@@ -159,7 +158,7 @@
   }
 #else
 void grpc_auth_context_unref(grpc_auth_context* ctx) {
-  if (ctx == NULL) return;
+  if (ctx == nullptr) return;
 #endif
   if (gpr_unref(&ctx->refcount)) {
     size_t i;
@@ -303,7 +302,7 @@
   memset(property, 0, sizeof(grpc_auth_property));
 }
 
-static void auth_context_pointer_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
+static void auth_context_pointer_arg_destroy(void* p) {
   GRPC_AUTH_CONTEXT_UNREF((grpc_auth_context*)p, "auth_context_pointer_arg");
 }
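
Note: the recurring transformation in these security files is that public entry points no
longer thread a grpc_exec_ctx through every call; they construct a grpc_core::ExecCtx on
the stack and rely on it being flushed when it goes out of scope. A minimal sketch of the
pattern (the function name is hypothetical):

  void my_public_api_call(grpc_call_credentials* creds) {
    grpc_core::ExecCtx exec_ctx;         // replaces GRPC_EXEC_CTX_INIT
    grpc_call_credentials_unref(creds);  // callees no longer take an exec_ctx
    // no grpc_exec_ctx_finish(): flushing happens when exec_ctx is destroyed
  }
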
 
diff --git a/src/core/lib/security/context/security_context.h b/src/core/lib/security/context/security_context.h
index 5b27d1a..34f8c24 100644
--- a/src/core/lib/security/context/security_context.h
+++ b/src/core/lib/security/context/security_context.h
@@ -24,10 +24,6 @@
 
 extern grpc_core::DebugOnlyTraceFlag grpc_trace_auth_context_refcount;
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* --- grpc_auth_context ---
 
    High level authentication context object. Can optionally be chained. */
@@ -114,8 +110,4 @@
 grpc_auth_context* grpc_find_auth_context_in_args(
     const grpc_channel_args* args);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_CONTEXT_SECURITY_CONTEXT_H */
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.cc b/src/core/lib/security/credentials/composite/composite_credentials.cc
index 93dd721..e4c1604 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.cc
+++ b/src/core/lib/security/credentials/composite/composite_credentials.cc
@@ -39,17 +39,15 @@
   grpc_closure internal_on_request_metadata;
 } grpc_composite_call_credentials_metadata_context;
 
-static void composite_call_destruct(grpc_exec_ctx* exec_ctx,
-                                    grpc_call_credentials* creds) {
+static void composite_call_destruct(grpc_call_credentials* creds) {
   grpc_composite_call_credentials* c = (grpc_composite_call_credentials*)creds;
   for (size_t i = 0; i < c->inner.num_creds; i++) {
-    grpc_call_credentials_unref(exec_ctx, c->inner.creds_array[i]);
+    grpc_call_credentials_unref(c->inner.creds_array[i]);
   }
   gpr_free(c->inner.creds_array);
 }
 
-static void composite_call_metadata_cb(grpc_exec_ctx* exec_ctx, void* arg,
-                                       grpc_error* error) {
+static void composite_call_metadata_cb(void* arg, grpc_error* error) {
   grpc_composite_call_credentials_metadata_context* ctx =
       (grpc_composite_call_credentials_metadata_context*)arg;
   if (error == GRPC_ERROR_NONE) {
@@ -58,23 +56,23 @@
       grpc_call_credentials* inner_creds =
           ctx->composite_creds->inner.creds_array[ctx->creds_index++];
       if (grpc_call_credentials_get_request_metadata(
-              exec_ctx, inner_creds, ctx->pollent, ctx->auth_md_context,
-              ctx->md_array, &ctx->internal_on_request_metadata, &error)) {
+              inner_creds, ctx->pollent, ctx->auth_md_context, ctx->md_array,
+              &ctx->internal_on_request_metadata, &error)) {
         // Synchronous response, so call ourselves recursively.
-        composite_call_metadata_cb(exec_ctx, arg, error);
+        composite_call_metadata_cb(arg, error);
         GRPC_ERROR_UNREF(error);
       }
       return;
     }
     // We're done!
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, ctx->on_request_metadata, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_SCHED(ctx->on_request_metadata, GRPC_ERROR_REF(error));
   gpr_free(ctx);
 }
 
 static bool composite_call_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_polling_entity* pollent, grpc_auth_metadata_context auth_md_context,
+    grpc_call_credentials* creds, grpc_polling_entity* pollent,
+    grpc_auth_metadata_context auth_md_context,
     grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
     grpc_error** error) {
   grpc_composite_call_credentials* c = (grpc_composite_call_credentials*)creds;
@@ -93,8 +91,8 @@
     grpc_call_credentials* inner_creds =
         ctx->composite_creds->inner.creds_array[ctx->creds_index++];
     if (grpc_call_credentials_get_request_metadata(
-            exec_ctx, inner_creds, ctx->pollent, ctx->auth_md_context,
-            ctx->md_array, &ctx->internal_on_request_metadata, error)) {
+            inner_creds, ctx->pollent, ctx->auth_md_context, ctx->md_array,
+            &ctx->internal_on_request_metadata, error)) {
       if (*error != GRPC_ERROR_NONE) break;
     } else {
       synchronous = false;  // Async return.
@@ -106,12 +104,12 @@
 }
 
 static void composite_call_cancel_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+    grpc_call_credentials* creds, grpc_credentials_mdelem_array* md_array,
+    grpc_error* error) {
   grpc_composite_call_credentials* c = (grpc_composite_call_credentials*)creds;
   for (size_t i = 0; i < c->inner.num_creds; ++i) {
     grpc_call_credentials_cancel_get_request_metadata(
-        exec_ctx, c->inner.creds_array[i], md_array, GRPC_ERROR_REF(error));
+        c->inner.creds_array[i], md_array, GRPC_ERROR_REF(error));
   }
   GRPC_ERROR_UNREF(error);
 }
@@ -200,19 +198,17 @@
 
 /* -- Composite channel credentials. -- */
 
-static void composite_channel_destruct(grpc_exec_ctx* exec_ctx,
-                                       grpc_channel_credentials* creds) {
+static void composite_channel_destruct(grpc_channel_credentials* creds) {
   grpc_composite_channel_credentials* c =
       (grpc_composite_channel_credentials*)creds;
-  grpc_channel_credentials_unref(exec_ctx, c->inner_creds);
-  grpc_call_credentials_unref(exec_ctx, c->call_creds);
+  grpc_channel_credentials_unref(c->inner_creds);
+  grpc_call_credentials_unref(c->call_creds);
 }
 
 static grpc_security_status composite_channel_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_channel_credentials* creds,
-    grpc_call_credentials* call_creds, const char* target,
-    const grpc_channel_args* args, grpc_channel_security_connector** sc,
-    grpc_channel_args** new_args) {
+    grpc_channel_credentials* creds, grpc_call_credentials* call_creds,
+    const char* target, const grpc_channel_args* args,
+    grpc_channel_security_connector** sc, grpc_channel_args** new_args) {
   grpc_composite_channel_credentials* c =
       (grpc_composite_channel_credentials*)creds;
   grpc_security_status status = GRPC_SECURITY_ERROR;
@@ -227,12 +223,11 @@
         grpc_composite_call_credentials_create(c->call_creds, call_creds,
                                                nullptr);
     status = c->inner_creds->vtable->create_security_connector(
-        exec_ctx, c->inner_creds, composite_call_creds, target, args, sc,
-        new_args);
-    grpc_call_credentials_unref(exec_ctx, composite_call_creds);
+        c->inner_creds, composite_call_creds, target, args, sc, new_args);
+    grpc_call_credentials_unref(composite_call_creds);
   } else {
     status = c->inner_creds->vtable->create_security_connector(
-        exec_ctx, c->inner_creds, c->call_creds, target, args, sc, new_args);
+        c->inner_creds, c->call_creds, target, args, sc, new_args);
   }
   return status;
 }
diff --git a/src/core/lib/security/credentials/composite/composite_credentials.h b/src/core/lib/security/credentials/composite/composite_credentials.h
index efb5f4f..11990d3 100644
--- a/src/core/lib/security/credentials/composite/composite_credentials.h
+++ b/src/core/lib/security/credentials/composite/composite_credentials.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/security/credentials/credentials.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   grpc_call_credentials** creds_array;
   size_t num_creds;
@@ -57,9 +53,5 @@
   grpc_call_credentials_array inner;
 } grpc_composite_call_credentials;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_COMPOSITE_COMPOSITE_CREDENTIALS_H \
         */
diff --git a/src/core/lib/security/credentials/credentials.cc b/src/core/lib/security/credentials/credentials.cc
index 90576e6..48b459e 100644
--- a/src/core/lib/security/credentials/credentials.cc
+++ b/src/core/lib/security/credentials/credentials.cc
@@ -47,8 +47,8 @@
 }
 
 void grpc_credentials_metadata_request_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_credentials_metadata_request* r) {
-  grpc_call_credentials_unref(exec_ctx, r->creds);
+    grpc_credentials_metadata_request* r) {
+  grpc_call_credentials_unref(r->creds);
   grpc_http_response_destroy(&r->response);
   gpr_free(r);
 }
@@ -60,12 +60,11 @@
   return creds;
 }
 
-void grpc_channel_credentials_unref(grpc_exec_ctx* exec_ctx,
-                                    grpc_channel_credentials* creds) {
+void grpc_channel_credentials_unref(grpc_channel_credentials* creds) {
   if (creds == nullptr) return;
   if (gpr_unref(&creds->refcount)) {
     if (creds->vtable->destruct != nullptr) {
-      creds->vtable->destruct(exec_ctx, creds);
+      creds->vtable->destruct(creds);
     }
     gpr_free(creds);
   }
@@ -73,9 +72,8 @@
 
 void grpc_channel_credentials_release(grpc_channel_credentials* creds) {
   GRPC_API_TRACE("grpc_channel_credentials_release(creds=%p)", 1, (creds));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_channel_credentials_unref(&exec_ctx, creds);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_channel_credentials_unref(creds);
 }
 
 grpc_call_credentials* grpc_call_credentials_ref(grpc_call_credentials* creds) {
@@ -84,12 +82,11 @@
   return creds;
 }
 
-void grpc_call_credentials_unref(grpc_exec_ctx* exec_ctx,
-                                 grpc_call_credentials* creds) {
+void grpc_call_credentials_unref(grpc_call_credentials* creds) {
   if (creds == nullptr) return;
   if (gpr_unref(&creds->refcount)) {
     if (creds->vtable->destruct != nullptr) {
-      creds->vtable->destruct(exec_ctx, creds);
+      creds->vtable->destruct(creds);
     }
     gpr_free(creds);
   }
@@ -97,44 +94,42 @@
 
 void grpc_call_credentials_release(grpc_call_credentials* creds) {
   GRPC_API_TRACE("grpc_call_credentials_release(creds=%p)", 1, (creds));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_call_credentials_unref(&exec_ctx, creds);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_call_credentials_unref(creds);
 }
 
 bool grpc_call_credentials_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_polling_entity* pollent, grpc_auth_metadata_context context,
-    grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
-    grpc_error** error) {
+    grpc_call_credentials* creds, grpc_polling_entity* pollent,
+    grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array,
+    grpc_closure* on_request_metadata, grpc_error** error) {
   if (creds == nullptr || creds->vtable->get_request_metadata == nullptr) {
     return true;
   }
-  return creds->vtable->get_request_metadata(
-      exec_ctx, creds, pollent, context, md_array, on_request_metadata, error);
+  return creds->vtable->get_request_metadata(creds, pollent, context, md_array,
+                                             on_request_metadata, error);
 }
 
 void grpc_call_credentials_cancel_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+    grpc_call_credentials* creds, grpc_credentials_mdelem_array* md_array,
+    grpc_error* error) {
   if (creds == nullptr ||
       creds->vtable->cancel_get_request_metadata == nullptr) {
     return;
   }
-  creds->vtable->cancel_get_request_metadata(exec_ctx, creds, md_array, error);
+  creds->vtable->cancel_get_request_metadata(creds, md_array, error);
 }
 
 grpc_security_status grpc_channel_credentials_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_channel_credentials* channel_creds,
-    const char* target, const grpc_channel_args* args,
-    grpc_channel_security_connector** sc, grpc_channel_args** new_args) {
+    grpc_channel_credentials* channel_creds, const char* target,
+    const grpc_channel_args* args, grpc_channel_security_connector** sc,
+    grpc_channel_args** new_args) {
   *new_args = nullptr;
   if (channel_creds == nullptr) {
     return GRPC_SECURITY_ERROR;
   }
   GPR_ASSERT(channel_creds->vtable->create_security_connector != nullptr);
   return channel_creds->vtable->create_security_connector(
-      exec_ctx, channel_creds, nullptr, target, args, sc, new_args);
+      channel_creds, nullptr, target, args, sc, new_args);
 }
 
 grpc_channel_credentials*
@@ -149,8 +144,8 @@
   }
 }
 
-static void credentials_pointer_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
-  grpc_channel_credentials_unref(exec_ctx, (grpc_channel_credentials*)p);
+static void credentials_pointer_arg_destroy(void* p) {
+  grpc_channel_credentials_unref((grpc_channel_credentials*)p);
 }
 
 static void* credentials_pointer_arg_copy(void* p) {
@@ -200,12 +195,11 @@
   return creds;
 }
 
-void grpc_server_credentials_unref(grpc_exec_ctx* exec_ctx,
-                                   grpc_server_credentials* creds) {
+void grpc_server_credentials_unref(grpc_server_credentials* creds) {
   if (creds == nullptr) return;
   if (gpr_unref(&creds->refcount)) {
     if (creds->vtable->destruct != nullptr) {
-      creds->vtable->destruct(exec_ctx, creds);
+      creds->vtable->destruct(creds);
     }
     if (creds->processor.destroy != nullptr &&
         creds->processor.state != nullptr) {
@@ -217,19 +211,17 @@
 
 void grpc_server_credentials_release(grpc_server_credentials* creds) {
   GRPC_API_TRACE("grpc_server_credentials_release(creds=%p)", 1, (creds));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_server_credentials_unref(&exec_ctx, creds);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_server_credentials_unref(creds);
 }
 
 grpc_security_status grpc_server_credentials_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_server_credentials* creds,
-    grpc_server_security_connector** sc) {
+    grpc_server_credentials* creds, grpc_server_security_connector** sc) {
   if (creds == nullptr || creds->vtable->create_security_connector == nullptr) {
     gpr_log(GPR_ERROR, "Server credentials cannot create security context.");
     return GRPC_SECURITY_ERROR;
   }
-  return creds->vtable->create_security_connector(exec_ctx, creds, sc);
+  return creds->vtable->create_security_connector(creds, sc);
 }
 
 void grpc_server_credentials_set_auth_metadata_processor(
@@ -247,9 +239,8 @@
   creds->processor = processor;
 }
 
-static void server_credentials_pointer_arg_destroy(grpc_exec_ctx* exec_ctx,
-                                                   void* p) {
-  grpc_server_credentials_unref(exec_ctx, (grpc_server_credentials*)p);
+static void server_credentials_pointer_arg_destroy(void* p) {
+  grpc_server_credentials_unref((grpc_server_credentials*)p);
 }
 
 static void* server_credentials_pointer_arg_copy(void* p) {
diff --git a/src/core/lib/security/credentials/credentials.h b/src/core/lib/security/credentials/credentials.h
index c65b966..4825b65 100644
--- a/src/core/lib/security/credentials/credentials.h
+++ b/src/core/lib/security/credentials/credentials.h
@@ -29,10 +29,6 @@
 #include "src/core/lib/iomgr/polling_entity.h"
 #include "src/core/lib/security/transport/security_connector.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 struct grpc_http_response;
 
 /* --- Constants. --- */
@@ -92,13 +88,12 @@
 #define GRPC_ARG_CHANNEL_CREDENTIALS "grpc.channel_credentials"
 
 typedef struct {
-  void (*destruct)(grpc_exec_ctx* exec_ctx, grpc_channel_credentials* c);
+  void (*destruct)(grpc_channel_credentials* c);
 
   grpc_security_status (*create_security_connector)(
-      grpc_exec_ctx* exec_ctx, grpc_channel_credentials* c,
-      grpc_call_credentials* call_creds, const char* target,
-      const grpc_channel_args* args, grpc_channel_security_connector** sc,
-      grpc_channel_args** new_args);
+      grpc_channel_credentials* c, grpc_call_credentials* call_creds,
+      const char* target, const grpc_channel_args* args,
+      grpc_channel_security_connector** sc, grpc_channel_args** new_args);
 
   grpc_channel_credentials* (*duplicate_without_call_credentials)(
       grpc_channel_credentials* c);
@@ -112,17 +107,16 @@
 
 grpc_channel_credentials* grpc_channel_credentials_ref(
     grpc_channel_credentials* creds);
-void grpc_channel_credentials_unref(grpc_exec_ctx* exec_ctx,
-                                    grpc_channel_credentials* creds);
+void grpc_channel_credentials_unref(grpc_channel_credentials* creds);
 
 /* Creates a security connector for the channel. May also create new channel
    args for the channel to be used in place of the passed in const args if
    returned non NULL. In that case the caller is responsible for destroying
    new_args after channel creation. */
 grpc_security_status grpc_channel_credentials_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_channel_credentials* creds,
-    const char* target, const grpc_channel_args* args,
-    grpc_channel_security_connector** sc, grpc_channel_args** new_args);
+    grpc_channel_credentials* creds, const char* target,
+    const grpc_channel_args* args, grpc_channel_security_connector** sc,
+    grpc_channel_args** new_args);
 
 /* Creates a version of the channel credentials without any attached call
    credentials. This can be used in order to open a channel to a non-trusted
@@ -157,22 +151,19 @@
 void grpc_credentials_mdelem_array_append(grpc_credentials_mdelem_array* dst,
                                           grpc_credentials_mdelem_array* src);
 
-void grpc_credentials_mdelem_array_destroy(grpc_exec_ctx* exec_ctx,
-                                           grpc_credentials_mdelem_array* list);
+void grpc_credentials_mdelem_array_destroy(grpc_credentials_mdelem_array* list);
 
 /* --- grpc_call_credentials. --- */
 
 typedef struct {
-  void (*destruct)(grpc_exec_ctx* exec_ctx, grpc_call_credentials* c);
-  bool (*get_request_metadata)(grpc_exec_ctx* exec_ctx,
-                               grpc_call_credentials* c,
+  void (*destruct)(grpc_call_credentials* c);
+  bool (*get_request_metadata)(grpc_call_credentials* c,
                                grpc_polling_entity* pollent,
                                grpc_auth_metadata_context context,
                                grpc_credentials_mdelem_array* md_array,
                                grpc_closure* on_request_metadata,
                                grpc_error** error);
-  void (*cancel_get_request_metadata)(grpc_exec_ctx* exec_ctx,
-                                      grpc_call_credentials* c,
+  void (*cancel_get_request_metadata)(grpc_call_credentials* c,
                                       grpc_credentials_mdelem_array* md_array,
                                       grpc_error* error);
 } grpc_call_credentials_vtable;
@@ -184,39 +175,35 @@
 };
 
 grpc_call_credentials* grpc_call_credentials_ref(grpc_call_credentials* creds);
-void grpc_call_credentials_unref(grpc_exec_ctx* exec_ctx,
-                                 grpc_call_credentials* creds);
+void grpc_call_credentials_unref(grpc_call_credentials* creds);
 
 /// Returns true if completed synchronously, in which case \a error will
 /// be set to indicate the result.  Otherwise, \a on_request_metadata will
 /// be invoked asynchronously when complete.  \a md_array will be populated
 /// with the resulting metadata once complete.
 bool grpc_call_credentials_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_polling_entity* pollent, grpc_auth_metadata_context context,
-    grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
-    grpc_error** error);
+    grpc_call_credentials* creds, grpc_polling_entity* pollent,
+    grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array,
+    grpc_closure* on_request_metadata, grpc_error** error);
 
 /// Cancels a pending asynchronous operation started by
 /// grpc_call_credentials_get_request_metadata() with the corresponding
 /// value of \a md_array.
 void grpc_call_credentials_cancel_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
-    grpc_credentials_mdelem_array* md_array, grpc_error* error);
+    grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array,
+    grpc_error* error);
 
 /* Metadata-only credentials with the specified key and value where
    asynchronicity can be simulated for testing. */
 grpc_call_credentials* grpc_md_only_test_credentials_create(
-    grpc_exec_ctx* exec_ctx, const char* md_key, const char* md_value,
-    bool is_async);
+    const char* md_key, const char* md_value, bool is_async);
 
 /* --- grpc_server_credentials. --- */
 
 typedef struct {
-  void (*destruct)(grpc_exec_ctx* exec_ctx, grpc_server_credentials* c);
+  void (*destruct)(grpc_server_credentials* c);
   grpc_security_status (*create_security_connector)(
-      grpc_exec_ctx* exec_ctx, grpc_server_credentials* c,
-      grpc_server_security_connector** sc);
+      grpc_server_credentials* c, grpc_server_security_connector** sc);
 } grpc_server_credentials_vtable;
 
 struct grpc_server_credentials {
@@ -227,14 +214,12 @@
 };
 
 grpc_security_status grpc_server_credentials_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_server_credentials* creds,
-    grpc_server_security_connector** sc);
+    grpc_server_credentials* creds, grpc_server_security_connector** sc);
 
 grpc_server_credentials* grpc_server_credentials_ref(
     grpc_server_credentials* creds);
 
-void grpc_server_credentials_unref(grpc_exec_ctx* exec_ctx,
-                                   grpc_server_credentials* creds);
+void grpc_server_credentials_unref(grpc_server_credentials* creds);
 
 #define GRPC_SERVER_CREDENTIALS_ARG "grpc.server_credentials"
 
@@ -254,10 +239,6 @@
     grpc_call_credentials* creds);
 
 void grpc_credentials_metadata_request_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_credentials_metadata_request* r);
-
-#ifdef __cplusplus
-}
-#endif
+    grpc_credentials_metadata_request* r);
 
 #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_CREDENTIALS_H */
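
Note: as the doc comment above describes, grpc_call_credentials_get_request_metadata()
returns true when it completes synchronously (with *error holding the result) and
otherwise invokes on_request_metadata asynchronously. A hedged sketch of a caller handling
both paths, mirroring the composite-credentials code in this change; on_metadata_ready and
request_metadata are hypothetical names:

  static void on_metadata_ready(void* arg, grpc_error* error) {
    /* Async completion path: md_array has been populated, or error is set. */
  }

  static void request_metadata(grpc_call_credentials* creds,
                               grpc_polling_entity* pollent,
                               grpc_auth_metadata_context ctx,
                               grpc_credentials_mdelem_array* md_array,
                               grpc_closure* closure) {
    grpc_error* error = GRPC_ERROR_NONE;
    if (grpc_call_credentials_get_request_metadata(creds, pollent, ctx, md_array,
                                                   closure, &error)) {
      /* Synchronous completion: handle error / md_array right here. */
      on_metadata_ready(nullptr, error);
      GRPC_ERROR_UNREF(error);
    }
    /* Otherwise the closure runs once the credentials finish asynchronously. */
  }
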
diff --git a/src/core/lib/security/credentials/credentials_metadata.cc b/src/core/lib/security/credentials/credentials_metadata.cc
index a3623fa..9ceaf21 100644
--- a/src/core/lib/security/credentials/credentials_metadata.cc
+++ b/src/core/lib/security/credentials/credentials_metadata.cc
@@ -52,9 +52,9 @@
 }
 
 void grpc_credentials_mdelem_array_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_credentials_mdelem_array* list) {
+    grpc_credentials_mdelem_array* list) {
   for (size_t i = 0; i < list->size; ++i) {
-    GRPC_MDELEM_UNREF(exec_ctx, list->md[i]);
+    GRPC_MDELEM_UNREF(list->md[i]);
   }
   gpr_free(list->md);
 }
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.cc b/src/core/lib/security/credentials/fake/fake_credentials.cc
index a535a31..99b1214 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.cc
+++ b/src/core/lib/security/credentials/fake/fake_credentials.cc
@@ -34,10 +34,9 @@
   "grpc.fake_security.expected_targets"
 
 static grpc_security_status fake_transport_security_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_channel_credentials* c,
-    grpc_call_credentials* call_creds, const char* target,
-    const grpc_channel_args* args, grpc_channel_security_connector** sc,
-    grpc_channel_args** new_args) {
+    grpc_channel_credentials* c, grpc_call_credentials* call_creds,
+    const char* target, const grpc_channel_args* args,
+    grpc_channel_security_connector** sc, grpc_channel_args** new_args) {
   *sc =
       grpc_fake_channel_security_connector_create(c, call_creds, target, args);
   return GRPC_SECURITY_OK;
@@ -45,8 +44,7 @@
 
 static grpc_security_status
 fake_transport_security_server_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_server_credentials* c,
-    grpc_server_security_connector** sc) {
+    grpc_server_credentials* c, grpc_server_security_connector** sc) {
   *sc = grpc_fake_server_security_connector_create(c);
   return GRPC_SECURITY_OK;
 }
@@ -98,29 +96,27 @@
 
 /* -- Metadata-only test credentials. -- */
 
-static void md_only_test_destruct(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_credentials* creds) {
+static void md_only_test_destruct(grpc_call_credentials* creds) {
   grpc_md_only_test_credentials* c = (grpc_md_only_test_credentials*)creds;
-  GRPC_MDELEM_UNREF(exec_ctx, c->md);
+  GRPC_MDELEM_UNREF(c->md);
 }
 
 static bool md_only_test_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_polling_entity* pollent, grpc_auth_metadata_context context,
-    grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
-    grpc_error** error) {
+    grpc_call_credentials* creds, grpc_polling_entity* pollent,
+    grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array,
+    grpc_closure* on_request_metadata, grpc_error** error) {
   grpc_md_only_test_credentials* c = (grpc_md_only_test_credentials*)creds;
   grpc_credentials_mdelem_array_add(md_array, c->md);
   if (c->is_async) {
-    GRPC_CLOSURE_SCHED(exec_ctx, on_request_metadata, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(on_request_metadata, GRPC_ERROR_NONE);
     return false;
   }
   return true;
 }
 
 static void md_only_test_cancel_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
-    grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+    grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array,
+    grpc_error* error) {
   GRPC_ERROR_UNREF(error);
 }
 
@@ -129,16 +125,14 @@
     md_only_test_cancel_get_request_metadata};
 
 grpc_call_credentials* grpc_md_only_test_credentials_create(
-    grpc_exec_ctx* exec_ctx, const char* md_key, const char* md_value,
-    bool is_async) {
+    const char* md_key, const char* md_value, bool is_async) {
   grpc_md_only_test_credentials* c = (grpc_md_only_test_credentials*)gpr_zalloc(
       sizeof(grpc_md_only_test_credentials));
   c->base.type = GRPC_CALL_CREDENTIALS_TYPE_OAUTH2;
   c->base.vtable = &md_only_test_vtable;
   gpr_ref_init(&c->base.refcount, 1);
-  c->md =
-      grpc_mdelem_from_slices(exec_ctx, grpc_slice_from_copied_string(md_key),
-                              grpc_slice_from_copied_string(md_value));
+  c->md = grpc_mdelem_from_slices(grpc_slice_from_copied_string(md_key),
+                                  grpc_slice_from_copied_string(md_value));
   c->is_async = is_async;
   return &c->base;
 }
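
Note: with exec_ctx removed from the signature, a test can construct these metadata-only
credentials directly; an ExecCtx is still placed on the stack, mirroring the other call
sites in this change. A sketch (the key/value strings are arbitrary test data):

  grpc_core::ExecCtx exec_ctx;
  grpc_call_credentials* creds = grpc_md_only_test_credentials_create(
      "authorization", "test-token", /*is_async=*/true);
  /* ... exercise the credentials ... */
  grpc_call_credentials_unref(creds);
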
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.h b/src/core/lib/security/credentials/fake/fake_credentials.h
index b8b58cc..0e9ff15 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.h
+++ b/src/core/lib/security/credentials/fake/fake_credentials.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/security/credentials/credentials.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* -- Fake transport security credentials. -- */
 
 /* Creates a fake transport security credentials object for testing. */
@@ -60,8 +56,4 @@
   bool is_async;
 } grpc_md_only_test_credentials;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_FAKE_FAKE_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/google_default/credentials_generic.cc b/src/core/lib/security/credentials/google_default/credentials_generic.cc
index 58ee080..af103f5 100644
--- a/src/core/lib/security/credentials/google_default/credentials_generic.cc
+++ b/src/core/lib/security/credentials/google_default/credentials_generic.cc
@@ -29,7 +29,7 @@
   char* result = nullptr;
   char* base = gpr_getenv(GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR);
   if (base == nullptr) {
-    gpr_log(GPR_ERROR, "Could not get " GRPC_GOOGLE_CREDENTIALS_ENV_VAR
+    gpr_log(GPR_ERROR, "Could not get " GRPC_GOOGLE_CREDENTIALS_PATH_ENV_VAR
                        " environment variable.");
     return nullptr;
   }
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.cc b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
index f586c7b..03d5285 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.cc
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.cc
@@ -58,8 +58,7 @@
   grpc_http_response response;
 } compute_engine_detector;
 
-static void on_compute_engine_detection_http_response(grpc_exec_ctx* exec_ctx,
-                                                      void* user_data,
+static void on_compute_engine_detection_http_response(void* user_data,
                                                       grpc_error* error) {
   compute_engine_detector* detector = (compute_engine_detector*)user_data;
   if (error == GRPC_ERROR_NONE && detector->response.status == 200 &&
@@ -80,16 +79,16 @@
   detector->is_done = 1;
   GRPC_LOG_IF_ERROR(
       "Pollset kick",
-      grpc_pollset_kick(
-          exec_ctx, grpc_polling_entity_pollset(&detector->pollent), nullptr));
+      grpc_pollset_kick(grpc_polling_entity_pollset(&detector->pollent),
+                        nullptr));
   gpr_mu_unlock(g_polling_mu);
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p, grpc_error* e) {
-  grpc_pollset_destroy(exec_ctx, (grpc_pollset*)p);
+static void destroy_pollset(void* p, grpc_error* e) {
+  grpc_pollset_destroy((grpc_pollset*)p);
 }
 
-static int is_stack_running_on_compute_engine(grpc_exec_ctx* exec_ctx) {
+static int is_stack_running_on_compute_engine() {
   compute_engine_detector detector;
   grpc_httpcli_request request;
   grpc_httpcli_context context;
@@ -115,14 +114,14 @@
   grpc_resource_quota* resource_quota =
       grpc_resource_quota_create("google_default_credentials");
   grpc_httpcli_get(
-      exec_ctx, &context, &detector.pollent, resource_quota, &request,
-      grpc_exec_ctx_now(exec_ctx) + max_detection_delay,
+      &context, &detector.pollent, resource_quota, &request,
+      grpc_core::ExecCtx::Get()->Now() + max_detection_delay,
       GRPC_CLOSURE_CREATE(on_compute_engine_detection_http_response, &detector,
                           grpc_schedule_on_exec_ctx),
       &detector.response);
-  grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
 
-  grpc_exec_ctx_flush(exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Block until we get the response. This is not ideal but this should only be
      called once for the lifetime of the process by the default credentials. */
@@ -131,8 +130,7 @@
     grpc_pollset_worker* worker = nullptr;
     if (!GRPC_LOG_IF_ERROR(
             "pollset_work",
-            grpc_pollset_work(exec_ctx,
-                              grpc_polling_entity_pollset(&detector.pollent),
+            grpc_pollset_work(grpc_polling_entity_pollset(&detector.pollent),
                               &worker, GRPC_MILLIS_INF_FUTURE))) {
       detector.is_done = 1;
       detector.success = 0;
@@ -140,15 +138,14 @@
   }
   gpr_mu_unlock(g_polling_mu);
 
-  grpc_httpcli_context_destroy(exec_ctx, &context);
+  grpc_httpcli_context_destroy(&context);
   GRPC_CLOSURE_INIT(&destroy_closure, destroy_pollset,
                     grpc_polling_entity_pollset(&detector.pollent),
                     grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(exec_ctx,
-                        grpc_polling_entity_pollset(&detector.pollent),
+  grpc_pollset_shutdown(grpc_polling_entity_pollset(&detector.pollent),
                         &destroy_closure);
   g_polling_mu = nullptr;
-  grpc_exec_ctx_flush(exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
 
   gpr_free(grpc_polling_entity_pollset(&detector.pollent));
   grpc_http_response_destroy(&detector.response);
@@ -158,7 +155,7 @@
 
 /* Takes ownership of creds_path if not NULL. */
 static grpc_error* create_default_creds_from_path(
-    grpc_exec_ctx* exec_ctx, char* creds_path, grpc_call_credentials** creds) {
+    char* creds_path, grpc_call_credentials** creds) {
   grpc_json* json = nullptr;
   grpc_auth_json_key key;
   grpc_auth_refresh_token token;
@@ -187,7 +184,7 @@
   if (grpc_auth_json_key_is_valid(&key)) {
     result =
         grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
-            exec_ctx, key, grpc_max_auth_token_lifetime());
+            key, grpc_max_auth_token_lifetime());
     if (result == nullptr) {
       error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
           "grpc_service_account_jwt_access_credentials_create_from_auth_json_"
@@ -212,7 +209,7 @@
 end:
   GPR_ASSERT((result == nullptr) + (error == GRPC_ERROR_NONE) == 1);
   if (creds_path != nullptr) gpr_free(creds_path);
-  grpc_slice_unref_internal(exec_ctx, creds_data);
+  grpc_slice_unref_internal(creds_data);
   if (json != nullptr) grpc_json_destroy(json);
   *creds = result;
   return error;
@@ -224,7 +221,7 @@
   grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
       "Failed to create Google credentials");
   grpc_error* err;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   GRPC_API_TRACE("grpc_google_default_credentials_create(void)", 0, ());
 
@@ -239,22 +236,20 @@
 
   /* First, try the environment variable. */
   err = create_default_creds_from_path(
-      &exec_ctx, gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR), &call_creds);
+      gpr_getenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR), &call_creds);
   if (err == GRPC_ERROR_NONE) goto end;
   error = grpc_error_add_child(error, err);
 
   /* Then the well-known file. */
   err = create_default_creds_from_path(
-      &exec_ctx, grpc_get_well_known_google_credentials_file_path(),
-      &call_creds);
+      grpc_get_well_known_google_credentials_file_path(), &call_creds);
   if (err == GRPC_ERROR_NONE) goto end;
   error = grpc_error_add_child(error, err);
 
   /* At last try to see if we're on compute engine (do the detection only once
      since it requires a network test). */
   if (!compute_engine_detection_done) {
-    int need_compute_engine_creds =
-        is_stack_running_on_compute_engine(&exec_ctx);
+    int need_compute_engine_creds = is_stack_running_on_compute_engine();
     compute_engine_detection_done = 1;
     if (need_compute_engine_creds) {
       call_creds = grpc_google_compute_engine_credentials_create(nullptr);
@@ -278,8 +273,8 @@
           grpc_composite_channel_credentials_create(ssl_creds, call_creds,
                                                     nullptr));
       GPR_ASSERT(default_credentials != nullptr);
-      grpc_channel_credentials_unref(&exec_ctx, ssl_creds);
-      grpc_call_credentials_unref(&exec_ctx, call_creds);
+      grpc_channel_credentials_unref(ssl_creds);
+      grpc_call_credentials_unref(call_creds);
       result = default_credentials;
     } else {
       gpr_log(GPR_ERROR, "Could not create google default credentials.");
@@ -291,21 +286,20 @@
   } else {
     GRPC_ERROR_UNREF(error);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return result;
 }
 
 void grpc_flush_cached_google_default_credentials(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_once_init(&g_once, init_default_credentials);
   gpr_mu_lock(&g_state_mu);
   if (default_credentials != nullptr) {
-    grpc_channel_credentials_unref(&exec_ctx, default_credentials);
+    grpc_channel_credentials_unref(default_credentials);
     default_credentials = nullptr;
   }
   compute_engine_detection_done = 0;
   gpr_mu_unlock(&g_state_mu);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 /* -- Well known credentials path. -- */
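
Note: the compute-engine probe above blocks the calling thread on a dedicated pollset
until the HTTP response flips detector.is_done. The same wait-on-a-flag pattern, reduced
to its core (done, mu and pollent stand in for the detector fields):

  gpr_mu_lock(mu);
  while (!done) {
    grpc_pollset_worker* worker = nullptr;
    if (!GRPC_LOG_IF_ERROR(
            "pollset_work",
            grpc_pollset_work(grpc_polling_entity_pollset(pollent), &worker,
                              GRPC_MILLIS_INF_FUTURE))) {
      done = 1; /* give up on polling errors */
    }
  }
  gpr_mu_unlock(mu);
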
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.h b/src/core/lib/security/credentials/google_default/google_default_credentials.h
index a0f8dc9..b163e48 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.h
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.h
@@ -23,10 +23,6 @@
 
 #include "src/core/lib/security/credentials/credentials.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #define GRPC_GOOGLE_CLOUD_SDK_CONFIG_DIRECTORY "gcloud"
 #define GRPC_GOOGLE_WELL_KNOWN_CREDENTIALS_FILE \
   "application_default_credentials.json"
@@ -45,9 +41,5 @@
 
 void grpc_flush_cached_google_default_credentials(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_GOOGLE_DEFAULT_GOOGLE_DEFAULT_CREDENTIALS_H \
         */
diff --git a/src/core/lib/security/credentials/iam/iam_credentials.cc b/src/core/lib/security/credentials/iam/iam_credentials.cc
index 1741bf3..75acb2a 100644
--- a/src/core/lib/security/credentials/iam/iam_credentials.cc
+++ b/src/core/lib/security/credentials/iam/iam_credentials.cc
@@ -27,14 +27,12 @@
 #include <grpc/support/string_util.h>
 #include <grpc/support/sync.h>
 
-static void iam_destruct(grpc_exec_ctx* exec_ctx,
-                         grpc_call_credentials* creds) {
+static void iam_destruct(grpc_call_credentials* creds) {
   grpc_google_iam_credentials* c = (grpc_google_iam_credentials*)creds;
-  grpc_credentials_mdelem_array_destroy(exec_ctx, &c->md_array);
+  grpc_credentials_mdelem_array_destroy(&c->md_array);
 }
 
-static bool iam_get_request_metadata(grpc_exec_ctx* exec_ctx,
-                                     grpc_call_credentials* creds,
+static bool iam_get_request_metadata(grpc_call_credentials* creds,
                                      grpc_polling_entity* pollent,
                                      grpc_auth_metadata_context context,
                                      grpc_credentials_mdelem_array* md_array,
@@ -46,8 +44,8 @@
 }
 
 static void iam_cancel_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
-    grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+    grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array,
+    grpc_error* error) {
   GRPC_ERROR_UNREF(error);
 }
 
@@ -56,7 +54,7 @@
 
 grpc_call_credentials* grpc_google_iam_credentials_create(
     const char* token, const char* authority_selector, void* reserved) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GRPC_API_TRACE(
       "grpc_iam_credentials_create(token=%s, authority_selector=%s, "
       "reserved=%p)",
@@ -70,17 +68,15 @@
   c->base.vtable = &iam_vtable;
   gpr_ref_init(&c->base.refcount, 1);
   grpc_mdelem md = grpc_mdelem_from_slices(
-      &exec_ctx,
       grpc_slice_from_static_string(GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY),
       grpc_slice_from_copied_string(token));
   grpc_credentials_mdelem_array_add(&c->md_array, md);
-  GRPC_MDELEM_UNREF(&exec_ctx, md);
+  GRPC_MDELEM_UNREF(md);
   md = grpc_mdelem_from_slices(
-      &exec_ctx,
       grpc_slice_from_static_string(GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY),
       grpc_slice_from_copied_string(authority_selector));
   grpc_credentials_mdelem_array_add(&c->md_array, md);
-  GRPC_MDELEM_UNREF(&exec_ctx, md);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(md);
+
   return &c->base;
 }
diff --git a/src/core/lib/security/credentials/jwt/json_token.h b/src/core/lib/security/credentials/jwt/json_token.h
index b2c3c09..9b77488 100644
--- a/src/core/lib/security/credentials/jwt/json_token.h
+++ b/src/core/lib/security/credentials/jwt/json_token.h
@@ -19,10 +19,6 @@
 #ifndef GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H
 #define GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #include <grpc/slice.h>
 #include <openssl/rsa.h>
 
@@ -74,8 +70,4 @@
 void grpc_jwt_encode_and_sign_set_override(
     grpc_jwt_encode_and_sign_override func);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JSON_TOKEN_H */
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.cc b/src/core/lib/security/credentials/jwt/jwt_credentials.cc
index 77163c0..2404e86 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.cc
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.cc
@@ -30,9 +30,8 @@
 #include <grpc/support/string_util.h>
 #include <grpc/support/sync.h>
 
-static void jwt_reset_cache(grpc_exec_ctx* exec_ctx,
-                            grpc_service_account_jwt_access_credentials* c) {
-  GRPC_MDELEM_UNREF(exec_ctx, c->cached.jwt_md);
+static void jwt_reset_cache(grpc_service_account_jwt_access_credentials* c) {
+  GRPC_MDELEM_UNREF(c->cached.jwt_md);
   c->cached.jwt_md = GRPC_MDNULL;
   if (c->cached.service_url != nullptr) {
     gpr_free(c->cached.service_url);
@@ -41,17 +40,15 @@
   c->cached.jwt_expiration = gpr_inf_past(GPR_CLOCK_REALTIME);
 }
 
-static void jwt_destruct(grpc_exec_ctx* exec_ctx,
-                         grpc_call_credentials* creds) {
+static void jwt_destruct(grpc_call_credentials* creds) {
   grpc_service_account_jwt_access_credentials* c =
       (grpc_service_account_jwt_access_credentials*)creds;
   grpc_auth_json_key_destruct(&c->key);
-  jwt_reset_cache(exec_ctx, c);
+  jwt_reset_cache(c);
   gpr_mu_destroy(&c->cache_mu);
 }
 
-static bool jwt_get_request_metadata(grpc_exec_ctx* exec_ctx,
-                                     grpc_call_credentials* creds,
+static bool jwt_get_request_metadata(grpc_call_credentials* creds,
                                      grpc_polling_entity* pollent,
                                      grpc_auth_metadata_context context,
                                      grpc_credentials_mdelem_array* md_array,
@@ -81,7 +78,7 @@
     char* jwt = nullptr;
     /* Generate a new jwt. */
     gpr_mu_lock(&c->cache_mu);
-    jwt_reset_cache(exec_ctx, c);
+    jwt_reset_cache(c);
     jwt = grpc_jwt_encode_and_sign(&c->key, context.service_url,
                                    c->jwt_lifetime, nullptr);
     if (jwt != nullptr) {
@@ -92,7 +89,6 @@
           gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), c->jwt_lifetime);
       c->cached.service_url = gpr_strdup(context.service_url);
       c->cached.jwt_md = grpc_mdelem_from_slices(
-          exec_ctx,
           grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY),
           grpc_slice_from_copied_string(md_value));
       gpr_free(md_value);
@@ -103,7 +99,7 @@
 
   if (!GRPC_MDISNULL(jwt_md)) {
     grpc_credentials_mdelem_array_add(md_array, jwt_md);
-    GRPC_MDELEM_UNREF(exec_ctx, jwt_md);
+    GRPC_MDELEM_UNREF(jwt_md);
   } else {
     *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Could not generate JWT.");
   }
@@ -111,8 +107,8 @@
 }
 
 static void jwt_cancel_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
-    grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+    grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array,
+    grpc_error* error) {
   GRPC_ERROR_UNREF(error);
 }
 
@@ -121,8 +117,7 @@
 
 grpc_call_credentials*
 grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
-    grpc_exec_ctx* exec_ctx, grpc_auth_json_key key,
-    gpr_timespec token_lifetime) {
+    grpc_auth_json_key key, gpr_timespec token_lifetime) {
   grpc_service_account_jwt_access_credentials* c;
   if (!grpc_auth_json_key_is_valid(&key)) {
     gpr_log(GPR_ERROR, "Invalid input for jwt credentials creation");
@@ -143,7 +138,7 @@
   }
   c->jwt_lifetime = token_lifetime;
   gpr_mu_init(&c->cache_mu);
-  jwt_reset_cache(exec_ctx, c);
+  jwt_reset_cache(c);
   return &c->base;
 }
 
@@ -186,11 +181,10 @@
     gpr_free(clean_json);
   }
   GPR_ASSERT(reserved == nullptr);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_call_credentials* creds =
       grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
-          &exec_ctx, grpc_auth_json_key_create_from_string(json_key),
-          token_lifetime);
-  grpc_exec_ctx_finish(&exec_ctx);
+          grpc_auth_json_key_create_from_string(json_key), token_lifetime);
+
   return creds;
 }
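
Taken together, the jwt hunks give the new shape of a call-credentials implementation once the context parameter is gone. The three vtable entry points now read as below; signatures are collected from the hunks above and from the matching oauth2/access-token hunks further down, which show the trailing parameters that this file's hunk boundary cuts off:

    // Call-credentials entry points after the exec_ctx removal.
    static void jwt_destruct(grpc_call_credentials* creds);
    static bool jwt_get_request_metadata(grpc_call_credentials* creds,
                                         grpc_polling_entity* pollent,
                                         grpc_auth_metadata_context context,
                                         grpc_credentials_mdelem_array* md_array,
                                         grpc_closure* on_request_metadata,
                                         grpc_error** error);
    static void jwt_cancel_get_request_metadata(
        grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array,
        grpc_error* error);
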
diff --git a/src/core/lib/security/credentials/jwt/jwt_credentials.h b/src/core/lib/security/credentials/jwt/jwt_credentials.h
index d554613..f58a8b6 100644
--- a/src/core/lib/security/credentials/jwt/jwt_credentials.h
+++ b/src/core/lib/security/credentials/jwt/jwt_credentials.h
@@ -22,10 +22,6 @@
 #include "src/core/lib/security/credentials/credentials.h"
 #include "src/core/lib/security/credentials/jwt/json_token.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   grpc_call_credentials base;
 
@@ -46,11 +42,6 @@
 // Takes ownership of the key.
 grpc_call_credentials*
 grpc_service_account_jwt_access_credentials_create_from_auth_json_key(
-    grpc_exec_ctx* exec_ctx, grpc_auth_json_key key,
-    gpr_timespec token_lifetime);
-
-#ifdef __cplusplus
-}
-#endif
+    grpc_auth_json_key key, gpr_timespec token_lifetime);
 
 #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.cc b/src/core/lib/security/credentials/jwt/jwt_verifier.cc
index 3709b83..39339f0 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.cc
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.cc
@@ -74,12 +74,11 @@
   }
 }
 
-static grpc_json* parse_json_part_from_jwt(grpc_exec_ctx* exec_ctx,
-                                           const char* str, size_t len,
+static grpc_json* parse_json_part_from_jwt(const char* str, size_t len,
                                            grpc_slice* buffer) {
   grpc_json* json;
 
-  *buffer = grpc_base64_decode_with_len(exec_ctx, str, len, 1);
+  *buffer = grpc_base64_decode_with_len(str, len, 1);
   if (GRPC_SLICE_IS_EMPTY(*buffer)) {
     gpr_log(GPR_ERROR, "Invalid base64.");
     return nullptr;
@@ -87,7 +86,7 @@
   json = grpc_json_parse_string_with_len((char*)GRPC_SLICE_START_PTR(*buffer),
                                          GRPC_SLICE_LENGTH(*buffer));
   if (json == nullptr) {
-    grpc_slice_unref_internal(exec_ctx, *buffer);
+    grpc_slice_unref_internal(*buffer);
     gpr_log(GPR_ERROR, "JSON parsing error.");
   }
   return json;
@@ -123,14 +122,13 @@
   grpc_slice buffer;
 } jose_header;
 
-static void jose_header_destroy(grpc_exec_ctx* exec_ctx, jose_header* h) {
-  grpc_slice_unref_internal(exec_ctx, h->buffer);
+static void jose_header_destroy(jose_header* h) {
+  grpc_slice_unref_internal(h->buffer);
   gpr_free(h);
 }
 
 /* Takes ownership of json and buffer. */
-static jose_header* jose_header_from_json(grpc_exec_ctx* exec_ctx,
-                                          grpc_json* json, grpc_slice buffer) {
+static jose_header* jose_header_from_json(grpc_json* json, grpc_slice buffer) {
   grpc_json* cur;
   jose_header* h = (jose_header*)gpr_zalloc(sizeof(jose_header));
   h->buffer = buffer;
@@ -164,7 +162,7 @@
 
 error:
   grpc_json_destroy(json);
-  jose_header_destroy(exec_ctx, h);
+  jose_header_destroy(h);
   return nullptr;
 }
 
@@ -184,9 +182,9 @@
   grpc_slice buffer;
 };
 
-void grpc_jwt_claims_destroy(grpc_exec_ctx* exec_ctx, grpc_jwt_claims* claims) {
+void grpc_jwt_claims_destroy(grpc_jwt_claims* claims) {
   grpc_json_destroy(claims->json);
-  grpc_slice_unref_internal(exec_ctx, claims->buffer);
+  grpc_slice_unref_internal(claims->buffer);
   gpr_free(claims);
 }
 
@@ -231,8 +229,7 @@
 }
 
 /* Takes ownership of json and buffer even in case of failure. */
-grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_exec_ctx* exec_ctx,
-                                           grpc_json* json, grpc_slice buffer) {
+grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer) {
   grpc_json* cur;
   grpc_jwt_claims* claims =
       (grpc_jwt_claims*)gpr_malloc(sizeof(grpc_jwt_claims));
@@ -274,7 +271,7 @@
   return claims;
 
 error:
-  grpc_jwt_claims_destroy(exec_ctx, claims);
+  grpc_jwt_claims_destroy(claims);
   return nullptr;
 }
 
@@ -350,7 +347,7 @@
     grpc_jwt_claims* claims, const char* audience, grpc_slice signature,
     const char* signed_jwt, size_t signed_jwt_len, void* user_data,
     grpc_jwt_verification_done_cb cb) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   verifier_cb_ctx* ctx = (verifier_cb_ctx*)gpr_zalloc(sizeof(verifier_cb_ctx));
   ctx->verifier = verifier;
   ctx->pollent = grpc_polling_entity_create_from_pollset(pollset);
@@ -361,16 +358,16 @@
   ctx->signed_data = grpc_slice_from_copied_buffer(signed_jwt, signed_jwt_len);
   ctx->user_data = user_data;
   ctx->user_cb = cb;
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return ctx;
 }
 
-void verifier_cb_ctx_destroy(grpc_exec_ctx* exec_ctx, verifier_cb_ctx* ctx) {
+void verifier_cb_ctx_destroy(verifier_cb_ctx* ctx) {
   if (ctx->audience != nullptr) gpr_free(ctx->audience);
-  if (ctx->claims != nullptr) grpc_jwt_claims_destroy(exec_ctx, ctx->claims);
-  grpc_slice_unref_internal(exec_ctx, ctx->signature);
-  grpc_slice_unref_internal(exec_ctx, ctx->signed_data);
-  jose_header_destroy(exec_ctx, ctx->header);
+  if (ctx->claims != nullptr) grpc_jwt_claims_destroy(ctx->claims);
+  grpc_slice_unref_internal(ctx->signature);
+  grpc_slice_unref_internal(ctx->signed_data);
+  jose_header_destroy(ctx->header);
   for (size_t i = 0; i < HTTP_RESPONSE_COUNT; i++) {
     grpc_http_response_destroy(&ctx->responses[i]);
   }
@@ -450,19 +447,19 @@
   return result;
 }
 
-static BIGNUM* bignum_from_base64(grpc_exec_ctx* exec_ctx, const char* b64) {
+static BIGNUM* bignum_from_base64(const char* b64) {
   BIGNUM* result = nullptr;
   grpc_slice bin;
 
   if (b64 == nullptr) return nullptr;
-  bin = grpc_base64_decode(exec_ctx, b64, 1);
+  bin = grpc_base64_decode(b64, 1);
   if (GRPC_SLICE_IS_EMPTY(bin)) {
     gpr_log(GPR_ERROR, "Invalid base64 for big num.");
     return nullptr;
   }
   result = BN_bin2bn(GRPC_SLICE_START_PTR(bin),
                      TSI_SIZE_AS_SIZE(GRPC_SLICE_LENGTH(bin)), nullptr);
-  grpc_slice_unref_internal(exec_ctx, bin);
+  grpc_slice_unref_internal(bin);
   return result;
 }
 
@@ -495,8 +492,7 @@
 }
 #endif  // OPENSSL_VERSION_NUMBER < 0x10100000L
 
-static EVP_PKEY* pkey_from_jwk(grpc_exec_ctx* exec_ctx, const grpc_json* json,
-                               const char* kty) {
+static EVP_PKEY* pkey_from_jwk(const grpc_json* json, const char* kty) {
   const grpc_json* key_prop;
   RSA* rsa = nullptr;
   EVP_PKEY* result = nullptr;
@@ -515,12 +511,10 @@
   }
   for (key_prop = json->child; key_prop != nullptr; key_prop = key_prop->next) {
     if (strcmp(key_prop->key, "n") == 0) {
-      tmp_n =
-          bignum_from_base64(exec_ctx, validate_string_field(key_prop, "n"));
+      tmp_n = bignum_from_base64(validate_string_field(key_prop, "n"));
       if (tmp_n == nullptr) goto end;
     } else if (strcmp(key_prop->key, "e") == 0) {
-      tmp_e =
-          bignum_from_base64(exec_ctx, validate_string_field(key_prop, "e"));
+      tmp_e = bignum_from_base64(validate_string_field(key_prop, "e"));
       if (tmp_e == nullptr) goto end;
     }
   }
@@ -545,8 +539,7 @@
   return result;
 }
 
-static EVP_PKEY* find_verification_key(grpc_exec_ctx* exec_ctx,
-                                       const grpc_json* json,
+static EVP_PKEY* find_verification_key(const grpc_json* json,
                                        const char* header_alg,
                                        const char* header_kid) {
   const grpc_json* jkey;
@@ -591,7 +584,7 @@
     }
     if (alg != nullptr && kid != nullptr && kty != nullptr &&
         strcmp(kid, header_kid) == 0 && strcmp(alg, header_alg) == 0) {
-      return pkey_from_jwk(exec_ctx, jkey, kty);
+      return pkey_from_jwk(jkey, kty);
     }
   }
   gpr_log(GPR_ERROR,
@@ -632,8 +625,7 @@
   return result;
 }
 
-static void on_keys_retrieved(grpc_exec_ctx* exec_ctx, void* user_data,
-                              grpc_error* error) {
+static void on_keys_retrieved(void* user_data, grpc_error* error) {
   verifier_cb_ctx* ctx = (verifier_cb_ctx*)user_data;
   grpc_json* json = json_from_http(&ctx->responses[HTTP_RESPONSE_KEYS]);
   EVP_PKEY* verification_key = nullptr;
@@ -645,7 +637,7 @@
     goto end;
   }
   verification_key =
-      find_verification_key(exec_ctx, json, ctx->header->alg, ctx->header->kid);
+      find_verification_key(json, ctx->header->alg, ctx->header->kid);
   if (verification_key == nullptr) {
     gpr_log(GPR_ERROR, "Could not find verification key with kid %s.",
             ctx->header->kid);
@@ -669,12 +661,11 @@
 end:
   if (json != nullptr) grpc_json_destroy(json);
   EVP_PKEY_free(verification_key);
-  ctx->user_cb(exec_ctx, ctx->user_data, status, claims);
-  verifier_cb_ctx_destroy(exec_ctx, ctx);
+  ctx->user_cb(ctx->user_data, status, claims);
+  verifier_cb_ctx_destroy(ctx);
 }
 
-static void on_openid_config_retrieved(grpc_exec_ctx* exec_ctx, void* user_data,
-                                       grpc_error* error) {
+static void on_openid_config_retrieved(void* user_data, grpc_error* error) {
   const grpc_json* cur;
   verifier_cb_ctx* ctx = (verifier_cb_ctx*)user_data;
   const grpc_http_response* response = &ctx->responses[HTTP_RESPONSE_OPENID];
@@ -711,20 +702,19 @@
      extreme memory pressure. */
   resource_quota = grpc_resource_quota_create("jwt_verifier");
   grpc_httpcli_get(
-      exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
-      grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay,
+      &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
+      grpc_core::ExecCtx::Get()->Now() + grpc_jwt_verifier_max_delay,
       GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
       &ctx->responses[HTTP_RESPONSE_KEYS]);
-  grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
   grpc_json_destroy(json);
   gpr_free(req.host);
   return;
 
 error:
   if (json != nullptr) grpc_json_destroy(json);
-  ctx->user_cb(exec_ctx, ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR,
-               nullptr);
-  verifier_cb_ctx_destroy(exec_ctx, ctx);
+  ctx->user_cb(ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, nullptr);
+  verifier_cb_ctx_destroy(ctx);
 }
 
 static email_key_mapping* verifier_get_mapping(grpc_jwt_verifier* v,
@@ -772,8 +762,7 @@
 }
 
 /* Takes ownership of ctx. */
-static void retrieve_key_and_verify(grpc_exec_ctx* exec_ctx,
-                                    verifier_cb_ctx* ctx) {
+static void retrieve_key_and_verify(verifier_cb_ctx* ctx) {
   const char* email_domain;
   grpc_closure* http_cb;
   char* path_prefix = nullptr;
@@ -840,23 +829,21 @@
      channel. This would allow us to cancel an authentication query when under
      extreme memory pressure. */
   resource_quota = grpc_resource_quota_create("jwt_verifier");
-  grpc_httpcli_get(exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent,
-                   resource_quota, &req,
-                   grpc_exec_ctx_now(exec_ctx) + grpc_jwt_verifier_max_delay,
-                   http_cb, &ctx->responses[rsp_idx]);
-  grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+  grpc_httpcli_get(
+      &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
+      grpc_core::ExecCtx::Get()->Now() + grpc_jwt_verifier_max_delay, http_cb,
+      &ctx->responses[rsp_idx]);
+  grpc_resource_quota_unref_internal(resource_quota);
   gpr_free(req.host);
   gpr_free(req.http.path);
   return;
 
 error:
-  ctx->user_cb(exec_ctx, ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR,
-               nullptr);
-  verifier_cb_ctx_destroy(exec_ctx, ctx);
+  ctx->user_cb(ctx->user_data, GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR, nullptr);
+  verifier_cb_ctx_destroy(ctx);
 }
 
-void grpc_jwt_verifier_verify(grpc_exec_ctx* exec_ctx,
-                              grpc_jwt_verifier* verifier,
+void grpc_jwt_verifier_verify(grpc_jwt_verifier* verifier,
                               grpc_pollset* pollset, const char* jwt,
                               const char* audience,
                               grpc_jwt_verification_done_cb cb,
@@ -875,35 +862,32 @@
              cb != nullptr);
   dot = strchr(cur, '.');
   if (dot == nullptr) goto error;
-  json = parse_json_part_from_jwt(exec_ctx, cur, (size_t)(dot - cur),
-                                  &header_buffer);
+  json = parse_json_part_from_jwt(cur, (size_t)(dot - cur), &header_buffer);
   if (json == nullptr) goto error;
-  header = jose_header_from_json(exec_ctx, json, header_buffer);
+  header = jose_header_from_json(json, header_buffer);
   if (header == nullptr) goto error;
 
   cur = dot + 1;
   dot = strchr(cur, '.');
   if (dot == nullptr) goto error;
-  json = parse_json_part_from_jwt(exec_ctx, cur, (size_t)(dot - cur),
-                                  &claims_buffer);
+  json = parse_json_part_from_jwt(cur, (size_t)(dot - cur), &claims_buffer);
   if (json == nullptr) goto error;
-  claims = grpc_jwt_claims_from_json(exec_ctx, json, claims_buffer);
+  claims = grpc_jwt_claims_from_json(json, claims_buffer);
   if (claims == nullptr) goto error;
 
   signed_jwt_len = (size_t)(dot - jwt);
   cur = dot + 1;
-  signature = grpc_base64_decode(exec_ctx, cur, 1);
+  signature = grpc_base64_decode(cur, 1);
   if (GRPC_SLICE_IS_EMPTY(signature)) goto error;
   retrieve_key_and_verify(
-      exec_ctx,
       verifier_cb_ctx_create(verifier, pollset, header, claims, audience,
                              signature, jwt, signed_jwt_len, user_data, cb));
   return;
 
 error:
-  if (header != nullptr) jose_header_destroy(exec_ctx, header);
-  if (claims != nullptr) grpc_jwt_claims_destroy(exec_ctx, claims);
-  cb(exec_ctx, user_data, GRPC_JWT_VERIFIER_BAD_FORMAT, nullptr);
+  if (header != nullptr) jose_header_destroy(header);
+  if (claims != nullptr) grpc_jwt_claims_destroy(claims);
+  cb(user_data, GRPC_JWT_VERIFIER_BAD_FORMAT, nullptr);
 }
 
 grpc_jwt_verifier* grpc_jwt_verifier_create(
@@ -930,10 +914,10 @@
   return v;
 }
 
-void grpc_jwt_verifier_destroy(grpc_exec_ctx* exec_ctx, grpc_jwt_verifier* v) {
+void grpc_jwt_verifier_destroy(grpc_jwt_verifier* v) {
   size_t i;
   if (v == nullptr) return;
-  grpc_httpcli_context_destroy(exec_ctx, &v->http_ctx);
+  grpc_httpcli_context_destroy(&v->http_ctx);
   if (v->mappings != nullptr) {
     for (i = 0; i < v->num_mappings; i++) {
       gpr_free(v->mappings[i].email_domain);
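
Callbacks in this file no longer receive an exec_ctx, so anything that needs the current time base asks the thread-local context instead. Condensed from the key-retrieval hunk above, with identifiers as in the patch:

    // Deadline for the JWKS fetch: "now" comes from the thread-local ExecCtx
    // rather than a grpc_exec_ctx* threaded through the call chain.
    grpc_millis deadline =
        grpc_core::ExecCtx::Get()->Now() + grpc_jwt_verifier_max_delay;
    grpc_resource_quota* resource_quota =
        grpc_resource_quota_create("jwt_verifier");
    grpc_httpcli_get(&ctx->verifier->http_ctx, &ctx->pollent, resource_quota,
                     &req, deadline,
                     GRPC_CLOSURE_CREATE(on_keys_retrieved, ctx,
                                         grpc_schedule_on_exec_ctx),
                     &ctx->responses[HTTP_RESPONSE_KEYS]);
    grpc_resource_quota_unref_internal(resource_quota);  // drop local ref once issued
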
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.h b/src/core/lib/security/credentials/jwt/jwt_verifier.h
index 8083cf9..b3805e7 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.h
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.h
@@ -32,10 +32,6 @@
 #define GRPC_GOOGLE_SERVICE_ACCOUNTS_KEY_URL_PREFIX \
   "www.googleapis.com/robot/v1/metadata/x509"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* --- grpc_jwt_verifier_status. --- */
 
 typedef enum {
@@ -55,7 +51,7 @@
 
 typedef struct grpc_jwt_claims grpc_jwt_claims;
 
-void grpc_jwt_claims_destroy(grpc_exec_ctx* exec_ctx, grpc_jwt_claims* claims);
+void grpc_jwt_claims_destroy(grpc_jwt_claims* claims);
 
 /* Returns the whole JSON tree of the claims. */
 const grpc_json* grpc_jwt_claims_json(const grpc_jwt_claims* claims);
@@ -98,21 +94,18 @@
     size_t num_mappings);
 
 /*The verifier must not be destroyed if there are still outstanding callbacks.*/
-void grpc_jwt_verifier_destroy(grpc_exec_ctx* exec_ctx,
-                               grpc_jwt_verifier* verifier);
+void grpc_jwt_verifier_destroy(grpc_jwt_verifier* verifier);
 
 /* User provided callback that will be called when the verification of the JWT
    is done (maybe in another thread).
    It is the responsibility of the callee to call grpc_jwt_claims_destroy on
    the claims. */
-typedef void (*grpc_jwt_verification_done_cb)(grpc_exec_ctx* exec_ctx,
-                                              void* user_data,
+typedef void (*grpc_jwt_verification_done_cb)(void* user_data,
                                               grpc_jwt_verifier_status status,
                                               grpc_jwt_claims* claims);
 
 /* Verifies for the JWT for the given expected audience. */
-void grpc_jwt_verifier_verify(grpc_exec_ctx* exec_ctx,
-                              grpc_jwt_verifier* verifier,
+void grpc_jwt_verifier_verify(grpc_jwt_verifier* verifier,
                               grpc_pollset* pollset, const char* jwt,
                               const char* audience,
                               grpc_jwt_verification_done_cb cb,
@@ -120,14 +113,9 @@
 
 /* --- TESTING ONLY exposed functions. --- */
 
-grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_exec_ctx* exec_ctx,
-                                           grpc_json* json, grpc_slice buffer);
+grpc_jwt_claims* grpc_jwt_claims_from_json(grpc_json* json, grpc_slice buffer);
 grpc_jwt_verifier_status grpc_jwt_claims_check(const grpc_jwt_claims* claims,
                                                const char* audience);
 const char* grpc_jwt_issuer_email_domain(const char* issuer);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_JWT_JWT_VERIFIER_H */
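
With the exec_ctx parameter dropped from grpc_jwt_verification_done_cb, a verification callback simply runs inside whatever scoped ExecCtx invoked it. A small sketch of a conforming callback, assuming the ownership rule stated in the header comment above; the body is illustrative:

    static void on_jwt_verified(void* user_data, grpc_jwt_verifier_status status,
                                grpc_jwt_claims* claims) {
      if (status == GRPC_JWT_VERIFIER_OK) {
        // ... inspect grpc_jwt_claims_json(claims), audience, etc. ...
      }
      // Per the header comment, the callback owns the claims when non-null.
      if (claims != nullptr) grpc_jwt_claims_destroy(claims);
    }
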
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
index ccefb4d..e243ea5 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.cc
@@ -103,21 +103,19 @@
 // Oauth2 Token Fetcher credentials.
 //
 
-static void oauth2_token_fetcher_destruct(grpc_exec_ctx* exec_ctx,
-                                          grpc_call_credentials* creds) {
+static void oauth2_token_fetcher_destruct(grpc_call_credentials* creds) {
   grpc_oauth2_token_fetcher_credentials* c =
       (grpc_oauth2_token_fetcher_credentials*)creds;
-  GRPC_MDELEM_UNREF(exec_ctx, c->access_token_md);
+  GRPC_MDELEM_UNREF(c->access_token_md);
   gpr_mu_destroy(&c->mu);
-  grpc_pollset_set_destroy(exec_ctx,
-                           grpc_polling_entity_pollset_set(&c->pollent));
-  grpc_httpcli_context_destroy(exec_ctx, &c->httpcli_context);
+  grpc_pollset_set_destroy(grpc_polling_entity_pollset_set(&c->pollent));
+  grpc_httpcli_context_destroy(&c->httpcli_context);
 }
 
 grpc_credentials_status
 grpc_oauth2_token_fetcher_credentials_parse_server_response(
-    grpc_exec_ctx* exec_ctx, const grpc_http_response* response,
-    grpc_mdelem* token_md, grpc_millis* token_lifetime) {
+    const grpc_http_response* response, grpc_mdelem* token_md,
+    grpc_millis* token_lifetime) {
   char* null_terminated_body = nullptr;
   char* new_access_token = nullptr;
   grpc_credentials_status status = GRPC_CREDENTIALS_OK;
@@ -184,9 +182,8 @@
     gpr_asprintf(&new_access_token, "%s %s", token_type->value,
                  access_token->value);
     *token_lifetime = strtol(expires_in->value, nullptr, 10) * GPR_MS_PER_SEC;
-    if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(exec_ctx, *token_md);
+    if (!GRPC_MDISNULL(*token_md)) GRPC_MDELEM_UNREF(*token_md);
     *token_md = grpc_mdelem_from_slices(
-        exec_ctx,
         grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY),
         grpc_slice_from_copied_string(new_access_token));
     status = GRPC_CREDENTIALS_OK;
@@ -194,7 +191,7 @@
 
 end:
   if (status != GRPC_CREDENTIALS_OK && !GRPC_MDISNULL(*token_md)) {
-    GRPC_MDELEM_UNREF(exec_ctx, *token_md);
+    GRPC_MDELEM_UNREF(*token_md);
     *token_md = GRPC_MDNULL;
   }
   if (null_terminated_body != nullptr) gpr_free(null_terminated_body);
@@ -203,8 +200,7 @@
   return status;
 }
 
-static void on_oauth2_token_fetcher_http_response(grpc_exec_ctx* exec_ctx,
-                                                  void* user_data,
+static void on_oauth2_token_fetcher_http_response(void* user_data,
                                                   grpc_error* error) {
   GRPC_LOG_IF_ERROR("oauth_fetch", GRPC_ERROR_REF(error));
   grpc_credentials_metadata_request* r =
@@ -215,13 +211,13 @@
   grpc_millis token_lifetime;
   grpc_credentials_status status =
       grpc_oauth2_token_fetcher_credentials_parse_server_response(
-          exec_ctx, &r->response, &access_token_md, &token_lifetime);
+          &r->response, &access_token_md, &token_lifetime);
   // Update cache and grab list of pending requests.
   gpr_mu_lock(&c->mu);
   c->token_fetch_pending = false;
   c->access_token_md = GRPC_MDELEM_REF(access_token_md);
   c->token_expiration = status == GRPC_CREDENTIALS_OK
-                            ? grpc_exec_ctx_now(exec_ctx) + token_lifetime
+                            ? grpc_core::ExecCtx::Get()->Now() + token_lifetime
                             : 0;
   grpc_oauth2_pending_get_request_metadata* pending_request =
       c->pending_requests;
@@ -236,24 +232,22 @@
       error = GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
           "Error occured when fetching oauth2 token.", &error, 1);
     }
-    GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata, error);
+    GRPC_CLOSURE_SCHED(pending_request->on_request_metadata, error);
     grpc_polling_entity_del_from_pollset_set(
-        exec_ctx, pending_request->pollent,
-        grpc_polling_entity_pollset_set(&c->pollent));
+        pending_request->pollent, grpc_polling_entity_pollset_set(&c->pollent));
     grpc_oauth2_pending_get_request_metadata* prev = pending_request;
     pending_request = pending_request->next;
     gpr_free(prev);
   }
-  GRPC_MDELEM_UNREF(exec_ctx, access_token_md);
-  grpc_call_credentials_unref(exec_ctx, r->creds);
-  grpc_credentials_metadata_request_destroy(exec_ctx, r);
+  GRPC_MDELEM_UNREF(access_token_md);
+  grpc_call_credentials_unref(r->creds);
+  grpc_credentials_metadata_request_destroy(r);
 }
 
 static bool oauth2_token_fetcher_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_polling_entity* pollent, grpc_auth_metadata_context context,
-    grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
-    grpc_error** error) {
+    grpc_call_credentials* creds, grpc_polling_entity* pollent,
+    grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array,
+    grpc_closure* on_request_metadata, grpc_error** error) {
   grpc_oauth2_token_fetcher_credentials* c =
       (grpc_oauth2_token_fetcher_credentials*)creds;
   // Check if we can use the cached token.
@@ -262,13 +256,14 @@
   grpc_mdelem cached_access_token_md = GRPC_MDNULL;
   gpr_mu_lock(&c->mu);
   if (!GRPC_MDISNULL(c->access_token_md) &&
-      (c->token_expiration - grpc_exec_ctx_now(exec_ctx) > refresh_threshold)) {
+      (c->token_expiration - grpc_core::ExecCtx::Get()->Now() >
+       refresh_threshold)) {
     cached_access_token_md = GRPC_MDELEM_REF(c->access_token_md);
   }
   if (!GRPC_MDISNULL(cached_access_token_md)) {
     gpr_mu_unlock(&c->mu);
     grpc_credentials_mdelem_array_add(md_array, cached_access_token_md);
-    GRPC_MDELEM_UNREF(exec_ctx, cached_access_token_md);
+    GRPC_MDELEM_UNREF(cached_access_token_md);
     return true;
   }
   // Couldn't get the token from the cache.
@@ -280,7 +275,7 @@
   pending_request->on_request_metadata = on_request_metadata;
   pending_request->pollent = pollent;
   grpc_polling_entity_add_to_pollset_set(
-      exec_ctx, pollent, grpc_polling_entity_pollset_set(&c->pollent));
+      pollent, grpc_polling_entity_pollset_set(&c->pollent));
   pending_request->next = c->pending_requests;
   c->pending_requests = pending_request;
   bool start_fetch = false;
@@ -291,17 +286,17 @@
   gpr_mu_unlock(&c->mu);
   if (start_fetch) {
     grpc_call_credentials_ref(creds);
-    c->fetch_func(exec_ctx, grpc_credentials_metadata_request_create(creds),
+    c->fetch_func(grpc_credentials_metadata_request_create(creds),
                   &c->httpcli_context, &c->pollent,
                   on_oauth2_token_fetcher_http_response,
-                  grpc_exec_ctx_now(exec_ctx) + refresh_threshold);
+                  grpc_core::ExecCtx::Get()->Now() + refresh_threshold);
   }
   return false;
 }
 
 static void oauth2_token_fetcher_cancel_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+    grpc_call_credentials* creds, grpc_credentials_mdelem_array* md_array,
+    grpc_error* error) {
   grpc_oauth2_token_fetcher_credentials* c =
       (grpc_oauth2_token_fetcher_credentials*)creds;
   gpr_mu_lock(&c->mu);
@@ -317,7 +312,7 @@
         c->pending_requests = pending_request->next;
       }
       // Invoke the callback immediately with an error.
-      GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata,
+      GRPC_CLOSURE_SCHED(pending_request->on_request_metadata,
                          GRPC_ERROR_REF(error));
       gpr_free(pending_request);
       break;
@@ -351,7 +346,7 @@
     oauth2_token_fetcher_cancel_get_request_metadata};
 
 static void compute_engine_fetch_oauth2(
-    grpc_exec_ctx* exec_ctx, grpc_credentials_metadata_request* metadata_req,
+    grpc_credentials_metadata_request* metadata_req,
     grpc_httpcli_context* httpcli_context, grpc_polling_entity* pollent,
     grpc_iomgr_cb_func response_cb, grpc_millis deadline) {
   grpc_http_header header = {(char*)"Metadata-Flavor", (char*)"Google"};
@@ -367,10 +362,10 @@
   grpc_resource_quota* resource_quota =
       grpc_resource_quota_create("oauth2_credentials");
   grpc_httpcli_get(
-      exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline,
+      httpcli_context, pollent, resource_quota, &request, deadline,
       GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
       &metadata_req->response);
-  grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
 }
 
 grpc_call_credentials* grpc_google_compute_engine_credentials_create(
@@ -390,12 +385,11 @@
 // Google Refresh Token credentials.
 //
 
-static void refresh_token_destruct(grpc_exec_ctx* exec_ctx,
-                                   grpc_call_credentials* creds) {
+static void refresh_token_destruct(grpc_call_credentials* creds) {
   grpc_google_refresh_token_credentials* c =
       (grpc_google_refresh_token_credentials*)creds;
   grpc_auth_refresh_token_destruct(&c->refresh_token);
-  oauth2_token_fetcher_destruct(exec_ctx, &c->base.base);
+  oauth2_token_fetcher_destruct(&c->base.base);
 }
 
 static grpc_call_credentials_vtable refresh_token_vtable = {
@@ -403,7 +397,7 @@
     oauth2_token_fetcher_cancel_get_request_metadata};
 
 static void refresh_token_fetch_oauth2(
-    grpc_exec_ctx* exec_ctx, grpc_credentials_metadata_request* metadata_req,
+    grpc_credentials_metadata_request* metadata_req,
     grpc_httpcli_context* httpcli_context, grpc_polling_entity* pollent,
     grpc_iomgr_cb_func response_cb, grpc_millis deadline) {
   grpc_google_refresh_token_credentials* c =
@@ -427,11 +421,11 @@
   grpc_resource_quota* resource_quota =
       grpc_resource_quota_create("oauth2_credentials_refresh");
   grpc_httpcli_post(
-      exec_ctx, httpcli_context, pollent, resource_quota, &request, body,
-      strlen(body), deadline,
+      httpcli_context, pollent, resource_quota, &request, body, strlen(body),
+      deadline,
       GRPC_CLOSURE_CREATE(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
       &metadata_req->response);
-  grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
   gpr_free(body);
 }
 
@@ -483,25 +477,23 @@
 // Oauth2 Access Token credentials.
 //
 
-static void access_token_destruct(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_credentials* creds) {
+static void access_token_destruct(grpc_call_credentials* creds) {
   grpc_access_token_credentials* c = (grpc_access_token_credentials*)creds;
-  GRPC_MDELEM_UNREF(exec_ctx, c->access_token_md);
+  GRPC_MDELEM_UNREF(c->access_token_md);
 }
 
 static bool access_token_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_polling_entity* pollent, grpc_auth_metadata_context context,
-    grpc_credentials_mdelem_array* md_array, grpc_closure* on_request_metadata,
-    grpc_error** error) {
+    grpc_call_credentials* creds, grpc_polling_entity* pollent,
+    grpc_auth_metadata_context context, grpc_credentials_mdelem_array* md_array,
+    grpc_closure* on_request_metadata, grpc_error** error) {
   grpc_access_token_credentials* c = (grpc_access_token_credentials*)creds;
   grpc_credentials_mdelem_array_add(md_array, c->access_token_md);
   return true;
 }
 
 static void access_token_cancel_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* c,
-    grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+    grpc_call_credentials* c, grpc_credentials_mdelem_array* md_array,
+    grpc_error* error) {
   GRPC_ERROR_UNREF(error);
 }
 
@@ -523,11 +515,11 @@
   gpr_ref_init(&c->base.refcount, 1);
   char* token_md_value;
   gpr_asprintf(&token_md_value, "Bearer %s", access_token);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   c->access_token_md = grpc_mdelem_from_slices(
-      &exec_ctx, grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY),
+      grpc_slice_from_static_string(GRPC_AUTHORIZATION_METADATA_KEY),
       grpc_slice_from_copied_string(token_md_value));
-  grpc_exec_ctx_finish(&exec_ctx);
+
   gpr_free(token_md_value);
   return &c->base;
 }
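
The oauth2 fetcher follows the same clock conversion. The cache fast path in oauth2_token_fetcher_get_request_metadata now reads, in condensed form (refresh_threshold is declared earlier in that function, outside the hunk shown above):

    grpc_mdelem cached_access_token_md = GRPC_MDNULL;
    gpr_mu_lock(&c->mu);
    if (!GRPC_MDISNULL(c->access_token_md) &&
        (c->token_expiration - grpc_core::ExecCtx::Get()->Now() >
         refresh_threshold)) {
      cached_access_token_md = GRPC_MDELEM_REF(c->access_token_md);
    }
    if (!GRPC_MDISNULL(cached_access_token_md)) {
      gpr_mu_unlock(&c->mu);
      grpc_credentials_mdelem_array_add(md_array, cached_access_token_md);
      GRPC_MDELEM_UNREF(cached_access_token_md);
      return true;  // token served synchronously from the cache
    }
    // Cache miss: still holding c->mu, queue the pending request and possibly
    // start a fetch with deadline ExecCtx::Get()->Now() + refresh_threshold.
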
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
index 32d3ff7..e5b8df8 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.h
@@ -22,10 +22,6 @@
 #include "src/core/lib/json/json.h"
 #include "src/core/lib/security/credentials/credentials.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // auth_refresh_token parsing.
 typedef struct {
   const char* type;
@@ -56,8 +52,7 @@
 //  This object is a base for credentials that need to acquire an oauth2 token
 //  from an http service.
 
-typedef void (*grpc_fetch_oauth2_func)(grpc_exec_ctx* exec_ctx,
-                                       grpc_credentials_metadata_request* req,
+typedef void (*grpc_fetch_oauth2_func)(grpc_credentials_metadata_request* req,
                                        grpc_httpcli_context* http_context,
                                        grpc_polling_entity* pollent,
                                        grpc_iomgr_cb_func cb,
@@ -103,11 +98,7 @@
 // Exposed for testing only.
 grpc_credentials_status
 grpc_oauth2_token_fetcher_credentials_parse_server_response(
-    grpc_exec_ctx* exec_ctx, const struct grpc_http_response* response,
-    grpc_mdelem* token_md, grpc_millis* token_lifetime);
-
-#ifdef __cplusplus
-}
-#endif
+    const struct grpc_http_response* response, grpc_mdelem* token_md,
+    grpc_millis* token_lifetime);
 
 #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_OAUTH2_OAUTH2_CREDENTIALS_H */
diff --git a/src/core/lib/security/credentials/plugin/plugin_credentials.cc b/src/core/lib/security/credentials/plugin/plugin_credentials.cc
index 1f1efd0..203ba58 100644
--- a/src/core/lib/security/credentials/plugin/plugin_credentials.cc
+++ b/src/core/lib/security/credentials/plugin/plugin_credentials.cc
@@ -33,8 +33,7 @@
 
 grpc_core::TraceFlag grpc_plugin_credentials_trace(false, "plugin_credentials");
 
-static void plugin_destruct(grpc_exec_ctx* exec_ctx,
-                            grpc_call_credentials* creds) {
+static void plugin_destruct(grpc_call_credentials* creds) {
   grpc_plugin_credentials* c = (grpc_plugin_credentials*)creds;
   gpr_mu_destroy(&c->mu);
   if (c->plugin.state != nullptr && c->plugin.destroy != nullptr) {
@@ -61,18 +60,17 @@
 // When this returns, r->cancelled indicates whether the request was
 // cancelled before completion.
 static void pending_request_complete(
-    grpc_exec_ctx* exec_ctx, grpc_plugin_credentials_pending_request* r) {
+    grpc_plugin_credentials_pending_request* r) {
   gpr_mu_lock(&r->creds->mu);
   if (!r->cancelled) pending_request_remove_locked(r->creds, r);
   gpr_mu_unlock(&r->creds->mu);
   // Ref to credentials not needed anymore.
-  grpc_call_credentials_unref(exec_ctx, &r->creds->base);
+  grpc_call_credentials_unref(&r->creds->base);
 }
 
 static grpc_error* process_plugin_result(
-    grpc_exec_ctx* exec_ctx, grpc_plugin_credentials_pending_request* r,
-    const grpc_metadata* md, size_t num_md, grpc_status_code status,
-    const char* error_details) {
+    grpc_plugin_credentials_pending_request* r, const grpc_metadata* md,
+    size_t num_md, grpc_status_code status, const char* error_details) {
   grpc_error* error = GRPC_ERROR_NONE;
   if (status != GRPC_STATUS_OK) {
     char* msg;
@@ -100,11 +98,11 @@
       error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Illegal metadata");
     } else {
       for (size_t i = 0; i < num_md; ++i) {
-        grpc_mdelem mdelem = grpc_mdelem_from_slices(
-            exec_ctx, grpc_slice_ref_internal(md[i].key),
-            grpc_slice_ref_internal(md[i].value));
+        grpc_mdelem mdelem =
+            grpc_mdelem_from_slices(grpc_slice_ref_internal(md[i].key),
+                                    grpc_slice_ref_internal(md[i].value));
         grpc_credentials_mdelem_array_add(r->md_array, mdelem);
-        GRPC_MDELEM_UNREF(exec_ctx, mdelem);
+        GRPC_MDELEM_UNREF(mdelem);
       }
     }
   }
@@ -117,9 +115,8 @@
                                              grpc_status_code status,
                                              const char* error_details) {
   /* called from application code */
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INITIALIZER(
-      GRPC_EXEC_CTX_FLAG_IS_FINISHED | GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP,
-      nullptr, nullptr);
+  grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_FINISHED |
+                              GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP);
   grpc_plugin_credentials_pending_request* r =
       (grpc_plugin_credentials_pending_request*)request;
   if (grpc_plugin_credentials_trace.enabled()) {
@@ -129,12 +126,12 @@
             r->creds, r);
   }
   // Remove request from pending list if not previously cancelled.
-  pending_request_complete(&exec_ctx, r);
+  pending_request_complete(r);
   // If it has not been cancelled, process it.
   if (!r->cancelled) {
     grpc_error* error =
-        process_plugin_result(&exec_ctx, r, md, num_md, status, error_details);
-    GRPC_CLOSURE_SCHED(&exec_ctx, r->on_request_metadata, error);
+        process_plugin_result(r, md, num_md, status, error_details);
+    GRPC_CLOSURE_SCHED(r->on_request_metadata, error);
   } else if (grpc_plugin_credentials_trace.enabled()) {
     gpr_log(GPR_INFO,
             "plugin_credentials[%p]: request %p: plugin was previously "
@@ -142,11 +139,9 @@
             r->creds, r);
   }
   gpr_free(r);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static bool plugin_get_request_metadata(grpc_exec_ctx* exec_ctx,
-                                        grpc_call_credentials* creds,
+static bool plugin_get_request_metadata(grpc_call_credentials* creds,
                                         grpc_polling_entity* pollent,
                                         grpc_auth_metadata_context context,
                                         grpc_credentials_mdelem_array* md_array,
@@ -194,7 +189,7 @@
     }
     // Returned synchronously.
     // Remove request from pending list if not previously cancelled.
-    pending_request_complete(exec_ctx, pending_request);
+    pending_request_complete(pending_request);
     // If the request was cancelled, the error will have been returned
     // asynchronously by plugin_cancel_get_request_metadata(), so return
     // false.  Otherwise, process the result.
@@ -213,13 +208,13 @@
                 "synchronously",
                 c, pending_request);
       }
-      *error = process_plugin_result(exec_ctx, pending_request, creds_md,
-                                     num_creds_md, status, error_details);
+      *error = process_plugin_result(pending_request, creds_md, num_creds_md,
+                                     status, error_details);
     }
     // Clean up.
     for (size_t i = 0; i < num_creds_md; ++i) {
-      grpc_slice_unref_internal(exec_ctx, creds_md[i].key);
-      grpc_slice_unref_internal(exec_ctx, creds_md[i].value);
+      grpc_slice_unref_internal(creds_md[i].key);
+      grpc_slice_unref_internal(creds_md[i].value);
     }
     gpr_free((void*)error_details);
     gpr_free(pending_request);
@@ -228,8 +223,8 @@
 }
 
 static void plugin_cancel_get_request_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call_credentials* creds,
-    grpc_credentials_mdelem_array* md_array, grpc_error* error) {
+    grpc_call_credentials* creds, grpc_credentials_mdelem_array* md_array,
+    grpc_error* error) {
   grpc_plugin_credentials* c = (grpc_plugin_credentials*)creds;
   gpr_mu_lock(&c->mu);
   for (grpc_plugin_credentials_pending_request* pending_request =
@@ -241,7 +236,7 @@
                 pending_request);
       }
       pending_request->cancelled = true;
-      GRPC_CLOSURE_SCHED(exec_ctx, pending_request->on_request_metadata,
+      GRPC_CLOSURE_SCHED(pending_request->on_request_metadata,
                          GRPC_ERROR_REF(error));
       pending_request_remove_locked(c, pending_request);
       break;
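
One wrinkle in the plugin hunks: when the plugin completes on an application thread, core is re-entered with no ExecCtx in place, so one is constructed with explicit flags, mirroring the old GRPC_EXEC_CTX_INITIALIZER call. A condensed sketch of that entry point; the callback name is illustrative because the first lines of its signature sit outside the hunk:

    static void plugin_metadata_ready(void* request, const grpc_metadata* md,
                                      size_t num_md, grpc_status_code status,
                                      const char* error_details) {
      /* called from application code: no ExecCtx exists on this thread yet */
      grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_FINISHED |
                                  GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP);
      // ... pending_request_complete(r), process_plugin_result(...),
      //     GRPC_CLOSURE_SCHED(r->on_request_metadata, error) ...
    }  // the scoped ExecCtx flushes what was scheduled; no explicit finish call
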
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.cc b/src/core/lib/security/credentials/ssl/ssl_credentials.cc
index 39dd38c..d854644 100644
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.cc
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.cc
@@ -41,18 +41,16 @@
   gpr_free(kp);
 }
 
-static void ssl_destruct(grpc_exec_ctx* exec_ctx,
-                         grpc_channel_credentials* creds) {
+static void ssl_destruct(grpc_channel_credentials* creds) {
   grpc_ssl_credentials* c = (grpc_ssl_credentials*)creds;
   gpr_free(c->config.pem_root_certs);
   grpc_tsi_ssl_pem_key_cert_pairs_destroy(c->config.pem_key_cert_pair, 1);
 }
 
 static grpc_security_status ssl_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_channel_credentials* creds,
-    grpc_call_credentials* call_creds, const char* target,
-    const grpc_channel_args* args, grpc_channel_security_connector** sc,
-    grpc_channel_args** new_args) {
+    grpc_channel_credentials* creds, grpc_call_credentials* call_creds,
+    const char* target, const grpc_channel_args* args,
+    grpc_channel_security_connector** sc, grpc_channel_args** new_args) {
   grpc_ssl_credentials* c = (grpc_ssl_credentials*)creds;
   grpc_security_status status = GRPC_SECURITY_OK;
   const char* overridden_target_name = nullptr;
@@ -65,8 +63,7 @@
     }
   }
   status = grpc_ssl_channel_security_connector_create(
-      exec_ctx, creds, call_creds, &c->config, target, overridden_target_name,
-      sc);
+      creds, call_creds, &c->config, target, overridden_target_name, sc);
   if (status != GRPC_SECURITY_OK) {
     return status;
   }
@@ -125,8 +122,7 @@
   grpc_ssl_server_certificate_config_fetcher* certificate_config_fetcher;
 };
 
-static void ssl_server_destruct(grpc_exec_ctx* exec_ctx,
-                                grpc_server_credentials* creds) {
+static void ssl_server_destruct(grpc_server_credentials* creds) {
   grpc_ssl_server_credentials* c = (grpc_ssl_server_credentials*)creds;
   grpc_tsi_ssl_pem_key_cert_pairs_destroy(c->config.pem_key_cert_pairs,
                                           c->config.num_key_cert_pairs);
@@ -134,9 +130,8 @@
 }
 
 static grpc_security_status ssl_server_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_server_credentials* creds,
-    grpc_server_security_connector** sc) {
-  return grpc_ssl_server_security_connector_create(exec_ctx, creds, sc);
+    grpc_server_credentials* creds, grpc_server_security_connector** sc) {
+  return grpc_ssl_server_security_connector_create(creds, sc);
 }
 
 static grpc_server_credentials_vtable ssl_server_vtable = {
diff --git a/src/core/lib/security/credentials/ssl/ssl_credentials.h b/src/core/lib/security/credentials/ssl/ssl_credentials.h
index 82b9ce1..0003905 100644
--- a/src/core/lib/security/credentials/ssl/ssl_credentials.h
+++ b/src/core/lib/security/credentials/ssl/ssl_credentials.h
@@ -20,10 +20,6 @@
 
 #include "src/core/lib/security/credentials/credentials.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct {
   grpc_channel_credentials base;
   grpc_ssl_config config;
@@ -53,8 +49,4 @@
 void grpc_tsi_ssl_pem_key_cert_pairs_destroy(tsi_ssl_pem_key_cert_pair* kp,
                                              size_t num_key_cert_pairs);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_CREDENTIALS_SSL_SSL_CREDENTIALS_H */
diff --git a/src/core/lib/security/transport/auth_filters.h b/src/core/lib/security/transport/auth_filters.h
index 6376929..e999a02 100644
--- a/src/core/lib/security/transport/auth_filters.h
+++ b/src/core/lib/security/transport/auth_filters.h
@@ -22,10 +22,6 @@
 #include <grpc/grpc_security.h>
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_client_auth_filter;
 extern const grpc_channel_filter grpc_server_auth_filter;
 
@@ -36,8 +32,4 @@
 
 void grpc_auth_metadata_context_reset(grpc_auth_metadata_context* context);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_AUTH_FILTERS_H */
diff --git a/src/core/lib/security/transport/client_auth_filter.cc b/src/core/lib/security/transport/client_auth_filter.cc
index 326f4d7..cd3c2e3 100644
--- a/src/core/lib/security/transport/client_auth_filter.cc
+++ b/src/core/lib/security/transport/client_auth_filter.cc
@@ -90,8 +90,7 @@
   *combined = grpc_error_add_child(*combined, error);
 }
 
-static void on_credentials_metadata(grpc_exec_ctx* exec_ctx, void* arg,
-                                    grpc_error* input_error) {
+static void on_credentials_metadata(void* arg, grpc_error* input_error) {
   grpc_transport_stream_op_batch* batch = (grpc_transport_stream_op_batch*)arg;
   grpc_call_element* elem =
       (grpc_call_element*)batch->handler_private.extra_arg;
@@ -105,16 +104,16 @@
         batch->payload->send_initial_metadata.send_initial_metadata;
     for (size_t i = 0; i < calld->md_array.size; ++i) {
       add_error(&error, grpc_metadata_batch_add_tail(
-                            exec_ctx, mdb, &calld->md_links[i],
+                            mdb, &calld->md_links[i],
                             GRPC_MDELEM_REF(calld->md_array.md[i])));
     }
   }
   if (error == GRPC_ERROR_NONE) {
-    grpc_call_next_op(exec_ctx, elem, batch);
+    grpc_call_next_op(elem, batch);
   } else {
     error = grpc_error_set_int(error, GRPC_ERROR_INT_GRPC_STATUS,
                                GRPC_STATUS_UNAUTHENTICATED);
-    grpc_transport_stream_op_batch_finish_with_failure(exec_ctx, batch, error,
+    grpc_transport_stream_op_batch_finish_with_failure(batch, error,
                                                        calld->call_combiner);
   }
 }
@@ -156,20 +155,17 @@
   gpr_free(host_and_port);
 }
 
-static void cancel_get_request_metadata(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void cancel_get_request_metadata(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
   if (error != GRPC_ERROR_NONE) {
     grpc_call_credentials_cancel_get_request_metadata(
-        exec_ctx, calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
+        calld->creds, &calld->md_array, GRPC_ERROR_REF(error));
   }
-  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call,
-                        "cancel_get_request_metadata");
+  GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_get_request_metadata");
 }
 
-static void send_security_metadata(grpc_exec_ctx* exec_ctx,
-                                   grpc_call_element* elem,
+static void send_security_metadata(grpc_call_element* elem,
                                    grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
@@ -183,7 +179,7 @@
 
   if (channel_call_creds == nullptr && !call_creds_has_md) {
     /* Skip sending metadata altogether. */
-    grpc_call_next_op(exec_ctx, elem, batch);
+    grpc_call_next_op(elem, batch);
     return;
   }
 
@@ -192,7 +188,7 @@
                                                           ctx->creds, nullptr);
     if (calld->creds == nullptr) {
       grpc_transport_stream_op_batch_finish_with_failure(
-          exec_ctx, batch,
+          batch,
           grpc_error_set_int(
               GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                   "Incompatible credentials set on channel and call."),
@@ -215,30 +211,29 @@
                     batch, grpc_schedule_on_exec_ctx);
   grpc_error* error = GRPC_ERROR_NONE;
   if (grpc_call_credentials_get_request_metadata(
-          exec_ctx, calld->creds, calld->pollent, calld->auth_md_context,
+          calld->creds, calld->pollent, calld->auth_md_context,
           &calld->md_array, &calld->async_result_closure, &error)) {
     // Synchronous return; invoke on_credentials_metadata() directly.
-    on_credentials_metadata(exec_ctx, batch, error);
+    on_credentials_metadata(batch, error);
     GRPC_ERROR_UNREF(error);
   } else {
     // Async return; register cancellation closure with call combiner.
     GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata");
     grpc_call_combiner_set_notify_on_cancel(
-        exec_ctx, calld->call_combiner,
+        calld->call_combiner,
         GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure,
                           cancel_get_request_metadata, elem,
                           grpc_schedule_on_exec_ctx));
   }
 }
 
-static void on_host_checked(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+static void on_host_checked(void* arg, grpc_error* error) {
   grpc_transport_stream_op_batch* batch = (grpc_transport_stream_op_batch*)arg;
   grpc_call_element* elem =
       (grpc_call_element*)batch->handler_private.extra_arg;
   call_data* calld = (call_data*)elem->call_data;
   if (error == GRPC_ERROR_NONE) {
-    send_security_metadata(exec_ctx, elem, batch);
+    send_security_metadata(elem, batch);
   } else {
     char* error_msg;
     char* host = grpc_slice_to_c_string(calld->host);
@@ -246,7 +241,7 @@
                  host);
     gpr_free(host);
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, batch,
+        batch,
         grpc_error_set_int(GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_msg),
                            GRPC_ERROR_INT_GRPC_STATUS,
                            GRPC_STATUS_UNAUTHENTICATED),
@@ -255,22 +250,20 @@
   }
 }
 
-static void cancel_check_call_host(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_error* error) {
+static void cancel_check_call_host(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
   if (error != GRPC_ERROR_NONE) {
     grpc_channel_security_connector_cancel_check_call_host(
-        exec_ctx, chand->security_connector, &calld->async_result_closure,
+        chand->security_connector, &calld->async_result_closure,
         GRPC_ERROR_REF(error));
   }
-  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_check_call_host");
+  GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_check_call_host");
 }
 
 static void auth_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* batch) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   GPR_TIMER_BEGIN("auth_start_transport_stream_op_batch", 0);
 
   /* grab pointers to our data from the call element */
@@ -303,13 +296,13 @@
        */
       if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_AUTHORITY)) {
         if (calld->have_host) {
-          grpc_slice_unref_internal(exec_ctx, calld->host);
+          grpc_slice_unref_internal(calld->host);
         }
         calld->host = grpc_slice_ref_internal(GRPC_MDVALUE(md));
         calld->have_host = true;
       } else if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_PATH)) {
         if (calld->have_method) {
-          grpc_slice_unref_internal(exec_ctx, calld->method);
+          grpc_slice_unref_internal(calld->method);
         }
         calld->method = grpc_slice_ref_internal(GRPC_MDVALUE(md));
         calld->have_method = true;
@@ -322,16 +315,16 @@
       char* call_host = grpc_slice_to_c_string(calld->host);
       grpc_error* error = GRPC_ERROR_NONE;
       if (grpc_channel_security_connector_check_call_host(
-              exec_ctx, chand->security_connector, call_host,
-              chand->auth_context, &calld->async_result_closure, &error)) {
+              chand->security_connector, call_host, chand->auth_context,
+              &calld->async_result_closure, &error)) {
         // Synchronous return; invoke on_host_checked() directly.
-        on_host_checked(exec_ctx, batch, error);
+        on_host_checked(batch, error);
         GRPC_ERROR_UNREF(error);
       } else {
         // Async return; register cancellation closure with call combiner.
         GRPC_CALL_STACK_REF(calld->owning_call, "cancel_check_call_host");
         grpc_call_combiner_set_notify_on_cancel(
-            exec_ctx, calld->call_combiner,
+            calld->call_combiner,
             GRPC_CLOSURE_INIT(&calld->check_call_host_cancel_closure,
                               cancel_check_call_host, elem,
                               grpc_schedule_on_exec_ctx));
@@ -343,13 +336,12 @@
   }
 
   /* pass control down the stack */
-  grpc_call_next_op(exec_ctx, elem, batch);
+  grpc_call_next_op(elem, batch);
   GPR_TIMER_END("auth_start_transport_stream_op_batch", 0);
 }
 
 /* Constructor for call_data */
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   calld->owning_call = args->call_stack;
@@ -357,32 +349,30 @@
   return GRPC_ERROR_NONE;
 }
 
-static void set_pollset_or_pollset_set(grpc_exec_ctx* exec_ctx,
-                                       grpc_call_element* elem,
+static void set_pollset_or_pollset_set(grpc_call_element* elem,
                                        grpc_polling_entity* pollent) {
   call_data* calld = (call_data*)elem->call_data;
   calld->pollent = pollent;
 }
 
 /* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   call_data* calld = (call_data*)elem->call_data;
-  grpc_credentials_mdelem_array_destroy(exec_ctx, &calld->md_array);
-  grpc_call_credentials_unref(exec_ctx, calld->creds);
+  grpc_credentials_mdelem_array_destroy(&calld->md_array);
+  grpc_call_credentials_unref(calld->creds);
   if (calld->have_host) {
-    grpc_slice_unref_internal(exec_ctx, calld->host);
+    grpc_slice_unref_internal(calld->host);
   }
   if (calld->have_method) {
-    grpc_slice_unref_internal(exec_ctx, calld->method);
+    grpc_slice_unref_internal(calld->method);
   }
   grpc_auth_metadata_context_reset(&calld->auth_md_context);
 }
 
 /* Constructor for channel_data */
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   grpc_security_connector* sc =
       grpc_security_connector_find_in_args(args->channel_args);
@@ -415,13 +405,12 @@
 }
 
 /* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {
+static void destroy_channel_elem(grpc_channel_element* elem) {
   /* grab pointers to our data from the channel element */
   channel_data* chand = (channel_data*)elem->channel_data;
   grpc_channel_security_connector* sc = chand->security_connector;
   if (sc != nullptr) {
-    GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, &sc->base, "client_auth_filter");
+    GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "client_auth_filter");
   }
   GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "client_auth_filter");
 }
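
Easy to miss amid the parameter deletions in client_auth_filter.cc: the synchronous/asynchronous split around credentials metadata is unchanged apart from the dropped exec_ctx. Condensed from the send_security_metadata hunk above, with identifiers as in the patch:

    grpc_error* error = GRPC_ERROR_NONE;
    if (grpc_call_credentials_get_request_metadata(
            calld->creds, calld->pollent, calld->auth_md_context,
            &calld->md_array, &calld->async_result_closure, &error)) {
      // Synchronous return: metadata (or an error) is already available.
      on_credentials_metadata(batch, error);
      GRPC_ERROR_UNREF(error);
    } else {
      // Asynchronous return: keep the call stack alive and arrange for
      // cancellation to be forwarded through the call combiner.
      GRPC_CALL_STACK_REF(calld->owning_call, "cancel_get_request_metadata");
      grpc_call_combiner_set_notify_on_cancel(
          calld->call_combiner,
          GRPC_CLOSURE_INIT(&calld->get_request_metadata_cancel_closure,
                            cancel_get_request_metadata, elem,
                            grpc_schedule_on_exec_ctx));
    }
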
diff --git a/src/core/lib/security/transport/lb_targets_info.cc b/src/core/lib/security/transport/lb_targets_info.cc
index c07be35..183b1eb 100644
--- a/src/core/lib/security/transport/lb_targets_info.cc
+++ b/src/core/lib/security/transport/lb_targets_info.cc
@@ -28,8 +28,8 @@
 static void* targets_info_copy(void* p) {
   return grpc_slice_hash_table_ref((grpc_slice_hash_table*)p);
 }
-static void targets_info_destroy(grpc_exec_ctx* exec_ctx, void* p) {
-  grpc_slice_hash_table_unref(exec_ctx, (grpc_slice_hash_table*)p);
+static void targets_info_destroy(void* p) {
+  grpc_slice_hash_table_unref((grpc_slice_hash_table*)p);
 }
 static int targets_info_cmp(void* a, void* b) {
   return grpc_slice_hash_table_cmp((const grpc_slice_hash_table*)a,
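
The three static helpers in lb_targets_info.cc have the shape of a pointer channel-arg vtable. A hedged sketch of how they would plug together, assuming the standard grpc_arg_pointer_vtable layout (copy, destroy, cmp); the variable name below is illustrative and not part of this hunk:

    // Presumed assembly into a channel-arg vtable: with exec_ctx gone,
    // targets_info_destroy now lines up with a destroy slot that no longer
    // threads an execution context.
    static const grpc_arg_pointer_vtable targets_info_arg_vtable = {
        targets_info_copy, targets_info_destroy, targets_info_cmp};
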
diff --git a/src/core/lib/security/transport/lb_targets_info.h b/src/core/lib/security/transport/lb_targets_info.h
index b4a0bc9..7543d3c 100644
--- a/src/core/lib/security/transport/lb_targets_info.h
+++ b/src/core/lib/security/transport/lb_targets_info.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/slice/slice_hash_table.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Return a channel argument containing \a targets_info. */
 grpc_arg grpc_lb_targets_info_create_channel_arg(
     grpc_slice_hash_table* targets_info);
@@ -33,8 +29,4 @@
 grpc_slice_hash_table* grpc_lb_targets_info_find_in_args(
     const grpc_channel_args* args);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_LB_TARGETS_INFO_H */
diff --git a/src/core/lib/security/transport/secure_endpoint.cc b/src/core/lib/security/transport/secure_endpoint.cc
index 4cd317a..e5c089d 100644
--- a/src/core/lib/security/transport/secure_endpoint.cc
+++ b/src/core/lib/security/transport/secure_endpoint.cc
@@ -63,28 +63,27 @@
 
 grpc_core::TraceFlag grpc_trace_secure_endpoint(false, "secure_endpoint");
 
-static void destroy(grpc_exec_ctx* exec_ctx, secure_endpoint* secure_ep) {
+static void destroy(secure_endpoint* secure_ep) {
   secure_endpoint* ep = secure_ep;
-  grpc_endpoint_destroy(exec_ctx, ep->wrapped_ep);
+  grpc_endpoint_destroy(ep->wrapped_ep);
   tsi_frame_protector_destroy(ep->protector);
-  tsi_zero_copy_grpc_protector_destroy(exec_ctx, ep->zero_copy_protector);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &ep->leftover_bytes);
-  grpc_slice_unref_internal(exec_ctx, ep->read_staging_buffer);
-  grpc_slice_unref_internal(exec_ctx, ep->write_staging_buffer);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &ep->output_buffer);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &ep->source_buffer);
+  tsi_zero_copy_grpc_protector_destroy(ep->zero_copy_protector);
+  grpc_slice_buffer_destroy_internal(&ep->leftover_bytes);
+  grpc_slice_unref_internal(ep->read_staging_buffer);
+  grpc_slice_unref_internal(ep->write_staging_buffer);
+  grpc_slice_buffer_destroy_internal(&ep->output_buffer);
+  grpc_slice_buffer_destroy_internal(&ep->source_buffer);
   gpr_mu_destroy(&ep->protector_mu);
   gpr_free(ep);
 }
 
 #ifndef NDEBUG
-#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
-  secure_endpoint_unref((exec_ctx), (ep), (reason), __FILE__, __LINE__)
+#define SECURE_ENDPOINT_UNREF(ep, reason) \
+  secure_endpoint_unref((ep), (reason), __FILE__, __LINE__)
 #define SECURE_ENDPOINT_REF(ep, reason) \
   secure_endpoint_ref((ep), (reason), __FILE__, __LINE__)
-static void secure_endpoint_unref(grpc_exec_ctx* exec_ctx, secure_endpoint* ep,
-                                  const char* reason, const char* file,
-                                  int line) {
+static void secure_endpoint_unref(secure_endpoint* ep, const char* reason,
+                                  const char* file, int line) {
   if (grpc_trace_secure_endpoint.enabled()) {
     gpr_atm val = gpr_atm_no_barrier_load(&ep->ref.count);
     gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -92,7 +91,7 @@
             val - 1);
   }
   if (gpr_unref(&ep->ref)) {
-    destroy(exec_ctx, ep);
+    destroy(ep);
   }
 }
 
@@ -107,13 +106,11 @@
   gpr_ref(&ep->ref);
 }
 #else
-#define SECURE_ENDPOINT_UNREF(exec_ctx, ep, reason) \
-  secure_endpoint_unref((exec_ctx), (ep))
+#define SECURE_ENDPOINT_UNREF(ep, reason) secure_endpoint_unref((ep))
 #define SECURE_ENDPOINT_REF(ep, reason) secure_endpoint_ref((ep))
-static void secure_endpoint_unref(grpc_exec_ctx* exec_ctx,
-                                  secure_endpoint* ep) {
+static void secure_endpoint_unref(secure_endpoint* ep) {
   if (gpr_unref(&ep->ref)) {
-    destroy(exec_ctx, ep);
+    destroy(ep);
   }
 }
 
@@ -128,8 +125,7 @@
   *end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);
 }
 
-static void call_read_cb(grpc_exec_ctx* exec_ctx, secure_endpoint* ep,
-                         grpc_error* error) {
+static void call_read_cb(secure_endpoint* ep, grpc_error* error) {
   if (grpc_trace_secure_endpoint.enabled()) {
     size_t i;
     for (i = 0; i < ep->read_buffer->count; i++) {
@@ -140,12 +136,11 @@
     }
   }
   ep->read_buffer = nullptr;
-  GRPC_CLOSURE_SCHED(exec_ctx, ep->read_cb, error);
-  SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
+  GRPC_CLOSURE_SCHED(ep->read_cb, error);
+  SECURE_ENDPOINT_UNREF(ep, "read");
 }
 
-static void on_read(grpc_exec_ctx* exec_ctx, void* user_data,
-                    grpc_error* error) {
+static void on_read(void* user_data, grpc_error* error) {
   unsigned i;
   uint8_t keep_looping = 0;
   tsi_result result = TSI_OK;
@@ -154,17 +149,16 @@
   uint8_t* end = GRPC_SLICE_END_PTR(ep->read_staging_buffer);
 
   if (error != GRPC_ERROR_NONE) {
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
-    call_read_cb(exec_ctx, ep,
-                 GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                     "Secure read failed", &error, 1));
+    grpc_slice_buffer_reset_and_unref_internal(ep->read_buffer);
+    call_read_cb(ep, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                         "Secure read failed", &error, 1));
     return;
   }
 
   if (ep->zero_copy_protector != nullptr) {
     // Use zero-copy grpc protector to unprotect.
     result = tsi_zero_copy_grpc_protector_unprotect(
-        exec_ctx, ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer);
+        ep->zero_copy_protector, &ep->source_buffer, ep->read_buffer);
   } else {
     // Use frame protector to unprotect.
     /* TODO(yangg) check error, maybe bail out early */
@@ -217,37 +211,35 @@
 
   /* TODO(yangg) experiment with moving this block after read_cb to see if it
      helps latency */
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->source_buffer);
+  grpc_slice_buffer_reset_and_unref_internal(&ep->source_buffer);
 
   if (result != TSI_OK) {
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
+    grpc_slice_buffer_reset_and_unref_internal(ep->read_buffer);
     call_read_cb(
-        exec_ctx, ep,
-        grpc_set_tsi_error_result(
-            GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unwrap failed"), result));
+        ep, grpc_set_tsi_error_result(
+                GRPC_ERROR_CREATE_FROM_STATIC_STRING("Unwrap failed"), result));
     return;
   }
 
-  call_read_cb(exec_ctx, ep, GRPC_ERROR_NONE);
+  call_read_cb(ep, GRPC_ERROR_NONE);
 }
 
-static void endpoint_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* secure_ep,
-                          grpc_slice_buffer* slices, grpc_closure* cb) {
+static void endpoint_read(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
+                          grpc_closure* cb) {
   secure_endpoint* ep = (secure_endpoint*)secure_ep;
   ep->read_cb = cb;
   ep->read_buffer = slices;
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, ep->read_buffer);
+  grpc_slice_buffer_reset_and_unref_internal(ep->read_buffer);
 
   SECURE_ENDPOINT_REF(ep, "read");
   if (ep->leftover_bytes.count) {
     grpc_slice_buffer_swap(&ep->leftover_bytes, &ep->source_buffer);
     GPR_ASSERT(ep->leftover_bytes.count == 0);
-    on_read(exec_ctx, ep, GRPC_ERROR_NONE);
+    on_read(ep, GRPC_ERROR_NONE);
     return;
   }
 
-  grpc_endpoint_read(exec_ctx, ep->wrapped_ep, &ep->source_buffer,
-                     &ep->on_read);
+  grpc_endpoint_read(ep->wrapped_ep, &ep->source_buffer, &ep->on_read);
 }
 
 static void flush_write_staging_buffer(secure_endpoint* ep, uint8_t** cur,
@@ -258,8 +250,8 @@
   *end = GRPC_SLICE_END_PTR(ep->write_staging_buffer);
 }
 
-static void endpoint_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* secure_ep,
-                           grpc_slice_buffer* slices, grpc_closure* cb) {
+static void endpoint_write(grpc_endpoint* secure_ep, grpc_slice_buffer* slices,
+                           grpc_closure* cb) {
   GPR_TIMER_BEGIN("secure_endpoint.endpoint_write", 0);
 
   unsigned i;
@@ -268,7 +260,7 @@
   uint8_t* cur = GRPC_SLICE_START_PTR(ep->write_staging_buffer);
   uint8_t* end = GRPC_SLICE_END_PTR(ep->write_staging_buffer);
 
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer);
+  grpc_slice_buffer_reset_and_unref_internal(&ep->output_buffer);
 
   if (grpc_trace_secure_endpoint.enabled()) {
     for (i = 0; i < slices->count; i++) {
@@ -281,8 +273,8 @@
 
   if (ep->zero_copy_protector != nullptr) {
     // Use zero-copy grpc protector to protect.
-    result = tsi_zero_copy_grpc_protector_protect(
-        exec_ctx, ep->zero_copy_protector, slices, &ep->output_buffer);
+    result = tsi_zero_copy_grpc_protector_protect(ep->zero_copy_protector,
+                                                  slices, &ep->output_buffer);
   } else {
     // Use frame protector to protect.
     for (i = 0; i < slices->count; i++) {
@@ -340,50 +332,44 @@
 
   if (result != TSI_OK) {
     /* TODO(yangg) do different things according to the error type? */
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &ep->output_buffer);
+    grpc_slice_buffer_reset_and_unref_internal(&ep->output_buffer);
     GRPC_CLOSURE_SCHED(
-        exec_ctx, cb,
-        grpc_set_tsi_error_result(
-            GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result));
+        cb, grpc_set_tsi_error_result(
+                GRPC_ERROR_CREATE_FROM_STATIC_STRING("Wrap failed"), result));
     GPR_TIMER_END("secure_endpoint.endpoint_write", 0);
     return;
   }
 
-  grpc_endpoint_write(exec_ctx, ep->wrapped_ep, &ep->output_buffer, cb);
+  grpc_endpoint_write(ep->wrapped_ep, &ep->output_buffer, cb);
   GPR_TIMER_END("secure_endpoint.endpoint_write", 0);
 }
 
-static void endpoint_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* secure_ep,
-                              grpc_error* why) {
+static void endpoint_shutdown(grpc_endpoint* secure_ep, grpc_error* why) {
   secure_endpoint* ep = (secure_endpoint*)secure_ep;
-  grpc_endpoint_shutdown(exec_ctx, ep->wrapped_ep, why);
+  grpc_endpoint_shutdown(ep->wrapped_ep, why);
 }
 
-static void endpoint_destroy(grpc_exec_ctx* exec_ctx,
-                             grpc_endpoint* secure_ep) {
+static void endpoint_destroy(grpc_endpoint* secure_ep) {
   secure_endpoint* ep = (secure_endpoint*)secure_ep;
-  SECURE_ENDPOINT_UNREF(exec_ctx, ep, "destroy");
+  SECURE_ENDPOINT_UNREF(ep, "destroy");
 }
 
-static void endpoint_add_to_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_endpoint* secure_ep,
+static void endpoint_add_to_pollset(grpc_endpoint* secure_ep,
                                     grpc_pollset* pollset) {
   secure_endpoint* ep = (secure_endpoint*)secure_ep;
-  grpc_endpoint_add_to_pollset(exec_ctx, ep->wrapped_ep, pollset);
+  grpc_endpoint_add_to_pollset(ep->wrapped_ep, pollset);
 }
 
-static void endpoint_add_to_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_endpoint* secure_ep,
+static void endpoint_add_to_pollset_set(grpc_endpoint* secure_ep,
                                         grpc_pollset_set* pollset_set) {
   secure_endpoint* ep = (secure_endpoint*)secure_ep;
-  grpc_endpoint_add_to_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set);
+  grpc_endpoint_add_to_pollset_set(ep->wrapped_ep, pollset_set);
 }
 
-static void endpoint_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                             grpc_endpoint* secure_ep,
+static void endpoint_delete_from_pollset_set(grpc_endpoint* secure_ep,
                                              grpc_pollset_set* pollset_set) {
   secure_endpoint* ep = (secure_endpoint*)secure_ep;
-  grpc_endpoint_delete_from_pollset_set(exec_ctx, ep->wrapped_ep, pollset_set);
+  grpc_endpoint_delete_from_pollset_set(ep->wrapped_ep, pollset_set);
 }
 
 static char* endpoint_get_peer(grpc_endpoint* secure_ep) {
diff --git a/src/core/lib/security/transport/secure_endpoint.h b/src/core/lib/security/transport/secure_endpoint.h
index 92c4574..b2556a0 100644
--- a/src/core/lib/security/transport/secure_endpoint.h
+++ b/src/core/lib/security/transport/secure_endpoint.h
@@ -22,10 +22,6 @@
 #include <grpc/slice.h>
 #include "src/core/lib/iomgr/endpoint.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 struct tsi_frame_protector;
 struct tsi_zero_copy_grpc_protector;
 
@@ -40,8 +36,4 @@
     grpc_endpoint* to_wrap, grpc_slice* leftover_slices,
     size_t leftover_nslices);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURE_ENDPOINT_H */
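[Editor's sketch] The SECURE_ENDPOINT_REF/UNREF macros in secure_endpoint.cc above follow gRPC core's usual debug-vs-release ref-count idiom: with NDEBUG undefined, unref carries a reason plus __FILE__/__LINE__ for tracing; in release builds those arguments compile away. A self-contained sketch of the same idiom applied to a hypothetical type (not from the patch):

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>

typedef struct my_refcounted { gpr_refcount refs; } my_refcounted;

static void my_refcounted_destroy(my_refcounted* obj) { gpr_free(obj); }

#ifndef NDEBUG
#define MY_REFCOUNTED_UNREF(obj, reason) \
  my_refcounted_unref((obj), (reason), __FILE__, __LINE__)
static void my_refcounted_unref(my_refcounted* obj, const char* reason,
                                const char* file, int line) {
  gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG, "UNREF %p for %s", (void*)obj,
          reason);
  if (gpr_unref(&obj->refs)) my_refcounted_destroy(obj);
}
#else
#define MY_REFCOUNTED_UNREF(obj, reason) my_refcounted_unref((obj))
static void my_refcounted_unref(my_refcounted* obj) {
  if (gpr_unref(&obj->refs)) my_refcounted_destroy(obj);
}
#endif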
diff --git a/src/core/lib/security/transport/security_connector.cc b/src/core/lib/security/transport/security_connector.cc
index c56e459..fd13971 100644
--- a/src/core/lib/security/transport/security_connector.cc
+++ b/src/core/lib/security/transport/security_connector.cc
@@ -105,33 +105,32 @@
 }
 
 void grpc_channel_security_connector_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* connector,
+    grpc_channel_security_connector* connector,
     grpc_handshake_manager* handshake_mgr) {
   if (connector != nullptr) {
-    connector->add_handshakers(exec_ctx, connector, handshake_mgr);
+    connector->add_handshakers(connector, handshake_mgr);
   }
 }
 
 void grpc_server_security_connector_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_server_security_connector* connector,
+    grpc_server_security_connector* connector,
     grpc_handshake_manager* handshake_mgr) {
   if (connector != nullptr) {
-    connector->add_handshakers(exec_ctx, connector, handshake_mgr);
+    connector->add_handshakers(connector, handshake_mgr);
   }
 }
 
-void grpc_security_connector_check_peer(grpc_exec_ctx* exec_ctx,
-                                        grpc_security_connector* sc,
+void grpc_security_connector_check_peer(grpc_security_connector* sc,
                                         tsi_peer peer,
                                         grpc_auth_context** auth_context,
                                         grpc_closure* on_peer_checked) {
   if (sc == nullptr) {
-    GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked,
+    GRPC_CLOSURE_SCHED(on_peer_checked,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                            "cannot check peer -- no security connector"));
     tsi_peer_destruct(&peer);
   } else {
-    sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
+    sc->vtable->check_peer(sc, peer, auth_context, on_peer_checked);
   }
 }
 
@@ -169,26 +168,26 @@
 }
 
 bool grpc_channel_security_connector_check_call_host(
-    grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
-    const char* host, grpc_auth_context* auth_context,
-    grpc_closure* on_call_host_checked, grpc_error** error) {
+    grpc_channel_security_connector* sc, const char* host,
+    grpc_auth_context* auth_context, grpc_closure* on_call_host_checked,
+    grpc_error** error) {
   if (sc == nullptr || sc->check_call_host == nullptr) {
     *error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
         "cannot check call host -- no security connector");
     return true;
   }
-  return sc->check_call_host(exec_ctx, sc, host, auth_context,
-                             on_call_host_checked, error);
+  return sc->check_call_host(sc, host, auth_context, on_call_host_checked,
+                             error);
 }
 
 void grpc_channel_security_connector_cancel_check_call_host(
-    grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
-    grpc_closure* on_call_host_checked, grpc_error* error) {
+    grpc_channel_security_connector* sc, grpc_closure* on_call_host_checked,
+    grpc_error* error) {
   if (sc == nullptr || sc->cancel_check_call_host == nullptr) {
     GRPC_ERROR_UNREF(error);
     return;
   }
-  sc->cancel_check_call_host(exec_ctx, sc, on_call_host_checked, error);
+  sc->cancel_check_call_host(sc, on_call_host_checked, error);
 }
 
 #ifndef NDEBUG
@@ -205,15 +204,14 @@
 #else
 grpc_security_connector* grpc_security_connector_ref(
     grpc_security_connector* sc) {
-  if (sc == NULL) return NULL;
+  if (sc == nullptr) return nullptr;
 #endif
   gpr_ref(&sc->refcount);
   return sc;
 }
 
 #ifndef NDEBUG
-void grpc_security_connector_unref(grpc_exec_ctx* exec_ctx,
-                                   grpc_security_connector* sc,
+void grpc_security_connector_unref(grpc_security_connector* sc,
                                    const char* file, int line,
                                    const char* reason) {
   if (sc == nullptr) return;
@@ -224,15 +222,14 @@
             val, val - 1, reason);
   }
 #else
-void grpc_security_connector_unref(grpc_exec_ctx* exec_ctx,
-                                   grpc_security_connector* sc) {
-  if (sc == NULL) return;
+void grpc_security_connector_unref(grpc_security_connector* sc) {
+  if (sc == nullptr) return;
 #endif
-  if (gpr_unref(&sc->refcount)) sc->vtable->destroy(exec_ctx, sc);
+  if (gpr_unref(&sc->refcount)) sc->vtable->destroy(sc);
 }
 
-static void connector_arg_destroy(grpc_exec_ctx* exec_ctx, void* p) {
-  GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, (grpc_security_connector*)p,
+static void connector_arg_destroy(void* p) {
+  GRPC_SECURITY_CONNECTOR_UNREF((grpc_security_connector*)p,
                                 "connector_arg_destroy");
 }
 
@@ -309,20 +306,16 @@
   bool is_lb_channel;
 } grpc_fake_channel_security_connector;
 
-static void fake_channel_destroy(grpc_exec_ctx* exec_ctx,
-                                 grpc_security_connector* sc) {
+static void fake_channel_destroy(grpc_security_connector* sc) {
   grpc_fake_channel_security_connector* c =
       (grpc_fake_channel_security_connector*)sc;
-  grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
+  grpc_call_credentials_unref(c->base.request_metadata_creds);
   gpr_free(c->target);
   gpr_free(c->expected_targets);
   gpr_free(c);
 }
 
-static void fake_server_destroy(grpc_exec_ctx* exec_ctx,
-                                grpc_security_connector* sc) {
-  gpr_free(sc);
-}
+static void fake_server_destroy(grpc_security_connector* sc) { gpr_free(sc); }
 
 static bool fake_check_target(const char* target_type, const char* target,
                               const char* set_str) {
@@ -386,8 +379,7 @@
   if (!success) abort();
 }
 
-static void fake_check_peer(grpc_exec_ctx* exec_ctx,
-                            grpc_security_connector* sc, tsi_peer peer,
+static void fake_check_peer(grpc_security_connector* sc, tsi_peer peer,
                             grpc_auth_context** auth_context,
                             grpc_closure* on_peer_checked) {
   const char* prop_name;
@@ -419,25 +411,23 @@
       *auth_context, GRPC_TRANSPORT_SECURITY_TYPE_PROPERTY_NAME,
       GRPC_FAKE_TRANSPORT_SECURITY_TYPE);
 end:
-  GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
+  GRPC_CLOSURE_SCHED(on_peer_checked, error);
   tsi_peer_destruct(&peer);
 }
 
-static void fake_channel_check_peer(grpc_exec_ctx* exec_ctx,
-                                    grpc_security_connector* sc, tsi_peer peer,
+static void fake_channel_check_peer(grpc_security_connector* sc, tsi_peer peer,
                                     grpc_auth_context** auth_context,
                                     grpc_closure* on_peer_checked) {
-  fake_check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
+  fake_check_peer(sc, peer, auth_context, on_peer_checked);
   grpc_fake_channel_security_connector* c =
       (grpc_fake_channel_security_connector*)sc;
   fake_secure_name_check(c->target, c->expected_targets, c->is_lb_channel);
 }
 
-static void fake_server_check_peer(grpc_exec_ctx* exec_ctx,
-                                   grpc_security_connector* sc, tsi_peer peer,
+static void fake_server_check_peer(grpc_security_connector* sc, tsi_peer peer,
                                    grpc_auth_context** auth_context,
                                    grpc_closure* on_peer_checked) {
-  fake_check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
+  fake_check_peer(sc, peer, auth_context, on_peer_checked);
 }
 
 static int fake_channel_cmp(grpc_security_connector* sc1,
@@ -466,8 +456,7 @@
       (grpc_server_security_connector*)sc2);
 }
 
-static bool fake_channel_check_call_host(grpc_exec_ctx* exec_ctx,
-                                         grpc_channel_security_connector* sc,
+static bool fake_channel_check_call_host(grpc_channel_security_connector* sc,
                                          const char* host,
                                          grpc_auth_context* auth_context,
                                          grpc_closure* on_call_host_checked,
@@ -476,29 +465,26 @@
 }
 
 static void fake_channel_cancel_check_call_host(
-    grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
-    grpc_closure* on_call_host_checked, grpc_error* error) {
+    grpc_channel_security_connector* sc, grpc_closure* on_call_host_checked,
+    grpc_error* error) {
   GRPC_ERROR_UNREF(error);
 }
 
 static void fake_channel_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
+    grpc_channel_security_connector* sc,
     grpc_handshake_manager* handshake_mgr) {
   grpc_handshake_manager_add(
       handshake_mgr,
       grpc_security_handshaker_create(
-          exec_ctx, tsi_create_fake_handshaker(true /* is_client */),
-          &sc->base));
+          tsi_create_fake_handshaker(true /* is_client */), &sc->base));
 }
 
-static void fake_server_add_handshakers(grpc_exec_ctx* exec_ctx,
-                                        grpc_server_security_connector* sc,
+static void fake_server_add_handshakers(grpc_server_security_connector* sc,
                                         grpc_handshake_manager* handshake_mgr) {
   grpc_handshake_manager_add(
       handshake_mgr,
       grpc_security_handshaker_create(
-          exec_ctx, tsi_create_fake_handshaker(false /* is_client */),
-          &sc->base));
+          tsi_create_fake_handshaker(false /* is_client */), &sc->base));
 }
 
 static grpc_security_connector_vtable fake_channel_vtable = {
@@ -565,12 +551,11 @@
   return server_creds->certificate_config_fetcher.cb != nullptr;
 }
 
-static void ssl_channel_destroy(grpc_exec_ctx* exec_ctx,
-                                grpc_security_connector* sc) {
+static void ssl_channel_destroy(grpc_security_connector* sc) {
   grpc_ssl_channel_security_connector* c =
       (grpc_ssl_channel_security_connector*)sc;
-  grpc_channel_credentials_unref(exec_ctx, c->base.channel_creds);
-  grpc_call_credentials_unref(exec_ctx, c->base.request_metadata_creds);
+  grpc_channel_credentials_unref(c->base.channel_creds);
+  grpc_call_credentials_unref(c->base.request_metadata_creds);
   tsi_ssl_client_handshaker_factory_unref(c->client_handshaker_factory);
   c->client_handshaker_factory = nullptr;
   if (c->target_name != nullptr) gpr_free(c->target_name);
@@ -578,18 +563,16 @@
   gpr_free(sc);
 }
 
-static void ssl_server_destroy(grpc_exec_ctx* exec_ctx,
-                               grpc_security_connector* sc) {
+static void ssl_server_destroy(grpc_security_connector* sc) {
   grpc_ssl_server_security_connector* c =
       (grpc_ssl_server_security_connector*)sc;
-  grpc_server_credentials_unref(exec_ctx, c->base.server_creds);
+  grpc_server_credentials_unref(c->base.server_creds);
   tsi_ssl_server_handshaker_factory_unref(c->server_handshaker_factory);
   c->server_handshaker_factory = nullptr;
   gpr_free(sc);
 }
 
-static void ssl_channel_add_handshakers(grpc_exec_ctx* exec_ctx,
-                                        grpc_channel_security_connector* sc,
+static void ssl_channel_add_handshakers(grpc_channel_security_connector* sc,
                                         grpc_handshake_manager* handshake_mgr) {
   grpc_ssl_channel_security_connector* c =
       (grpc_ssl_channel_security_connector*)sc;
@@ -607,9 +590,8 @@
   }
   // Create handshakers.
   grpc_handshake_manager_add(
-      handshake_mgr,
-      grpc_security_handshaker_create(
-          exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base));
+      handshake_mgr, grpc_security_handshaker_create(
+                         tsi_create_adapter_handshaker(tsi_hs), &sc->base));
 }
 
 static const char** fill_alpn_protocol_strings(size_t* num_alpn_protocols) {
@@ -701,8 +683,7 @@
   return status;
 }
 
-static void ssl_server_add_handshakers(grpc_exec_ctx* exec_ctx,
-                                       grpc_server_security_connector* sc,
+static void ssl_server_add_handshakers(grpc_server_security_connector* sc,
                                        grpc_handshake_manager* handshake_mgr) {
   grpc_ssl_server_security_connector* c =
       (grpc_ssl_server_security_connector*)sc;
@@ -718,9 +699,8 @@
   }
   // Create handshakers.
   grpc_handshake_manager_add(
-      handshake_mgr,
-      grpc_security_handshaker_create(
-          exec_ctx, tsi_create_adapter_handshaker(tsi_hs), &sc->base));
+      handshake_mgr, grpc_security_handshaker_create(
+                         tsi_create_adapter_handshaker(tsi_hs), &sc->base));
 }
 
 static int ssl_host_matches_name(const tsi_peer* peer, const char* peer_name) {
@@ -804,8 +784,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static void ssl_channel_check_peer(grpc_exec_ctx* exec_ctx,
-                                   grpc_security_connector* sc, tsi_peer peer,
+static void ssl_channel_check_peer(grpc_security_connector* sc, tsi_peer peer,
                                    grpc_auth_context** auth_context,
                                    grpc_closure* on_peer_checked) {
   grpc_ssl_channel_security_connector* c =
@@ -815,17 +794,16 @@
                                          ? c->overridden_target_name
                                          : c->target_name,
                                      &peer, auth_context);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
+  GRPC_CLOSURE_SCHED(on_peer_checked, error);
   tsi_peer_destruct(&peer);
 }
 
-static void ssl_server_check_peer(grpc_exec_ctx* exec_ctx,
-                                  grpc_security_connector* sc, tsi_peer peer,
+static void ssl_server_check_peer(grpc_security_connector* sc, tsi_peer peer,
                                   grpc_auth_context** auth_context,
                                   grpc_closure* on_peer_checked) {
   grpc_error* error = ssl_check_peer(sc, nullptr, &peer, auth_context);
   tsi_peer_destruct(&peer);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_peer_checked, error);
+  GRPC_CLOSURE_SCHED(on_peer_checked, error);
 }
 
 static int ssl_channel_cmp(grpc_security_connector* sc1,
@@ -895,8 +873,7 @@
   if (peer->properties != nullptr) gpr_free(peer->properties);
 }
 
-static bool ssl_channel_check_call_host(grpc_exec_ctx* exec_ctx,
-                                        grpc_channel_security_connector* sc,
+static bool ssl_channel_check_call_host(grpc_channel_security_connector* sc,
                                         const char* host,
                                         grpc_auth_context* auth_context,
                                         grpc_closure* on_call_host_checked,
@@ -922,8 +899,8 @@
 }
 
 static void ssl_channel_cancel_check_call_host(
-    grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
-    grpc_closure* on_call_host_checked, grpc_error* error) {
+    grpc_channel_security_connector* sc, grpc_closure* on_call_host_checked,
+    grpc_error* error) {
   GRPC_ERROR_UNREF(error);
 }
 
@@ -990,7 +967,7 @@
 }
 
 grpc_security_status grpc_ssl_channel_security_connector_create(
-    grpc_exec_ctx* exec_ctx, grpc_channel_credentials* channel_creds,
+    grpc_channel_credentials* channel_creds,
     grpc_call_credentials* request_metadata_creds,
     const grpc_ssl_config* config, const char* target_name,
     const char* overridden_target_name, grpc_channel_security_connector** sc) {
@@ -1045,7 +1022,7 @@
   if (result != TSI_OK) {
     gpr_log(GPR_ERROR, "Handshaker factory creation failed with %s.",
             tsi_result_to_string(result));
-    ssl_channel_destroy(exec_ctx, &c->base.base);
+    ssl_channel_destroy(&c->base.base);
     *sc = nullptr;
     goto error;
   }
@@ -1073,8 +1050,7 @@
 }
 
 grpc_security_status grpc_ssl_server_security_connector_create(
-    grpc_exec_ctx* exec_ctx, grpc_server_credentials* gsc,
-    grpc_server_security_connector** sc) {
+    grpc_server_credentials* gsc, grpc_server_security_connector** sc) {
   tsi_result result = TSI_OK;
   grpc_ssl_server_credentials* server_credentials =
       (grpc_ssl_server_credentials*)gsc;
@@ -1114,7 +1090,7 @@
   if (retval == GRPC_SECURITY_OK) {
     *sc = &c->base;
   } else {
-    if (c != nullptr) ssl_server_destroy(exec_ctx, &c->base.base);
+    if (c != nullptr) ssl_server_destroy(&c->base.base);
     if (sc != nullptr) *sc = nullptr;
   }
   return retval;
diff --git a/src/core/lib/security/transport/security_connector.h b/src/core/lib/security/transport/security_connector.h
index 7cde358..495821d 100644
--- a/src/core/lib/security/transport/security_connector.h
+++ b/src/core/lib/security/transport/security_connector.h
@@ -29,10 +29,6 @@
 #include "src/core/tsi/ssl_transport_security.h"
 #include "src/core/tsi/transport_security_interface.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern grpc_core::DebugOnlyTraceFlag grpc_trace_security_connector_refcount;
 
 /* --- status enum. --- */
@@ -54,9 +50,9 @@
 #define GRPC_ARG_SECURITY_CONNECTOR "grpc.security_connector"
 
 typedef struct {
-  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_security_connector* sc);
-  void (*check_peer)(grpc_exec_ctx* exec_ctx, grpc_security_connector* sc,
-                     tsi_peer peer, grpc_auth_context** auth_context,
+  void (*destroy)(grpc_security_connector* sc);
+  void (*check_peer)(grpc_security_connector* sc, tsi_peer peer,
+                     grpc_auth_context** auth_context,
                      grpc_closure* on_peer_checked);
   int (*cmp)(grpc_security_connector* sc, grpc_security_connector* other);
 } grpc_security_connector_vtable;
@@ -71,29 +67,25 @@
 #ifndef NDEBUG
 #define GRPC_SECURITY_CONNECTOR_REF(p, r) \
   grpc_security_connector_ref((p), __FILE__, __LINE__, (r))
-#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \
-  grpc_security_connector_unref((exec_ctx), (p), __FILE__, __LINE__, (r))
+#define GRPC_SECURITY_CONNECTOR_UNREF(p, r) \
+  grpc_security_connector_unref((p), __FILE__, __LINE__, (r))
 grpc_security_connector* grpc_security_connector_ref(
     grpc_security_connector* policy, const char* file, int line,
     const char* reason);
-void grpc_security_connector_unref(grpc_exec_ctx* exec_ctx,
-                                   grpc_security_connector* policy,
+void grpc_security_connector_unref(grpc_security_connector* policy,
                                    const char* file, int line,
                                    const char* reason);
 #else
 #define GRPC_SECURITY_CONNECTOR_REF(p, r) grpc_security_connector_ref((p))
-#define GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, p, r) \
-  grpc_security_connector_unref((exec_ctx), (p))
+#define GRPC_SECURITY_CONNECTOR_UNREF(p, r) grpc_security_connector_unref((p))
 grpc_security_connector* grpc_security_connector_ref(
     grpc_security_connector* policy);
-void grpc_security_connector_unref(grpc_exec_ctx* exec_ctx,
-                                   grpc_security_connector* policy);
+void grpc_security_connector_unref(grpc_security_connector* policy);
 #endif
 
 /* Check the peer. Callee takes ownership of the peer object.
    When done, sets *auth_context and invokes on_peer_checked. */
-void grpc_security_connector_check_peer(grpc_exec_ctx* exec_ctx,
-                                        grpc_security_connector* sc,
+void grpc_security_connector_check_peer(grpc_security_connector* sc,
                                         tsi_peer peer,
                                         grpc_auth_context** auth_context,
                                         grpc_closure* on_peer_checked);
@@ -123,17 +115,14 @@
   grpc_security_connector base;
   grpc_channel_credentials* channel_creds;
   grpc_call_credentials* request_metadata_creds;
-  bool (*check_call_host)(grpc_exec_ctx* exec_ctx,
-                          grpc_channel_security_connector* sc, const char* host,
+  bool (*check_call_host)(grpc_channel_security_connector* sc, const char* host,
                           grpc_auth_context* auth_context,
                           grpc_closure* on_call_host_checked,
                           grpc_error** error);
-  void (*cancel_check_call_host)(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_security_connector* sc,
+  void (*cancel_check_call_host)(grpc_channel_security_connector* sc,
                                  grpc_closure* on_call_host_checked,
                                  grpc_error* error);
-  void (*add_handshakers)(grpc_exec_ctx* exec_ctx,
-                          grpc_channel_security_connector* sc,
+  void (*add_handshakers)(grpc_channel_security_connector* sc,
                           grpc_handshake_manager* handshake_mgr);
 };
 
@@ -146,20 +135,20 @@
 /// be set to indicate the result.  Otherwise, \a on_call_host_checked
 /// will be invoked when complete.
 bool grpc_channel_security_connector_check_call_host(
-    grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
-    const char* host, grpc_auth_context* auth_context,
-    grpc_closure* on_call_host_checked, grpc_error** error);
+    grpc_channel_security_connector* sc, const char* host,
+    grpc_auth_context* auth_context, grpc_closure* on_call_host_checked,
+    grpc_error** error);
 
 /// Cancels a pending asynchronous call to
 /// grpc_channel_security_connector_check_call_host() with
 /// \a on_call_host_checked as its callback.
 void grpc_channel_security_connector_cancel_check_call_host(
-    grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* sc,
-    grpc_closure* on_call_host_checked, grpc_error* error);
+    grpc_channel_security_connector* sc, grpc_closure* on_call_host_checked,
+    grpc_error* error);
 
 /* Registers handshakers with \a handshake_mgr. */
 void grpc_channel_security_connector_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_channel_security_connector* connector,
+    grpc_channel_security_connector* connector,
     grpc_handshake_manager* handshake_mgr);
 
 /* --- server_security_connector object. ---
@@ -172,8 +161,7 @@
 struct grpc_server_security_connector {
   grpc_security_connector base;
   grpc_server_credentials* server_creds;
-  void (*add_handshakers)(grpc_exec_ctx* exec_ctx,
-                          grpc_server_security_connector* sc,
+  void (*add_handshakers)(grpc_server_security_connector* sc,
                           grpc_handshake_manager* handshake_mgr);
 };
 
@@ -182,8 +170,7 @@
                                        grpc_server_security_connector* sc2);
 
 void grpc_server_security_connector_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_server_security_connector* sc,
-    grpc_handshake_manager* handshake_mgr);
+    grpc_server_security_connector* sc, grpc_handshake_manager* handshake_mgr);
 
 /* --- Creation security connectors. --- */
 
@@ -220,7 +207,7 @@
   specific error code otherwise.
 */
 grpc_security_status grpc_ssl_channel_security_connector_create(
-    grpc_exec_ctx* exec_ctx, grpc_channel_credentials* channel_creds,
+    grpc_channel_credentials* channel_creds,
     grpc_call_credentials* request_metadata_creds,
     const grpc_ssl_config* config, const char* target_name,
     const char* overridden_target_name, grpc_channel_security_connector** sc);
@@ -246,7 +233,7 @@
   specific error code otherwise.
 */
 grpc_security_status grpc_ssl_server_security_connector_create(
-    grpc_exec_ctx* exec_ctx, grpc_server_credentials* server_credentials,
+    grpc_server_credentials* server_credentials,
     grpc_server_security_connector** sc);
 
 /* Util. */
@@ -259,8 +246,4 @@
     const grpc_auth_context* auth_context);
 void tsi_shallow_peer_destruct(tsi_peer* peer);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_CONNECTOR_H */
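[Editor's sketch] Per the header comment above, grpc_channel_security_connector_check_call_host() returns true when it completes synchronously (writing the result into *error) and false when it will invoke on_call_host_checked later. A hedged caller sketch with hypothetical names (start_host_check, on_host_checked):

static void on_host_checked(void* arg, grpc_error* error) {
  /* Resume whatever was waiting on the host check, inspecting 'error'. */
}

static void start_host_check(grpc_channel_security_connector* sc,
                             const char* host, grpc_auth_context* auth_context,
                             grpc_closure* on_host_checked_closure) {
  grpc_error* error = GRPC_ERROR_NONE;
  if (grpc_channel_security_connector_check_call_host(
          sc, host, auth_context, on_host_checked_closure, &error)) {
    /* Completed synchronously: consume the result right here. */
    on_host_checked(nullptr, error);
    GRPC_ERROR_UNREF(error);
  }
  /* Otherwise on_host_checked_closure is scheduled once the check finishes. */
}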
diff --git a/src/core/lib/security/transport/security_handshaker.cc b/src/core/lib/security/transport/security_handshaker.cc
index 7067b70..7623fbf 100644
--- a/src/core/lib/security/transport/security_handshaker.cc
+++ b/src/core/lib/security/transport/security_handshaker.cc
@@ -65,8 +65,7 @@
   tsi_handshaker_result* handshaker_result;
 } security_handshaker;
 
-static size_t move_read_buffer_into_handshake_buffer(grpc_exec_ctx* exec_ctx,
-                                                     security_handshaker* h) {
+static size_t move_read_buffer_into_handshake_buffer(security_handshaker* h) {
   size_t bytes_in_read_buffer = h->args->read_buffer->length;
   if (h->handshake_buffer_size < bytes_in_read_buffer) {
     h->handshake_buffer =
@@ -79,48 +78,45 @@
     memcpy(h->handshake_buffer + offset, GRPC_SLICE_START_PTR(next_slice),
            GRPC_SLICE_LENGTH(next_slice));
     offset += GRPC_SLICE_LENGTH(next_slice);
-    grpc_slice_unref_internal(exec_ctx, next_slice);
+    grpc_slice_unref_internal(next_slice);
   }
   return bytes_in_read_buffer;
 }
 
-static void security_handshaker_unref(grpc_exec_ctx* exec_ctx,
-                                      security_handshaker* h) {
+static void security_handshaker_unref(security_handshaker* h) {
   if (gpr_unref(&h->refs)) {
     gpr_mu_destroy(&h->mu);
     tsi_handshaker_destroy(h->handshaker);
     tsi_handshaker_result_destroy(h->handshaker_result);
     if (h->endpoint_to_destroy != nullptr) {
-      grpc_endpoint_destroy(exec_ctx, h->endpoint_to_destroy);
+      grpc_endpoint_destroy(h->endpoint_to_destroy);
     }
     if (h->read_buffer_to_destroy != nullptr) {
-      grpc_slice_buffer_destroy_internal(exec_ctx, h->read_buffer_to_destroy);
+      grpc_slice_buffer_destroy_internal(h->read_buffer_to_destroy);
       gpr_free(h->read_buffer_to_destroy);
     }
     gpr_free(h->handshake_buffer);
-    grpc_slice_buffer_destroy_internal(exec_ctx, &h->outgoing);
+    grpc_slice_buffer_destroy_internal(&h->outgoing);
     GRPC_AUTH_CONTEXT_UNREF(h->auth_context, "handshake");
-    GRPC_SECURITY_CONNECTOR_UNREF(exec_ctx, h->connector, "handshake");
+    GRPC_SECURITY_CONNECTOR_UNREF(h->connector, "handshake");
     gpr_free(h);
   }
 }
 
 // Set args fields to NULL, saving the endpoint and read buffer for
 // later destruction.
-static void cleanup_args_for_failure_locked(grpc_exec_ctx* exec_ctx,
-                                            security_handshaker* h) {
+static void cleanup_args_for_failure_locked(security_handshaker* h) {
   h->endpoint_to_destroy = h->args->endpoint;
   h->args->endpoint = nullptr;
   h->read_buffer_to_destroy = h->args->read_buffer;
   h->args->read_buffer = nullptr;
-  grpc_channel_args_destroy(exec_ctx, h->args->args);
+  grpc_channel_args_destroy(h->args->args);
   h->args->args = nullptr;
 }
 
 // If the handshake failed or we're shutting down, clean up and invoke the
 // callback with the error.
-static void security_handshake_failed_locked(grpc_exec_ctx* exec_ctx,
-                                             security_handshaker* h,
+static void security_handshake_failed_locked(security_handshaker* h,
                                              grpc_error* error) {
   if (error == GRPC_ERROR_NONE) {
     // If we were shut down after the handshake succeeded but before an
@@ -135,34 +131,33 @@
     // before destroying them, even if we know that there are no
     // pending read/write callbacks.  This should be fixed, at which
     // point this can be removed.
-    grpc_endpoint_shutdown(exec_ctx, h->args->endpoint, GRPC_ERROR_REF(error));
+    grpc_endpoint_shutdown(h->args->endpoint, GRPC_ERROR_REF(error));
     // Not shutting down, so the write failed.  Clean up before
     // invoking the callback.
-    cleanup_args_for_failure_locked(exec_ctx, h);
+    cleanup_args_for_failure_locked(h);
     // Set shutdown to true so that subsequent calls to
     // security_handshaker_shutdown() do nothing.
     h->shutdown = true;
   }
   // Invoke callback.
-  GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, error);
+  GRPC_CLOSURE_SCHED(h->on_handshake_done, error);
 }
 
-static void on_peer_checked_inner(grpc_exec_ctx* exec_ctx,
-                                  security_handshaker* h, grpc_error* error) {
+static void on_peer_checked_inner(security_handshaker* h, grpc_error* error) {
   if (error != GRPC_ERROR_NONE || h->shutdown) {
-    security_handshake_failed_locked(exec_ctx, h, GRPC_ERROR_REF(error));
+    security_handshake_failed_locked(h, GRPC_ERROR_REF(error));
     return;
   }
   // Create zero-copy frame protector, if implemented.
   tsi_zero_copy_grpc_protector* zero_copy_protector = nullptr;
   tsi_result result = tsi_handshaker_result_create_zero_copy_grpc_protector(
-      exec_ctx, h->handshaker_result, nullptr, &zero_copy_protector);
+      h->handshaker_result, nullptr, &zero_copy_protector);
   if (result != TSI_OK && result != TSI_UNIMPLEMENTED) {
     error = grpc_set_tsi_error_result(
         GRPC_ERROR_CREATE_FROM_STATIC_STRING(
             "Zero-copy frame protector creation failed"),
         result);
-    security_handshake_failed_locked(exec_ctx, h, error);
+    security_handshake_failed_locked(h, error);
     return;
   }
   // Create frame protector if zero-copy frame protector is NULL.
@@ -174,7 +169,7 @@
       error = grpc_set_tsi_error_result(GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                                             "Frame protector creation failed"),
                                         result);
-      security_handshake_failed_locked(exec_ctx, h, error);
+      security_handshake_failed_locked(h, error);
       return;
     }
   }
@@ -189,7 +184,7 @@
         grpc_slice_from_copied_buffer((char*)unused_bytes, unused_bytes_size);
     h->args->endpoint = grpc_secure_endpoint_create(
         protector, zero_copy_protector, h->args->endpoint, &slice, 1);
-    grpc_slice_unref_internal(exec_ctx, slice);
+    grpc_slice_unref_internal(slice);
   } else {
     h->args->endpoint = grpc_secure_endpoint_create(
         protector, zero_copy_protector, h->args->endpoint, nullptr, 0);
@@ -201,25 +196,23 @@
   grpc_channel_args* tmp_args = h->args->args;
   h->args->args =
       grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
-  grpc_channel_args_destroy(exec_ctx, tmp_args);
+  grpc_channel_args_destroy(tmp_args);
   // Invoke callback.
-  GRPC_CLOSURE_SCHED(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(h->on_handshake_done, GRPC_ERROR_NONE);
   // Set shutdown to true so that subsequent calls to
   // security_handshaker_shutdown() do nothing.
   h->shutdown = true;
 }
 
-static void on_peer_checked(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+static void on_peer_checked(void* arg, grpc_error* error) {
   security_handshaker* h = (security_handshaker*)arg;
   gpr_mu_lock(&h->mu);
-  on_peer_checked_inner(exec_ctx, h, error);
+  on_peer_checked_inner(h, error);
   gpr_mu_unlock(&h->mu);
-  security_handshaker_unref(exec_ctx, h);
+  security_handshaker_unref(h);
 }
 
-static grpc_error* check_peer_locked(grpc_exec_ctx* exec_ctx,
-                                     security_handshaker* h) {
+static grpc_error* check_peer_locked(security_handshaker* h) {
   tsi_peer peer;
   tsi_result result =
       tsi_handshaker_result_extract_peer(h->handshaker_result, &peer);
@@ -227,20 +220,20 @@
     return grpc_set_tsi_error_result(
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Peer extraction failed"), result);
   }
-  grpc_security_connector_check_peer(exec_ctx, h->connector, peer,
-                                     &h->auth_context, &h->on_peer_checked);
+  grpc_security_connector_check_peer(h->connector, peer, &h->auth_context,
+                                     &h->on_peer_checked);
   return GRPC_ERROR_NONE;
 }
 
 static grpc_error* on_handshake_next_done_locked(
-    grpc_exec_ctx* exec_ctx, security_handshaker* h, tsi_result result,
+    security_handshaker* h, tsi_result result,
     const unsigned char* bytes_to_send, size_t bytes_to_send_size,
     tsi_handshaker_result* handshaker_result) {
   grpc_error* error = GRPC_ERROR_NONE;
   // Read more if we need to.
   if (result == TSI_INCOMPLETE_DATA) {
     GPR_ASSERT(bytes_to_send_size == 0);
-    grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer,
+    grpc_endpoint_read(h->args->endpoint, h->args->read_buffer,
                        &h->on_handshake_data_received_from_peer);
     return error;
   }
@@ -257,17 +250,17 @@
     // Send data to peer, if needed.
     grpc_slice to_send = grpc_slice_from_copied_buffer(
         (const char*)bytes_to_send, bytes_to_send_size);
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &h->outgoing);
+    grpc_slice_buffer_reset_and_unref_internal(&h->outgoing);
     grpc_slice_buffer_add(&h->outgoing, to_send);
-    grpc_endpoint_write(exec_ctx, h->args->endpoint, &h->outgoing,
+    grpc_endpoint_write(h->args->endpoint, &h->outgoing,
                         &h->on_handshake_data_sent_to_peer);
   } else if (handshaker_result == nullptr) {
     // There is nothing to send, but need to read from peer.
-    grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer,
+    grpc_endpoint_read(h->args->endpoint, h->args->read_buffer,
                        &h->on_handshake_data_received_from_peer);
   } else {
     // Handshake has finished, check peer and so on.
-    error = check_peer_locked(exec_ctx, h);
+    error = check_peer_locked(h);
   }
   return error;
 }
@@ -278,24 +271,22 @@
   security_handshaker* h = (security_handshaker*)user_data;
   // This callback will be invoked by TSI in a non-grpc thread, so it's
   // safe to create our own exec_ctx here.
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_mu_lock(&h->mu);
-  grpc_error* error =
-      on_handshake_next_done_locked(&exec_ctx, h, result, bytes_to_send,
-                                    bytes_to_send_size, handshaker_result);
+  grpc_error* error = on_handshake_next_done_locked(
+      h, result, bytes_to_send, bytes_to_send_size, handshaker_result);
   if (error != GRPC_ERROR_NONE) {
-    security_handshake_failed_locked(&exec_ctx, h, error);
+    security_handshake_failed_locked(h, error);
     gpr_mu_unlock(&h->mu);
-    security_handshaker_unref(&exec_ctx, h);
+    security_handshaker_unref(h);
   } else {
     gpr_mu_unlock(&h->mu);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static grpc_error* do_handshaker_next_locked(
-    grpc_exec_ctx* exec_ctx, security_handshaker* h,
-    const unsigned char* bytes_received, size_t bytes_received_size) {
+    security_handshaker* h, const unsigned char* bytes_received,
+    size_t bytes_received_size) {
   // Invoke TSI handshaker.
   const unsigned char* bytes_to_send = nullptr;
   size_t bytes_to_send_size = 0;
@@ -311,62 +302,57 @@
   }
   // Handshaker returned synchronously. Invoke callback directly in
   // this thread with our existing exec_ctx.
-  return on_handshake_next_done_locked(exec_ctx, h, result, bytes_to_send,
+  return on_handshake_next_done_locked(h, result, bytes_to_send,
                                        bytes_to_send_size, handshaker_result);
 }
 
-static void on_handshake_data_received_from_peer(grpc_exec_ctx* exec_ctx,
-                                                 void* arg, grpc_error* error) {
+static void on_handshake_data_received_from_peer(void* arg, grpc_error* error) {
   security_handshaker* h = (security_handshaker*)arg;
   gpr_mu_lock(&h->mu);
   if (error != GRPC_ERROR_NONE || h->shutdown) {
     security_handshake_failed_locked(
-        exec_ctx, h,
-        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-            "Handshake read failed", &error, 1));
+        h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+               "Handshake read failed", &error, 1));
     gpr_mu_unlock(&h->mu);
-    security_handshaker_unref(exec_ctx, h);
+    security_handshaker_unref(h);
     return;
   }
   // Copy all slices received.
-  size_t bytes_received_size =
-      move_read_buffer_into_handshake_buffer(exec_ctx, h);
+  size_t bytes_received_size = move_read_buffer_into_handshake_buffer(h);
   // Call TSI handshaker.
-  error = do_handshaker_next_locked(exec_ctx, h, h->handshake_buffer,
-                                    bytes_received_size);
+  error =
+      do_handshaker_next_locked(h, h->handshake_buffer, bytes_received_size);
 
   if (error != GRPC_ERROR_NONE) {
-    security_handshake_failed_locked(exec_ctx, h, error);
+    security_handshake_failed_locked(h, error);
     gpr_mu_unlock(&h->mu);
-    security_handshaker_unref(exec_ctx, h);
+    security_handshaker_unref(h);
   } else {
     gpr_mu_unlock(&h->mu);
   }
 }
 
-static void on_handshake_data_sent_to_peer(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void on_handshake_data_sent_to_peer(void* arg, grpc_error* error) {
   security_handshaker* h = (security_handshaker*)arg;
   gpr_mu_lock(&h->mu);
   if (error != GRPC_ERROR_NONE || h->shutdown) {
     security_handshake_failed_locked(
-        exec_ctx, h,
-        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-            "Handshake write failed", &error, 1));
+        h, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+               "Handshake write failed", &error, 1));
     gpr_mu_unlock(&h->mu);
-    security_handshaker_unref(exec_ctx, h);
+    security_handshaker_unref(h);
     return;
   }
   // We may be done.
   if (h->handshaker_result == nullptr) {
-    grpc_endpoint_read(exec_ctx, h->args->endpoint, h->args->read_buffer,
+    grpc_endpoint_read(h->args->endpoint, h->args->read_buffer,
                        &h->on_handshake_data_received_from_peer);
   } else {
-    error = check_peer_locked(exec_ctx, h);
+    error = check_peer_locked(h);
     if (error != GRPC_ERROR_NONE) {
-      security_handshake_failed_locked(exec_ctx, h, error);
+      security_handshake_failed_locked(h, error);
       gpr_mu_unlock(&h->mu);
-      security_handshaker_unref(exec_ctx, h);
+      security_handshaker_unref(h);
       return;
     }
   }
@@ -377,28 +363,25 @@
 // public handshaker API
 //
 
-static void security_handshaker_destroy(grpc_exec_ctx* exec_ctx,
-                                        grpc_handshaker* handshaker) {
+static void security_handshaker_destroy(grpc_handshaker* handshaker) {
   security_handshaker* h = (security_handshaker*)handshaker;
-  security_handshaker_unref(exec_ctx, h);
+  security_handshaker_unref(h);
 }
 
-static void security_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
-                                         grpc_handshaker* handshaker,
+static void security_handshaker_shutdown(grpc_handshaker* handshaker,
                                          grpc_error* why) {
   security_handshaker* h = (security_handshaker*)handshaker;
   gpr_mu_lock(&h->mu);
   if (!h->shutdown) {
     h->shutdown = true;
-    grpc_endpoint_shutdown(exec_ctx, h->args->endpoint, GRPC_ERROR_REF(why));
-    cleanup_args_for_failure_locked(exec_ctx, h);
+    grpc_endpoint_shutdown(h->args->endpoint, GRPC_ERROR_REF(why));
+    cleanup_args_for_failure_locked(h);
   }
   gpr_mu_unlock(&h->mu);
   GRPC_ERROR_UNREF(why);
 }
 
-static void security_handshaker_do_handshake(grpc_exec_ctx* exec_ctx,
-                                             grpc_handshaker* handshaker,
+static void security_handshaker_do_handshake(grpc_handshaker* handshaker,
                                              grpc_tcp_server_acceptor* acceptor,
                                              grpc_closure* on_handshake_done,
                                              grpc_handshaker_args* args) {
@@ -407,14 +390,13 @@
   h->args = args;
   h->on_handshake_done = on_handshake_done;
   gpr_ref(&h->refs);
-  size_t bytes_received_size =
-      move_read_buffer_into_handshake_buffer(exec_ctx, h);
-  grpc_error* error = do_handshaker_next_locked(
-      exec_ctx, h, h->handshake_buffer, bytes_received_size);
+  size_t bytes_received_size = move_read_buffer_into_handshake_buffer(h);
+  grpc_error* error =
+      do_handshaker_next_locked(h, h->handshake_buffer, bytes_received_size);
   if (error != GRPC_ERROR_NONE) {
-    security_handshake_failed_locked(exec_ctx, h, error);
+    security_handshake_failed_locked(h, error);
     gpr_mu_unlock(&h->mu);
-    security_handshaker_unref(exec_ctx, h);
+    security_handshaker_unref(h);
     return;
   }
   gpr_mu_unlock(&h->mu);
@@ -425,8 +407,7 @@
     security_handshaker_do_handshake};
 
 static grpc_handshaker* security_handshaker_create(
-    grpc_exec_ctx* exec_ctx, tsi_handshaker* handshaker,
-    grpc_security_connector* connector) {
+    tsi_handshaker* handshaker, grpc_security_connector* connector) {
   security_handshaker* h =
       (security_handshaker*)gpr_zalloc(sizeof(security_handshaker));
   grpc_handshaker_init(&security_handshaker_vtable, &h->base);
@@ -452,23 +433,20 @@
 // fail_handshaker
 //
 
-static void fail_handshaker_destroy(grpc_exec_ctx* exec_ctx,
-                                    grpc_handshaker* handshaker) {
+static void fail_handshaker_destroy(grpc_handshaker* handshaker) {
   gpr_free(handshaker);
 }
 
-static void fail_handshaker_shutdown(grpc_exec_ctx* exec_ctx,
-                                     grpc_handshaker* handshaker,
+static void fail_handshaker_shutdown(grpc_handshaker* handshaker,
                                      grpc_error* why) {
   GRPC_ERROR_UNREF(why);
 }
 
-static void fail_handshaker_do_handshake(grpc_exec_ctx* exec_ctx,
-                                         grpc_handshaker* handshaker,
+static void fail_handshaker_do_handshake(grpc_handshaker* handshaker,
                                          grpc_tcp_server_acceptor* acceptor,
                                          grpc_closure* on_handshake_done,
                                          grpc_handshaker_args* args) {
-  GRPC_CLOSURE_SCHED(exec_ctx, on_handshake_done,
+  GRPC_CLOSURE_SCHED(on_handshake_done,
                      GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                          "Failed to create security handshaker"));
 }
@@ -488,27 +466,27 @@
 //
 
 static void client_handshaker_factory_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory,
-    const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) {
+    grpc_handshaker_factory* handshaker_factory, const grpc_channel_args* args,
+    grpc_handshake_manager* handshake_mgr) {
   grpc_channel_security_connector* security_connector =
       (grpc_channel_security_connector*)grpc_security_connector_find_in_args(
           args);
-  grpc_channel_security_connector_add_handshakers(exec_ctx, security_connector,
+  grpc_channel_security_connector_add_handshakers(security_connector,
                                                   handshake_mgr);
 }
 
 static void server_handshaker_factory_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* hf,
-    const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) {
+    grpc_handshaker_factory* hf, const grpc_channel_args* args,
+    grpc_handshake_manager* handshake_mgr) {
   grpc_server_security_connector* security_connector =
       (grpc_server_security_connector*)grpc_security_connector_find_in_args(
           args);
-  grpc_server_security_connector_add_handshakers(exec_ctx, security_connector,
+  grpc_server_security_connector_add_handshakers(security_connector,
                                                  handshake_mgr);
 }
 
 static void handshaker_factory_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory) {}
+    grpc_handshaker_factory* handshaker_factory) {}
 
 static const grpc_handshaker_factory_vtable client_handshaker_factory_vtable = {
     client_handshaker_factory_add_handshakers, handshaker_factory_destroy};
@@ -527,14 +505,13 @@
 //
 
 grpc_handshaker* grpc_security_handshaker_create(
-    grpc_exec_ctx* exec_ctx, tsi_handshaker* handshaker,
-    grpc_security_connector* connector) {
+    tsi_handshaker* handshaker, grpc_security_connector* connector) {
   // If no TSI handshaker was created, return a handshaker that always fails.
   // Otherwise, return a real security handshaker.
   if (handshaker == nullptr) {
     return fail_handshaker_create();
   } else {
-    return security_handshaker_create(exec_ctx, handshaker, connector);
+    return security_handshaker_create(handshaker, connector);
   }
 }
 
diff --git a/src/core/lib/security/transport/security_handshaker.h b/src/core/lib/security/transport/security_handshaker.h
index 174f70f..6cd6446 100644
--- a/src/core/lib/security/transport/security_handshaker.h
+++ b/src/core/lib/security/transport/security_handshaker.h
@@ -23,20 +23,11 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/security/transport/security_connector.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /// Creates a security handshaker using \a handshaker.
 grpc_handshaker* grpc_security_handshaker_create(
-    grpc_exec_ctx* exec_ctx, tsi_handshaker* handshaker,
-    grpc_security_connector* connector);
+    tsi_handshaker* handshaker, grpc_security_connector* connector);
 
 /// Registers security handshaker factories.
 void grpc_security_register_handshaker_factories();
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_SECURITY_HANDSHAKER_H */
diff --git a/src/core/lib/security/transport/server_auth_filter.cc b/src/core/lib/security/transport/server_auth_filter.cc
index 9cf368a..73653f2 100644
--- a/src/core/lib/security/transport/server_auth_filter.cc
+++ b/src/core/lib/security/transport/server_auth_filter.cc
@@ -73,8 +73,7 @@
   return result;
 }
 
-static grpc_filtered_mdelem remove_consumed_md(grpc_exec_ctx* exec_ctx,
-                                               void* user_data,
+static grpc_filtered_mdelem remove_consumed_md(void* user_data,
                                                grpc_mdelem md) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
@@ -88,8 +87,7 @@
   return GRPC_FILTERED_MDELEM(md);
 }
 
-static void on_md_processing_done_inner(grpc_exec_ctx* exec_ctx,
-                                        grpc_call_element* elem,
+static void on_md_processing_done_inner(grpc_call_element* elem,
                                         const grpc_metadata* consumed_md,
                                         size_t num_consumed_md,
                                         const grpc_metadata* response_md,
@@ -107,11 +105,10 @@
     calld->consumed_md = consumed_md;
     calld->num_consumed_md = num_consumed_md;
     error = grpc_metadata_batch_filter(
-        exec_ctx, batch->payload->recv_initial_metadata.recv_initial_metadata,
+        batch->payload->recv_initial_metadata.recv_initial_metadata,
         remove_consumed_md, elem, "Response metadata filtering error");
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, calld->original_recv_initial_metadata_ready,
-                     error);
+  GRPC_CLOSURE_SCHED(calld->original_recv_initial_metadata_ready, error);
 }
 
 // Called from application code.
@@ -121,7 +118,7 @@
     grpc_status_code status, const char* error_details) {
   grpc_call_element* elem = (grpc_call_element*)user_data;
   call_data* calld = (call_data*)elem->call_data;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   // If the call was not cancelled while we were in flight, process the result.
   if (gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
                        (gpr_atm)STATE_DONE)) {
@@ -134,34 +131,32 @@
           GRPC_ERROR_CREATE_FROM_COPIED_STRING(error_details),
           GRPC_ERROR_INT_GRPC_STATUS, status);
     }
-    on_md_processing_done_inner(&exec_ctx, elem, consumed_md, num_consumed_md,
-                                response_md, num_response_md, error);
+    on_md_processing_done_inner(elem, consumed_md, num_consumed_md, response_md,
+                                num_response_md, error);
   }
   // Clean up.
   for (size_t i = 0; i < calld->md.count; i++) {
-    grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].key);
-    grpc_slice_unref_internal(&exec_ctx, calld->md.metadata[i].value);
+    grpc_slice_unref_internal(calld->md.metadata[i].key);
+    grpc_slice_unref_internal(calld->md.metadata[i].value);
   }
   grpc_metadata_array_destroy(&calld->md);
-  GRPC_CALL_STACK_UNREF(&exec_ctx, calld->owning_call, "server_auth_metadata");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_CALL_STACK_UNREF(calld->owning_call, "server_auth_metadata");
 }
 
-static void cancel_call(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void cancel_call(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
   // If the result was not already processed, invoke the callback now.
   if (error != GRPC_ERROR_NONE &&
       gpr_atm_full_cas(&calld->state, (gpr_atm)STATE_INIT,
                        (gpr_atm)STATE_CANCELLED)) {
-    on_md_processing_done_inner(exec_ctx, elem, nullptr, 0, nullptr, 0,
+    on_md_processing_done_inner(elem, nullptr, 0, nullptr, 0,
                                 GRPC_ERROR_REF(error));
   }
-  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "cancel_call");
+  GRPC_CALL_STACK_UNREF(calld->owning_call, "cancel_call");
 }
 
-static void recv_initial_metadata_ready(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void recv_initial_metadata_ready(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
@@ -173,7 +168,7 @@
       GRPC_CALL_STACK_REF(calld->owning_call, "cancel_call");
       GRPC_CLOSURE_INIT(&calld->cancel_closure, cancel_call, elem,
                         grpc_schedule_on_exec_ctx);
-      grpc_call_combiner_set_notify_on_cancel(exec_ctx, calld->call_combiner,
+      grpc_call_combiner_set_notify_on_cancel(calld->call_combiner,
                                               &calld->cancel_closure);
       GRPC_CALL_STACK_REF(calld->owning_call, "server_auth_metadata");
       calld->md = metadata_batch_to_md_array(
@@ -184,13 +179,12 @@
       return;
     }
   }
-  GRPC_CLOSURE_RUN(exec_ctx, calld->original_recv_initial_metadata_ready,
+  GRPC_CLOSURE_RUN(calld->original_recv_initial_metadata_ready,
                    GRPC_ERROR_REF(error));
 }
 
 static void auth_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* batch) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* batch) {
   call_data* calld = (call_data*)elem->call_data;
   if (batch->recv_initial_metadata) {
     // Inject our callback.
@@ -200,12 +194,11 @@
     batch->payload->recv_initial_metadata.recv_initial_metadata_ready =
         &calld->recv_initial_metadata_ready;
   }
-  grpc_call_next_op(exec_ctx, elem, batch);
+  grpc_call_next_op(elem, batch);
 }
 
 /* Constructor for call_data */
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
@@ -231,13 +224,12 @@
 }
 
 /* Destructor for call_data */
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {}
 
 /* Constructor for channel_data */
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   GPR_ASSERT(!args->is_last);
   channel_data* chand = (channel_data*)elem->channel_data;
@@ -253,11 +245,10 @@
 }
 
 /* Destructor for channel data */
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {
+static void destroy_channel_elem(grpc_channel_element* elem) {
   channel_data* chand = (channel_data*)elem->channel_data;
   GRPC_AUTH_CONTEXT_UNREF(chand->auth_context, "server_auth_filter");
-  grpc_server_credentials_unref(exec_ctx, chand->creds);
+  grpc_server_credentials_unref(chand->creds);
 }
 
 const grpc_channel_filter grpc_server_auth_filter = {
diff --git a/src/core/lib/security/transport/tsi_error.h b/src/core/lib/security/transport/tsi_error.h
index 4e8418f..8fa6c48 100644
--- a/src/core/lib/security/transport/tsi_error.h
+++ b/src/core/lib/security/transport/tsi_error.h
@@ -22,14 +22,6 @@
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/tsi/transport_security_interface.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 grpc_error* grpc_set_tsi_error_result(grpc_error* error, tsi_result result);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_TRANSPORT_TSI_ERROR_H */
diff --git a/src/core/lib/security/util/json_util.h b/src/core/lib/security/util/json_util.h
index 7538f76..b7e46d4 100644
--- a/src/core/lib/security/util/json_util.h
+++ b/src/core/lib/security/util/json_util.h
@@ -28,10 +28,6 @@
 #define GRPC_AUTH_JSON_TYPE_SERVICE_ACCOUNT "service_account"
 #define GRPC_AUTH_JSON_TYPE_AUTHORIZED_USER "authorized_user"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // Gets a child property from a json node.
 const char* grpc_json_get_string_property(const grpc_json* json,
                                           const char* prop_name);
@@ -41,8 +37,4 @@
 bool grpc_copy_json_string_property(const grpc_json* json,
                                     const char* prop_name, char** copied_value);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SECURITY_UTIL_JSON_UTIL_H */
diff --git a/src/core/lib/slice/b64.cc b/src/core/lib/slice/b64.cc
index fe7a86e..f36b13e 100644
--- a/src/core/lib/slice/b64.cc
+++ b/src/core/lib/slice/b64.cc
@@ -122,9 +122,8 @@
   result[current - result] = '\0';
 }
 
-grpc_slice grpc_base64_decode(grpc_exec_ctx* exec_ctx, const char* b64,
-                              int url_safe) {
-  return grpc_base64_decode_with_len(exec_ctx, b64, strlen(b64), url_safe);
+grpc_slice grpc_base64_decode(const char* b64, int url_safe) {
+  return grpc_base64_decode_with_len(b64, strlen(b64), url_safe);
 }
 
 static void decode_one_char(const unsigned char* codes, unsigned char* result,
@@ -185,8 +184,8 @@
   return 1;
 }
 
-grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx* exec_ctx, const char* b64,
-                                       size_t b64_len, int url_safe) {
+grpc_slice grpc_base64_decode_with_len(const char* b64, size_t b64_len,
+                                       int url_safe) {
   grpc_slice result = GRPC_SLICE_MALLOC(b64_len);
   unsigned char* current = GRPC_SLICE_START_PTR(result);
   size_t result_size = 0;
@@ -231,6 +230,6 @@
   return result;
 
 fail:
-  grpc_slice_unref_internal(exec_ctx, result);
+  grpc_slice_unref_internal(result);
   return grpc_empty_slice();
 }
diff --git a/src/core/lib/slice/b64.h b/src/core/lib/slice/b64.h
index 467f5d8..17e7306 100644
--- a/src/core/lib/slice/b64.h
+++ b/src/core/lib/slice/b64.h
@@ -21,10 +21,6 @@
 
 #include <grpc/slice.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Encodes data using base64. It is the caller's responsibility to free
    the returned char * using gpr_free. Returns NULL on NULL input.
    TODO(makdharma): change the flags from int to bool */
@@ -44,15 +40,10 @@
 
 /* Decodes data according to the base64 specification. Returns an empty
    slice in case of failure. */
-grpc_slice grpc_base64_decode(grpc_exec_ctx* exec_ctx, const char* b64,
-                              int url_safe);
+grpc_slice grpc_base64_decode(const char* b64, int url_safe);
 
 /* Same as above except that the length is provided by the caller. */
-grpc_slice grpc_base64_decode_with_len(grpc_exec_ctx* exec_ctx, const char* b64,
-                                       size_t b64_len, int url_safe);
-
-#ifdef __cplusplus
-}
-#endif
+grpc_slice grpc_base64_decode_with_len(const char* b64, size_t b64_len,
+                                       int url_safe);
 
 #endif /* GRPC_CORE_LIB_SLICE_B64_H */
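
With the exec_ctx parameter dropped from the decode helpers above, callers only need a scoped ExecCtx on the stack. A minimal sketch, assuming core is initialized; the literal input string is purely illustrative:

// Decode a base64 string with the post-refactor internal API.
#include <grpc/slice.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/b64.h"
#include "src/core/lib/slice/slice_internal.h"

void DecodeExample() {
  grpc_core::ExecCtx exec_ctx;  // replaces GRPC_EXEC_CTX_INIT / _finish
  grpc_slice decoded = grpc_base64_decode("aGVsbG8gd29ybGQ=", /*url_safe=*/0);
  // ... inspect GRPC_SLICE_START_PTR(decoded) / GRPC_SLICE_LENGTH(decoded) ...
  grpc_slice_unref_internal(decoded);  // no exec_ctx argument anymore
}
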
diff --git a/src/core/lib/slice/percent_encoding.h b/src/core/lib/slice/percent_encoding.h
index 22b5e8d..a1009ff 100644
--- a/src/core/lib/slice/percent_encoding.h
+++ b/src/core/lib/slice/percent_encoding.h
@@ -30,10 +30,6 @@
 
 #include <grpc/slice.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* URL percent encoding spec bitfield (usable as 'unreserved_bytes' in
    grpc_percent_encode_slice, grpc_strict_percent_decode_slice).
    Flags [A-Za-z0-9-_.~] as unreserved bytes for the percent encoding routines
@@ -64,8 +60,4 @@
    This cannot fail. */
 grpc_slice grpc_permissive_percent_decode_slice(grpc_slice slice_in);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SLICE_PERCENT_ENCODING_H */
diff --git a/src/core/lib/slice/slice.cc b/src/core/lib/slice/slice.cc
index bbaf87b..1eb1529 100644
--- a/src/core/lib/slice/slice.cc
+++ b/src/core/lib/slice/slice.cc
@@ -54,9 +54,9 @@
   return slice;
 }
 
-void grpc_slice_unref_internal(grpc_exec_ctx* exec_ctx, grpc_slice slice) {
+void grpc_slice_unref_internal(grpc_slice slice) {
   if (slice.refcount) {
-    slice.refcount->vtable->unref(exec_ctx, slice.refcount);
+    slice.refcount->vtable->unref(slice.refcount);
   }
 }
 
@@ -67,15 +67,14 @@
 
 /* Public API */
 void grpc_slice_unref(grpc_slice slice) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice_unref_internal(&exec_ctx, slice);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_slice_unref_internal(slice);
 }
 
 /* grpc_slice_from_static_string support structure - a refcount that does
    nothing */
 static void noop_ref(void* unused) {}
-static void noop_unref(grpc_exec_ctx* exec_ctx, void* unused) {}
+static void noop_unref(void* unused) {}
 
 static const grpc_slice_refcount_vtable noop_refcount_vtable = {
     noop_ref, noop_unref, grpc_slice_default_eq_impl,
@@ -109,7 +108,7 @@
   gpr_ref(&r->refs);
 }
 
-static void new_slice_unref(grpc_exec_ctx* exec_ctx, void* p) {
+static void new_slice_unref(void* p) {
   new_slice_refcount* r = (new_slice_refcount*)p;
   if (gpr_unref(&r->refs)) {
     r->user_destroy(r->user_data);
@@ -159,7 +158,7 @@
   gpr_ref(&r->refs);
 }
 
-static void new_with_len_unref(grpc_exec_ctx* exec_ctx, void* p) {
+static void new_with_len_unref(void* p) {
   new_with_len_slice_refcount* r = (new_with_len_slice_refcount*)p;
   if (gpr_unref(&r->refs)) {
     r->user_destroy(r->user_data, r->user_length);
@@ -210,7 +209,7 @@
   gpr_ref(&r->refs);
 }
 
-static void malloc_unref(grpc_exec_ctx* exec_ctx, void* p) {
+static void malloc_unref(void* p) {
   malloc_refcount* r = (malloc_refcount*)p;
   if (gpr_unref(&r->refs)) {
     gpr_free(r);
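
The grpc_slice_unref change above is the shape of the whole conversion: public entry points construct a scoped grpc_core::ExecCtx, and internal helpers stop taking a grpc_exec_ctx* because they pick up the thread's current ExecCtx implicitly. A hedged sketch with a hypothetical entry point (my_release_slices is not part of the tree):

#include <grpc/slice.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/slice/slice_internal.h"

void my_release_slices(grpc_slice* slices, size_t count) {  // hypothetical
  grpc_core::ExecCtx exec_ctx;  // scoped; pending work is flushed on destruction
  for (size_t i = 0; i < count; ++i) {
    grpc_slice_unref_internal(slices[i]);  // exec_ctx parameter removed
  }
}
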
diff --git a/src/core/lib/slice/slice_buffer.cc b/src/core/lib/slice/slice_buffer.cc
index 5db54da..33ec2af 100644
--- a/src/core/lib/slice/slice_buffer.cc
+++ b/src/core/lib/slice/slice_buffer.cc
@@ -65,18 +65,16 @@
   sb->base_slices = sb->slices = sb->inlined;
 }
 
-void grpc_slice_buffer_destroy_internal(grpc_exec_ctx* exec_ctx,
-                                        grpc_slice_buffer* sb) {
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, sb);
+void grpc_slice_buffer_destroy_internal(grpc_slice_buffer* sb) {
+  grpc_slice_buffer_reset_and_unref_internal(sb);
   if (sb->base_slices != sb->inlined) {
     gpr_free(sb->base_slices);
   }
 }
 
 void grpc_slice_buffer_destroy(grpc_slice_buffer* sb) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice_buffer_destroy_internal(&exec_ctx, sb);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_slice_buffer_destroy_internal(sb);
 }
 
 uint8_t* grpc_slice_buffer_tiny_add(grpc_slice_buffer* sb, size_t n) {
@@ -163,11 +161,10 @@
   }
 }
 
-void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx* exec_ctx,
-                                                grpc_slice_buffer* sb) {
+void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer* sb) {
   size_t i;
   for (i = 0; i < sb->count; i++) {
-    grpc_slice_unref_internal(exec_ctx, sb->slices[i]);
+    grpc_slice_unref_internal(sb->slices[i]);
   }
 
   sb->count = 0;
@@ -175,9 +172,8 @@
 }
 
 void grpc_slice_buffer_reset_and_unref(grpc_slice_buffer* sb) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, sb);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_slice_buffer_reset_and_unref_internal(sb);
 }
 
 void grpc_slice_buffer_swap(grpc_slice_buffer* a, grpc_slice_buffer* b) {
@@ -289,8 +285,7 @@
   slice_buffer_move_first_maybe_ref(src, n, dst, false);
 }
 
-void grpc_slice_buffer_move_first_into_buffer(grpc_exec_ctx* exec_ctx,
-                                              grpc_slice_buffer* src, size_t n,
+void grpc_slice_buffer_move_first_into_buffer(grpc_slice_buffer* src, size_t n,
                                               void* dst) {
   char* dstp = (char*)dst;
   GPR_ASSERT(src->length >= n);
@@ -305,13 +300,13 @@
       n = 0;
     } else if (slice_len == n) {
       memcpy(dstp, GRPC_SLICE_START_PTR(slice), n);
-      grpc_slice_unref_internal(exec_ctx, slice);
+      grpc_slice_unref_internal(slice);
       n = 0;
     } else {
       memcpy(dstp, GRPC_SLICE_START_PTR(slice), slice_len);
       dstp += slice_len;
       n -= slice_len;
-      grpc_slice_unref_internal(exec_ctx, slice);
+      grpc_slice_unref_internal(slice);
     }
   }
 }
diff --git a/src/core/lib/slice/slice_hash_table.cc b/src/core/lib/slice/slice_hash_table.cc
index 8f8e5a6..89340ef 100644
--- a/src/core/lib/slice/slice_hash_table.cc
+++ b/src/core/lib/slice/slice_hash_table.cc
@@ -27,7 +27,7 @@
 
 struct grpc_slice_hash_table {
   gpr_refcount refs;
-  void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value);
+  void (*destroy_value)(void* value);
   int (*value_cmp)(void* a, void* b);
   size_t size;
   size_t max_num_probes;
@@ -58,8 +58,7 @@
 
 grpc_slice_hash_table* grpc_slice_hash_table_create(
     size_t num_entries, grpc_slice_hash_table_entry* entries,
-    void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value),
-    int (*value_cmp)(void* a, void* b)) {
+    void (*destroy_value)(void* value), int (*value_cmp)(void* a, void* b)) {
   grpc_slice_hash_table* table =
       (grpc_slice_hash_table*)gpr_zalloc(sizeof(*table));
   gpr_ref_init(&table->refs, 1);
@@ -81,14 +80,13 @@
   return table;
 }
 
-void grpc_slice_hash_table_unref(grpc_exec_ctx* exec_ctx,
-                                 grpc_slice_hash_table* table) {
+void grpc_slice_hash_table_unref(grpc_slice_hash_table* table) {
   if (table != nullptr && gpr_unref(&table->refs)) {
     for (size_t i = 0; i < table->size; ++i) {
       grpc_slice_hash_table_entry* entry = &table->entries[i];
       if (!is_empty(entry)) {
-        grpc_slice_unref_internal(exec_ctx, entry->key);
-        table->destroy_value(exec_ctx, entry->value);
+        grpc_slice_unref_internal(entry->key);
+        table->destroy_value(entry->value);
       }
     }
     gpr_free(table->entries);
diff --git a/src/core/lib/slice/slice_hash_table.h b/src/core/lib/slice/slice_hash_table.h
index f86f25e..db69da6 100644
--- a/src/core/lib/slice/slice_hash_table.h
+++ b/src/core/lib/slice/slice_hash_table.h
@@ -19,10 +19,6 @@
 
 #include "src/core/lib/transport/metadata.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Hash table implementation.
  *
  * This implementation uses open addressing
@@ -50,12 +46,10 @@
     will be used. */
 grpc_slice_hash_table* grpc_slice_hash_table_create(
     size_t num_entries, grpc_slice_hash_table_entry* entries,
-    void (*destroy_value)(grpc_exec_ctx* exec_ctx, void* value),
-    int (*value_cmp)(void* a, void* b));
+    void (*destroy_value)(void* value), int (*value_cmp)(void* a, void* b));
 
 grpc_slice_hash_table* grpc_slice_hash_table_ref(grpc_slice_hash_table* table);
-void grpc_slice_hash_table_unref(grpc_exec_ctx* exec_ctx,
-                                 grpc_slice_hash_table* table);
+void grpc_slice_hash_table_unref(grpc_slice_hash_table* table);
 
 /** Returns the value from \a table associated with \a key.
     Returns NULL if \a key is not found. */
@@ -71,8 +65,4 @@
 int grpc_slice_hash_table_cmp(const grpc_slice_hash_table* a,
                               const grpc_slice_hash_table* b);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SLICE_SLICE_HASH_TABLE_H */
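
Since destroy_value is now a plain void(void*), table setup simplifies accordingly. A hedged sketch of the updated create/unref signatures; the entry field names and gpr_strdup usage are assumptions based on the surrounding code:

#include <grpc/slice.h>
#include <grpc/support/alloc.h>
#include <grpc/support/string_util.h>
#include "src/core/lib/slice/slice_hash_table.h"

static void destroy_string_value(void* value) { gpr_free(value); }

static grpc_slice_hash_table* MakeExampleTable() {
  grpc_slice_hash_table_entry entries[1];
  entries[0].key = grpc_slice_from_static_string("method");
  entries[0].value = gpr_strdup("config");
  // destroy_value no longer receives a grpc_exec_ctx*.
  return grpc_slice_hash_table_create(1, entries, destroy_string_value,
                                      nullptr /* use default value_cmp */);
}

// Later, on a thread with an ExecCtx in scope:
//   grpc_slice_hash_table_unref(table);
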
diff --git a/src/core/lib/slice/slice_intern.cc b/src/core/lib/slice/slice_intern.cc
index e894913..c578c6d 100644
--- a/src/core/lib/slice/slice_intern.cc
+++ b/src/core/lib/slice/slice_intern.cc
@@ -90,7 +90,7 @@
   gpr_mu_unlock(&shard->mu);
 }
 
-static void interned_slice_unref(grpc_exec_ctx* exec_ctx, void* p) {
+static void interned_slice_unref(void* p) {
   interned_slice_refcount* s = (interned_slice_refcount*)p;
   if (1 == gpr_atm_full_fetch_add(&s->refcnt, -1)) {
     interned_slice_destroy(s);
@@ -101,9 +101,8 @@
   interned_slice_ref(((char*)p) - offsetof(interned_slice_refcount, sub));
 }
 
-static void interned_slice_sub_unref(grpc_exec_ctx* exec_ctx, void* p) {
-  interned_slice_unref(exec_ctx,
-                       ((char*)p) - offsetof(interned_slice_refcount, sub));
+static void interned_slice_sub_unref(void* p) {
+  interned_slice_unref(((char*)p) - offsetof(interned_slice_refcount, sub));
 }
 
 static uint32_t interned_slice_hash(grpc_slice slice) {
diff --git a/src/core/lib/slice/slice_internal.h b/src/core/lib/slice/slice_internal.h
index 10527dc..4e9ab80 100644
--- a/src/core/lib/slice/slice_internal.h
+++ b/src/core/lib/slice/slice_internal.h
@@ -24,19 +24,12 @@
 
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 grpc_slice grpc_slice_ref_internal(grpc_slice slice);
-void grpc_slice_unref_internal(grpc_exec_ctx* exec_ctx, grpc_slice slice);
-void grpc_slice_buffer_reset_and_unref_internal(grpc_exec_ctx* exec_ctx,
-                                                grpc_slice_buffer* sb);
-void grpc_slice_buffer_partial_unref_internal(grpc_exec_ctx* exec_ctx,
-                                              grpc_slice_buffer* sb,
+void grpc_slice_unref_internal(grpc_slice slice);
+void grpc_slice_buffer_reset_and_unref_internal(grpc_slice_buffer* sb);
+void grpc_slice_buffer_partial_unref_internal(grpc_slice_buffer* sb,
                                               size_t idx);
-void grpc_slice_buffer_destroy_internal(grpc_exec_ctx* exec_ctx,
-                                        grpc_slice_buffer* sb);
+void grpc_slice_buffer_destroy_internal(grpc_slice_buffer* sb);
 
 /* Check if a slice is interned */
 bool grpc_slice_is_interned(grpc_slice slice);
@@ -53,8 +46,4 @@
 uint32_t grpc_static_slice_hash(grpc_slice s);
 int grpc_static_slice_eq(grpc_slice a, grpc_slice b);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SLICE_SLICE_INTERNAL_H */
diff --git a/src/core/lib/slice/slice_string_helpers.h b/src/core/lib/slice/slice_string_helpers.h
index acbc41e..7f51b11 100644
--- a/src/core/lib/slice/slice_string_helpers.h
+++ b/src/core/lib/slice/slice_string_helpers.h
@@ -28,10 +28,6 @@
 
 #include "src/core/lib/support/string.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Calls gpr_dump on a slice. */
 char* grpc_dump_slice(grpc_slice slice, uint32_t flags);
 
@@ -41,8 +37,4 @@
 
 bool grpc_parse_slice_to_uint32(grpc_slice str, uint32_t* result);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SLICE_SLICE_STRING_HELPERS_H */
diff --git a/src/core/lib/slice/slice_traits.h b/src/core/lib/slice/slice_traits.h
index 7fdb675..4b898bd 100644
--- a/src/core/lib/slice/slice_traits.h
+++ b/src/core/lib/slice/slice_traits.h
@@ -22,16 +22,8 @@
 #include <grpc/slice.h>
 #include <stdbool.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 bool grpc_slice_is_legal_header(grpc_slice s);
 bool grpc_slice_is_legal_nonbin_header(grpc_slice s);
 bool grpc_slice_is_bin_suffixed(grpc_slice s);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SLICE_SLICE_TRAITS_H */
diff --git a/src/core/lib/support/arena.h b/src/core/lib/support/arena.h
index 4d43c56..cfe973a 100644
--- a/src/core/lib/support/arena.h
+++ b/src/core/lib/support/arena.h
@@ -27,10 +27,6 @@
 
 #include <stddef.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct gpr_arena gpr_arena;
 
 // Create an arena, with \a initial_size bytes in the first allocated buffer
@@ -40,8 +36,4 @@
 // Destroy an arena, returning the total number of bytes allocated
 size_t gpr_arena_destroy(gpr_arena* arena);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SUPPORT_ARENA_H */
diff --git a/src/core/lib/support/debug_location.h b/src/core/lib/support/debug_location.h
new file mode 100644
index 0000000..9b3f922
--- /dev/null
+++ b/src/core/lib/support/debug_location.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SUPPORT_DEBUG_LOCATION_H
+#define GRPC_CORE_LIB_SUPPORT_DEBUG_LOCATION_H
+
+namespace grpc_core {
+
+// Used for tracking file and line where a call is made for debug builds.
+// No-op for non-debug builds.
+// Callers can use the DEBUG_LOCATION macro in either case.
+#ifndef NDEBUG
+class DebugLocation {
+ public:
+  DebugLocation(const char* file, int line) : file_(file), line_(line) {}
+  bool Log() const { return true; }
+  const char* file() const { return file_; }
+  int line() const { return line_; }
+
+ private:
+  const char* file_;
+  const int line_;
+};
+#define DEBUG_LOCATION ::grpc_core::DebugLocation(__FILE__, __LINE__)
+#else
+class DebugLocation {
+ public:
+  bool Log() const { return false; }
+  const char* file() const { return nullptr; }
+  int line() const { return -1; }
+};
+#define DEBUG_LOCATION ::grpc_core::DebugLocation()
+#endif
+
+}  // namespace grpc_core
+
+#endif /* GRPC_CORE_LIB_SUPPORT_DEBUG_LOCATION_H */
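
A hedged illustration of how callers thread DEBUG_LOCATION through an API; only DebugLocation and the DEBUG_LOCATION macro come from the header above, the helper below is hypothetical:

#include <grpc/support/log.h>
#include "src/core/lib/support/debug_location.h"

// Hypothetical helper: logs the call site in debug builds, no-op otherwise.
void LogRefSite(const grpc_core::DebugLocation& location, const char* reason) {
  if (location.Log()) {  // true only when compiled without NDEBUG
    gpr_log(GPR_DEBUG, "ref taken at %s:%d (%s)", location.file(),
            location.line(), reason);
  }
}

// Call site:
//   LogRefSite(DEBUG_LOCATION, "subchannel ref");
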
diff --git a/src/core/lib/support/env.h b/src/core/lib/support/env.h
index f50d7bc..2452fd3 100644
--- a/src/core/lib/support/env.h
+++ b/src/core/lib/support/env.h
@@ -21,10 +21,6 @@
 
 #include <stdio.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Env utility functions */
 
 /* Gets the environment variable value with the specified name.
@@ -42,8 +38,4 @@
    level of logging. So DO NOT USE THIS. */
 const char* gpr_getenv_silent(const char* name, char** dst);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SUPPORT_ENV_H */
diff --git a/src/core/lib/support/log.cc b/src/core/lib/support/log.cc
index e9adc6c..2a40745 100644
--- a/src/core/lib/support/log.cc
+++ b/src/core/lib/support/log.cc
@@ -27,7 +27,7 @@
 #include <stdio.h>
 #include <string.h>
 
-extern "C" void gpr_default_log(gpr_log_func_args* args);
+void gpr_default_log(gpr_log_func_args* args);
 static gpr_atm g_log_func = (gpr_atm)gpr_default_log;
 static gpr_atm g_min_severity_to_print = GPR_LOG_VERBOSITY_UNSET;
 
diff --git a/src/core/lib/support/log_android.cc b/src/core/lib/support/log_android.cc
index 73d24cd..0d3ac0f 100644
--- a/src/core/lib/support/log_android.cc
+++ b/src/core/lib/support/log_android.cc
@@ -39,8 +39,8 @@
   return ANDROID_LOG_DEFAULT;
 }
 
-extern "C" void gpr_log(const char* file, int line, gpr_log_severity severity,
-                        const char* format, ...) {
+void gpr_log(const char* file, int line, gpr_log_severity severity,
+             const char* format, ...) {
   char* message = NULL;
   va_list args;
   va_start(args, format);
@@ -50,7 +50,7 @@
   free(message);
 }
 
-extern "C" void gpr_default_log(gpr_log_func_args* args) {
+void gpr_default_log(gpr_log_func_args* args) {
   const char* final_slash;
   const char* display_file;
   char* output = NULL;
diff --git a/src/core/lib/support/log_linux.cc b/src/core/lib/support/log_linux.cc
index e0e277f..6b1f1c7 100644
--- a/src/core/lib/support/log_linux.cc
+++ b/src/core/lib/support/log_linux.cc
@@ -56,7 +56,7 @@
   free(message);
 }
 
-extern "C" void gpr_default_log(gpr_log_func_args* args) {
+void gpr_default_log(gpr_log_func_args* args) {
   const char* final_slash;
   char* prefix;
   const char* display_file;
diff --git a/src/core/lib/support/log_posix.cc b/src/core/lib/support/log_posix.cc
index e765f91..9fab480 100644
--- a/src/core/lib/support/log_posix.cc
+++ b/src/core/lib/support/log_posix.cc
@@ -56,7 +56,7 @@
   gpr_free(allocated);
 }
 
-extern "C" void gpr_default_log(gpr_log_func_args* args) {
+void gpr_default_log(gpr_log_func_args* args) {
   const char* final_slash;
   const char* display_file;
   char time_buffer[64];
diff --git a/src/core/lib/support/log_windows.cc b/src/core/lib/support/log_windows.cc
index d448179..0013bf4 100644
--- a/src/core/lib/support/log_windows.cc
+++ b/src/core/lib/support/log_windows.cc
@@ -65,7 +65,7 @@
 }
 
 /* Simple starter implementation */
-extern "C" void gpr_default_log(gpr_log_func_args* args) {
+void gpr_default_log(gpr_log_func_args* args) {
   const char* final_slash;
   const char* display_file;
   char time_buffer[64];
diff --git a/src/core/lib/support/mpscq.h b/src/core/lib/support/mpscq.h
index fb22742..648ead1 100644
--- a/src/core/lib/support/mpscq.h
+++ b/src/core/lib/support/mpscq.h
@@ -24,10 +24,6 @@
 #include <stdbool.h>
 #include <stddef.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 // Multiple-producer single-consumer lock free queue, based upon the
 // implementation from Dmitry Vyukov here:
 // http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue
@@ -84,8 +80,5 @@
 // Pop a node.  Returns NULL only if the queue was empty at some point after
 // calling this function
 gpr_mpscq_node* gpr_locked_mpscq_pop(gpr_locked_mpscq* q);
-#ifdef __cplusplus
-}
-#endif
 
 #endif /* GRPC_CORE_LIB_SUPPORT_MPSCQ_H */
diff --git a/src/core/lib/support/murmur_hash.h b/src/core/lib/support/murmur_hash.h
index d02bba6..422770f 100644
--- a/src/core/lib/support/murmur_hash.h
+++ b/src/core/lib/support/murmur_hash.h
@@ -23,15 +23,7 @@
 
 #include <stddef.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* compute the hash of key (length len) */
 uint32_t gpr_murmur_hash3(const void* key, size_t len, uint32_t seed);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SUPPORT_MURMUR_HASH_H */
diff --git a/src/core/lib/support/ref_counted.h b/src/core/lib/support/ref_counted.h
new file mode 100644
index 0000000..4c662f9
--- /dev/null
+++ b/src/core/lib/support/ref_counted.h
@@ -0,0 +1,122 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SUPPORT_REF_COUNTED_H
+#define GRPC_CORE_LIB_SUPPORT_REF_COUNTED_H
+
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+
+#include "src/core/lib/debug/trace.h"
+#include "src/core/lib/support/debug_location.h"
+#include "src/core/lib/support/memory.h"
+
+namespace grpc_core {
+
+// A base class for reference-counted objects.
+// New objects should be created via New() and start with a refcount of 1.
+// When the refcount reaches 0, the object will be deleted via Delete().
+class RefCounted {
+ public:
+  void Ref() { gpr_ref(&refs_); }
+
+  void Unref() {
+    if (gpr_unref(&refs_)) {
+      Delete(this);
+    }
+  }
+
+  // Not copyable nor movable.
+  RefCounted(const RefCounted&) = delete;
+  RefCounted& operator=(const RefCounted&) = delete;
+
+ protected:
+  // Allow Delete() to access destructor.
+  template <typename T>
+  friend void Delete(T*);
+
+  RefCounted() { gpr_ref_init(&refs_, 1); }
+
+  virtual ~RefCounted() {}
+
+ private:
+  gpr_refcount refs_;
+};
+
+// An alternative version of the RefCounted base class that
+// supports tracing.  This is intended to be used in cases where the
+// object will be handled both by idiomatic C++ code using smart
+// pointers and legacy code that is manually calling Ref() and Unref().
+// Once all of our code is converted to idiomatic C++, we may be able to
+// eliminate this class.
+class RefCountedWithTracing {
+ public:
+  void Ref() { gpr_ref(&refs_); }
+
+  void Ref(const DebugLocation& location, const char* reason) {
+    if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
+      gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
+      gpr_log(GPR_DEBUG, "%s:%p %s:%d ref %" PRIdPTR " -> %" PRIdPTR " %s",
+              trace_flag_->name(), this, location.file(), location.line(),
+              old_refs, old_refs + 1, reason);
+    }
+    Ref();
+  }
+
+  void Unref() {
+    if (gpr_unref(&refs_)) {
+      Delete(this);
+    }
+  }
+
+  void Unref(const DebugLocation& location, const char* reason) {
+    if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
+      gpr_atm old_refs = gpr_atm_no_barrier_load(&refs_.count);
+      gpr_log(GPR_DEBUG, "%s:%p %s:%d unref %" PRIdPTR " -> %" PRIdPTR " %s",
+              trace_flag_->name(), this, location.file(), location.line(),
+              old_refs, old_refs - 1, reason);
+    }
+    Unref();
+  }
+
+  // Not copyable nor movable.
+  RefCountedWithTracing(const RefCountedWithTracing&) = delete;
+  RefCountedWithTracing& operator=(const RefCountedWithTracing&) = delete;
+
+ protected:
+  // Allow Delete() to access destructor.
+  template <typename T>
+  friend void Delete(T*);
+
+  RefCountedWithTracing() : RefCountedWithTracing(nullptr) {}
+
+  explicit RefCountedWithTracing(TraceFlag* trace_flag)
+      : trace_flag_(trace_flag) {
+    gpr_ref_init(&refs_, 1);
+  }
+
+  virtual ~RefCountedWithTracing() {}
+
+ private:
+  TraceFlag* trace_flag_ = nullptr;
+  gpr_refcount refs_;
+};
+
+}  // namespace grpc_core
+
+#endif /* GRPC_CORE_LIB_SUPPORT_REF_COUNTED_H */
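
A minimal sketch of a type built on the new RefCounted base; New() and Delete() come from memory.h as the comment above suggests, and the Connection name is made up for illustration:

#include "src/core/lib/support/memory.h"
#include "src/core/lib/support/ref_counted.h"

// Hypothetical refcounted object; New() hands it out with a refcount of 1.
class Connection : public grpc_core::RefCounted {
 public:
  void Ping() { /* ... */ }
};

void Example() {
  Connection* conn = grpc_core::New<Connection>();
  conn->Ref();    // a second owner takes a ref
  conn->Ping();
  conn->Unref();  // second owner releases
  conn->Unref();  // refcount hits 0; object is Delete()d
}
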
diff --git a/src/core/lib/support/ref_counted_ptr.h b/src/core/lib/support/ref_counted_ptr.h
new file mode 100644
index 0000000..dc2385e
--- /dev/null
+++ b/src/core/lib/support/ref_counted_ptr.h
@@ -0,0 +1,90 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_CORE_LIB_SUPPORT_REF_COUNTED_PTR_H
+#define GRPC_CORE_LIB_SUPPORT_REF_COUNTED_PTR_H
+
+#include <utility>
+
+#include "src/core/lib/support/memory.h"
+
+namespace grpc_core {
+
+// A smart pointer class for objects that provide Ref() and Unref() methods,
+// such as those provided by the RefCounted base class.
+template <typename T>
+class RefCountedPtr {
+ public:
+  RefCountedPtr() {}
+
+  // If value is non-null, we take ownership of a ref to it.
+  explicit RefCountedPtr(T* value) { value_ = value; }
+
+  // Move support.
+  RefCountedPtr(RefCountedPtr&& other) {
+    value_ = other.value_;
+    other.value_ = nullptr;
+  }
+  RefCountedPtr& operator=(RefCountedPtr&& other) {
+    if (value_ != nullptr) value_->Unref();
+    value_ = other.value_;
+    other.value_ = nullptr;
+    return *this;
+  }
+
+  // Copy support.
+  RefCountedPtr(const RefCountedPtr& other) {
+    if (other.value_ != nullptr) other.value_->Ref();
+    value_ = other.value_;
+  }
+  RefCountedPtr& operator=(const RefCountedPtr& other) {
+    // Note: Order of reffing and unreffing is important here in case value_
+    // and other.value_ are the same object.
+    if (other.value_ != nullptr) other.value_->Ref();
+    if (value_ != nullptr) value_->Unref();
+    value_ = other.value_;
+    return *this;
+  }
+
+  ~RefCountedPtr() {
+    if (value_ != nullptr) value_->Unref();
+  }
+
+  // If value is non-null, we take ownership of a ref to it.
+  void reset(T* value = nullptr) {
+    if (value_ != nullptr) value_->Unref();
+    value_ = value;
+  }
+
+  T* get() const { return value_; }
+
+  T& operator*() const { return *value_; }
+  T* operator->() const { return value_; }
+
+ private:
+  T* value_ = nullptr;
+};
+
+template <typename T, typename... Args>
+inline RefCountedPtr<T> MakeRefCounted(Args&&... args) {
+  return RefCountedPtr<T>(New<T>(std::forward<Args>(args)...));
+}
+
+}  // namespace grpc_core
+
+#endif /* GRPC_CORE_LIB_SUPPORT_REF_COUNTED_PTR_H */
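
And the smart-pointer counterpart, sketched against a hypothetical RefCounted subclass like the one above:

#include "src/core/lib/support/ref_counted.h"
#include "src/core/lib/support/ref_counted_ptr.h"

class Connection : public grpc_core::RefCounted { /* as above */ };

void Example() {
  // MakeRefCounted New()s the object and adopts its initial ref.
  grpc_core::RefCountedPtr<Connection> conn =
      grpc_core::MakeRefCounted<Connection>();
  grpc_core::RefCountedPtr<Connection> alias = conn;  // copy takes an extra ref
  alias.reset();  // drops that ref
}  // conn goes out of scope, last ref dropped, object is Delete()d
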
diff --git a/src/core/lib/support/string.h b/src/core/lib/support/string.h
index 0b18ffc..dd37f0b 100644
--- a/src/core/lib/support/string.h
+++ b/src/core/lib/support/string.h
@@ -24,10 +24,6 @@
 
 #include <grpc/support/port_platform.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* String utility functions */
 
 /* Flags for gpr_dump function. */
@@ -109,8 +105,5 @@
 
 /** Return true if lower(s) equals "true", "yes" or "1", otherwise false. */
 bool gpr_is_true(const char* s);
-#ifdef __cplusplus
-}
-#endif
 
 #endif /* GRPC_CORE_LIB_SUPPORT_STRING_H */
diff --git a/src/core/lib/support/string_windows.h b/src/core/lib/support/string_windows.h
index 6771647..7c7f31e 100644
--- a/src/core/lib/support/string_windows.h
+++ b/src/core/lib/support/string_windows.h
@@ -21,10 +21,6 @@
 
 #include <grpc/support/port_platform.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #ifdef GPR_WINDOWS
 
 /* These allocate new strings using gpr_malloc to convert from and to utf-8. */
@@ -33,8 +29,4 @@
 
 #endif /* GPR_WINDOWS */
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SUPPORT_STRING_WINDOWS_H */
diff --git a/src/core/lib/support/sync_posix.cc b/src/core/lib/support/sync_posix.cc
index dfdd233..c3f6b10 100644
--- a/src/core/lib/support/sync_posix.cc
+++ b/src/core/lib/support/sync_posix.cc
@@ -66,7 +66,12 @@
 /*----------------------------------------*/
 
 void gpr_cv_init(gpr_cv* cv) {
-  GPR_ASSERT(pthread_cond_init(cv, nullptr) == 0);
+  pthread_condattr_t attr;
+  GPR_ASSERT(pthread_condattr_init(&attr) == 0);
+#if GPR_LINUX
+  GPR_ASSERT(pthread_condattr_setclock(&attr, CLOCK_MONOTONIC) == 0);
+#endif  // GPR_LINUX
+  GPR_ASSERT(pthread_cond_init(cv, &attr) == 0);
 }
 
 void gpr_cv_destroy(gpr_cv* cv) { GPR_ASSERT(pthread_cond_destroy(cv) == 0); }
@@ -78,7 +83,11 @@
     err = pthread_cond_wait(cv, mu);
   } else {
     struct timespec abs_deadline_ts;
+#if GPR_LINUX
+    abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_MONOTONIC);
+#else
     abs_deadline = gpr_convert_clock_type(abs_deadline, GPR_CLOCK_REALTIME);
+#endif  // GPR_LINUX
     abs_deadline_ts.tv_sec = (time_t)abs_deadline.tv_sec;
     abs_deadline_ts.tv_nsec = abs_deadline.tv_nsec;
     err = pthread_cond_timedwait(cv, mu, &abs_deadline_ts);
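
With the condvar initialized against CLOCK_MONOTONIC on Linux, timed waits keyed to the monotonic clock are no longer skewed by wall-clock adjustments. A small hedged sketch using the public gpr sync API; assumes the mutex and cv were initialized elsewhere with gpr_mu_init/gpr_cv_init:

#include <grpc/support/sync.h>
#include <grpc/support/time.h>

// Wait up to ~100ms for *flag to become true.
bool WaitBriefly(gpr_mu* mu, gpr_cv* cv, bool* flag) {
  gpr_timespec deadline = gpr_time_add(
      gpr_now(GPR_CLOCK_MONOTONIC), gpr_time_from_millis(100, GPR_TIMESPAN));
  gpr_mu_lock(mu);
  while (!*flag) {
    if (gpr_cv_wait(cv, mu, deadline)) break;  // non-zero return => timed out
  }
  bool result = *flag;
  gpr_mu_unlock(mu);
  return result;
}
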
diff --git a/src/core/lib/support/thd_posix.cc b/src/core/lib/support/thd_posix.cc
index c2a4f41..f0ed48d 100644
--- a/src/core/lib/support/thd_posix.cc
+++ b/src/core/lib/support/thd_posix.cc
@@ -41,6 +41,7 @@
 struct thd_arg {
   void (*body)(void* arg); /* body of a thread */
   void* arg;               /* argument to a thread */
+  const char* name;        /* name of thread. Can be nullptr. */
 };
 
 static void inc_thd_count();
@@ -50,12 +51,26 @@
 static void* thread_body(void* v) {
   struct thd_arg a = *(struct thd_arg*)v;
   free(v);
+  if (a.name != nullptr) {
+#if GPR_APPLE_PTHREAD_NAME
+    /* Apple supports 64 characters, and will truncate if it's longer. */
+    pthread_setname_np(a.name);
+#elif GPR_LINUX_PTHREAD_NAME
+    /* Linux supports 16 characters max, and will error if it's longer. */
+    char buf[16];
+    size_t buf_len = GPR_ARRAY_SIZE(buf) - 1;
+    strncpy(buf, a.name, buf_len);
+    buf[buf_len] = '\0';
+    pthread_setname_np(pthread_self(), buf);
+#endif  // GPR_APPLE_PTHREAD_NAME
+  }
   (*a.body)(a.arg);
   dec_thd_count();
   return nullptr;
 }
 
-int gpr_thd_new(gpr_thd_id* t, void (*thd_body)(void* arg), void* arg,
+int gpr_thd_new(gpr_thd_id* t, const char* thd_name,
+                void (*thd_body)(void* arg), void* arg,
                 const gpr_thd_options* options) {
   int thread_started;
   pthread_attr_t attr;
@@ -66,6 +81,7 @@
   GPR_ASSERT(a != nullptr);
   a->body = thd_body;
   a->arg = arg;
+  a->name = thd_name;
   inc_thd_count();
 
   GPR_ASSERT(pthread_attr_init(&attr) == 0);
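
gpr_thd_new now takes a thread name (possibly nullptr); Linux truncates it to the 16-character pthread limit before pthread_setname_np. A hedged sketch of the updated call, assuming the existing gpr_thd_options helpers:

#include <grpc/support/thd.h>

static void Worker(void* arg) { /* ... */ }

void SpawnNamedThread() {
  gpr_thd_id tid;
  gpr_thd_options options = gpr_thd_options_default();
  gpr_thd_options_set_joinable(&options);
  // New second parameter: a human-readable thread name.
  if (gpr_thd_new(&tid, "grpc_worker", Worker, nullptr /* arg */, &options)) {
    gpr_thd_join(tid);
  }
}
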
diff --git a/src/core/lib/support/thd_windows.cc b/src/core/lib/support/thd_windows.cc
index 0875c2f..f920770 100644
--- a/src/core/lib/support/thd_windows.cc
+++ b/src/core/lib/support/thd_windows.cc
@@ -65,7 +65,8 @@
   return 0;
 }
 
-int gpr_thd_new(gpr_thd_id* t, void (*thd_body)(void* arg), void* arg,
+int gpr_thd_new(gpr_thd_id* t, const char* thd_name,
+                void (*thd_body)(void* arg), void* arg,
                 const gpr_thd_options* options) {
   HANDLE handle;
   struct thd_info* info = (struct thd_info*)gpr_malloc(sizeof(*info));
diff --git a/src/core/lib/support/time_posix.cc b/src/core/lib/support/time_posix.cc
index 7f65205..47a8494 100644
--- a/src/core/lib/support/time_posix.cc
+++ b/src/core/lib/support/time_posix.cc
@@ -127,9 +127,7 @@
 }
 #endif
 
-extern "C" {
 gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type) = now_impl;
-}
 
 #ifdef GPR_LOW_LEVEL_COUNTERS
 gpr_atm gpr_now_call_count;
diff --git a/src/core/lib/support/time_precise.h b/src/core/lib/support/time_precise.h
index 3befda3..35cd154 100644
--- a/src/core/lib/support/time_precise.h
+++ b/src/core/lib/support/time_precise.h
@@ -21,15 +21,7 @@
 
 #include <grpc/support/time.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void gpr_precise_clock_init(void);
 void gpr_precise_clock_now(gpr_timespec* clk);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SUPPORT_TIME_PRECISE_H */
diff --git a/src/core/lib/support/time_windows.cc b/src/core/lib/support/time_windows.cc
index 08c1b22..fb17e5c 100644
--- a/src/core/lib/support/time_windows.cc
+++ b/src/core/lib/support/time_windows.cc
@@ -68,9 +68,7 @@
   return now_tv;
 }
 
-extern "C" {
 gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type) = now_impl;
-}
 
 gpr_timespec gpr_now(gpr_clock_type clock_type) {
   return gpr_now_impl(clock_type);
diff --git a/src/core/lib/support/tmpfile.h b/src/core/lib/support/tmpfile.h
index 437d871..c5ceda8 100644
--- a/src/core/lib/support/tmpfile.h
+++ b/src/core/lib/support/tmpfile.h
@@ -21,18 +21,10 @@
 
 #include <stdio.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Creates a temporary file from a prefix.
    If tmp_filename is not NULL, *tmp_filename is assigned the name of the
    created file, and it is the caller's responsibility to gpr_free it,
    unless an error occurs, in which case it will be set to NULL. */
 FILE* gpr_tmpfile(const char* prefix, char** tmp_filename);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SUPPORT_TMPFILE_H */
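
For reference, a small hedged example of the gpr_tmpfile contract described above; the "grpc_example" prefix is illustrative:

#include <stdio.h>
#include <grpc/support/alloc.h>
#include "src/core/lib/support/tmpfile.h"

void WriteScratchFile() {
  char* path = nullptr;
  FILE* f = gpr_tmpfile("grpc_example", &path);
  if (f != nullptr) {
    fputs("scratch data\n", f);
    fclose(f);
  }
  gpr_free(path);  // safe even if creation failed and path was set to NULL
}
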
diff --git a/src/core/lib/surface/alarm.cc b/src/core/lib/surface/alarm.cc
index b1c9f7b..f6ea016 100644
--- a/src/core/lib/surface/alarm.cc
+++ b/src/core/lib/surface/alarm.cc
@@ -45,11 +45,11 @@
 
 static void alarm_unref(grpc_alarm* alarm) {
   if (gpr_unref(&alarm->refs)) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     if (alarm->cq != nullptr) {
-      GRPC_CQ_INTERNAL_UNREF(&exec_ctx, alarm->cq, "alarm");
+      GRPC_CQ_INTERNAL_UNREF(alarm->cq, "alarm");
     }
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_free(alarm);
   }
 }
@@ -80,20 +80,19 @@
 }
 #endif
 
-static void alarm_end_completion(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_cq_completion* c) {
+static void alarm_end_completion(void* arg, grpc_cq_completion* c) {
   grpc_alarm* alarm = (grpc_alarm*)arg;
   GRPC_ALARM_UNREF(alarm, "dequeue-end-op");
 }
 
-static void alarm_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void alarm_cb(void* arg, grpc_error* error) {
   grpc_alarm* alarm = (grpc_alarm*)arg;
 
   /* We are queuing an op on the completion queue. This means the alarm's
      structure cannot be destroyed until the op is dequeued. Adding an extra
      ref here and unref'ing when the op is dequeued will achieve this. */
   GRPC_ALARM_REF(alarm, "queue-end-op");
-  grpc_cq_end_op(exec_ctx, alarm->cq, alarm->tag, error, alarm_end_completion,
+  grpc_cq_end_op(alarm->cq, alarm->tag, error, alarm_end_completion,
                  (void*)alarm, &alarm->completion);
 }
 
@@ -116,22 +115,20 @@
 
 void grpc_alarm_set(grpc_alarm* alarm, grpc_completion_queue* cq,
                     gpr_timespec deadline, void* tag, void* reserved) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   GRPC_CQ_INTERNAL_REF(cq, "alarm");
   alarm->cq = cq;
   alarm->tag = tag;
 
   GPR_ASSERT(grpc_cq_begin_op(cq, tag));
-  grpc_timer_init(&exec_ctx, &alarm->alarm,
-                  grpc_timespec_to_millis_round_up(deadline), &alarm->on_alarm);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_timer_init(&alarm->alarm, grpc_timespec_to_millis_round_up(deadline),
+                  &alarm->on_alarm);
 }
 
 void grpc_alarm_cancel(grpc_alarm* alarm, void* reserved) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_timer_cancel(&exec_ctx, &alarm->alarm);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_timer_cancel(&alarm->alarm);
 }
 
 void grpc_alarm_destroy(grpc_alarm* alarm, void* reserved) {
diff --git a/src/core/lib/surface/alarm_internal.h b/src/core/lib/surface/alarm_internal.h
index 2ee3a31..99e9812 100644
--- a/src/core/lib/surface/alarm_internal.h
+++ b/src/core/lib/surface/alarm_internal.h
@@ -24,10 +24,6 @@
 
 extern grpc_core::DebugOnlyTraceFlag grpc_trace_alarm_refcount;
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #ifndef NDEBUG
 
 #define GRPC_ALARM_REF(a, reason) alarm_ref_dbg(a, reason, __FILE__, __LINE__)
@@ -41,8 +37,4 @@
 
 #endif /* defined(NDEBUG) */
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_ALARM_INTERNAL_H */
diff --git a/src/core/lib/surface/byte_buffer.cc b/src/core/lib/surface/byte_buffer.cc
index 9e0636b..e4c2a4a 100644
--- a/src/core/lib/surface/byte_buffer.cc
+++ b/src/core/lib/surface/byte_buffer.cc
@@ -71,14 +71,13 @@
 
 void grpc_byte_buffer_destroy(grpc_byte_buffer* bb) {
   if (!bb) return;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   switch (bb->type) {
     case GRPC_BB_RAW:
-      grpc_slice_buffer_destroy_internal(&exec_ctx, &bb->data.raw.slice_buffer);
+      grpc_slice_buffer_destroy_internal(&bb->data.raw.slice_buffer);
       break;
   }
   gpr_free(bb);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 size_t grpc_byte_buffer_length(grpc_byte_buffer* bb) {
diff --git a/src/core/lib/surface/byte_buffer_reader.cc b/src/core/lib/surface/byte_buffer_reader.cc
index 001227a..81a48e9 100644
--- a/src/core/lib/surface/byte_buffer_reader.cc
+++ b/src/core/lib/surface/byte_buffer_reader.cc
@@ -42,15 +42,14 @@
 
 int grpc_byte_buffer_reader_init(grpc_byte_buffer_reader* reader,
                                  grpc_byte_buffer* buffer) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_slice_buffer decompressed_slices_buffer;
   reader->buffer_in = buffer;
   switch (reader->buffer_in->type) {
     case GRPC_BB_RAW:
       grpc_slice_buffer_init(&decompressed_slices_buffer);
       if (is_compressed(reader->buffer_in)) {
-        if (grpc_msg_decompress(&exec_ctx,
-                                reader->buffer_in->data.raw.compression,
+        if (grpc_msg_decompress(reader->buffer_in->data.raw.compression,
                                 &reader->buffer_in->data.raw.slice_buffer,
                                 &decompressed_slices_buffer) == 0) {
           gpr_log(GPR_ERROR,
@@ -64,15 +63,14 @@
               grpc_raw_byte_buffer_create(decompressed_slices_buffer.slices,
                                           decompressed_slices_buffer.count);
         }
-        grpc_slice_buffer_destroy_internal(&exec_ctx,
-                                           &decompressed_slices_buffer);
+        grpc_slice_buffer_destroy_internal(&decompressed_slices_buffer);
       } else { /* not compressed, use the input buffer as output */
         reader->buffer_out = reader->buffer_in;
       }
       reader->current.index = 0;
       break;
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return 1;
 }
 
@@ -112,14 +110,14 @@
   grpc_slice out_slice = GRPC_SLICE_MALLOC(input_size);
   uint8_t* const outbuf = GRPC_SLICE_START_PTR(out_slice); /* just an alias */
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (grpc_byte_buffer_reader_next(reader, &in_slice) != 0) {
     const size_t slice_length = GRPC_SLICE_LENGTH(in_slice);
     memcpy(&(outbuf[bytes_read]), GRPC_SLICE_START_PTR(in_slice), slice_length);
     bytes_read += slice_length;
-    grpc_slice_unref_internal(&exec_ctx, in_slice);
+    grpc_slice_unref_internal(in_slice);
     GPR_ASSERT(bytes_read <= input_size);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return out_slice;
 }
diff --git a/src/core/lib/surface/call.cc b/src/core/lib/surface/call.cc
index a2eb02b..d677576 100644
--- a/src/core/lib/surface/call.cc
+++ b/src/core/lib/surface/call.cc
@@ -270,30 +270,25 @@
 #define CALL_FROM_TOP_ELEM(top_elem) \
   CALL_FROM_CALL_STACK(grpc_call_stack_from_top_element(top_elem))
 
-static void execute_batch(grpc_exec_ctx* exec_ctx, grpc_call* call,
-                          grpc_transport_stream_op_batch* op,
+static void execute_batch(grpc_call* call, grpc_transport_stream_op_batch* op,
                           grpc_closure* start_batch_closure);
-static void cancel_with_status(grpc_exec_ctx* exec_ctx, grpc_call* c,
-                               status_source source, grpc_status_code status,
+static void cancel_with_status(grpc_call* c, status_source source,
+                               grpc_status_code status,
                                const char* description);
-static void cancel_with_error(grpc_exec_ctx* exec_ctx, grpc_call* c,
-                              status_source source, grpc_error* error);
-static void destroy_call(grpc_exec_ctx* exec_ctx, void* call_stack,
-                         grpc_error* error);
-static void receiving_slice_ready(grpc_exec_ctx* exec_ctx, void* bctlp,
-                                  grpc_error* error);
-static void get_final_status(grpc_exec_ctx* exec_ctx, grpc_call* call,
-                             void (*set_value)(grpc_status_code code,
-                                               void* user_data),
-                             void* set_value_user_data, grpc_slice* details,
-                             const char** error_string);
+static void cancel_with_error(grpc_call* c, status_source source,
+                              grpc_error* error);
+static void destroy_call(void* call_stack, grpc_error* error);
+static void receiving_slice_ready(void* bctlp, grpc_error* error);
+static void get_final_status(
+    grpc_call* call, void (*set_value)(grpc_status_code code, void* user_data),
+    void* set_value_user_data, grpc_slice* details, const char** error_string);
 static void set_status_value_directly(grpc_status_code status, void* dest);
-static void set_status_from_error(grpc_exec_ctx* exec_ctx, grpc_call* call,
-                                  status_source source, grpc_error* error);
-static void process_data_after_md(grpc_exec_ctx* exec_ctx, batch_control* bctl);
-static void post_batch_completion(grpc_exec_ctx* exec_ctx, batch_control* bctl);
-static void add_batch_error(grpc_exec_ctx* exec_ctx, batch_control* bctl,
-                            grpc_error* error, bool has_cancelled);
+static void set_status_from_error(grpc_call* call, status_source source,
+                                  grpc_error* error);
+static void process_data_after_md(batch_control* bctl);
+static void post_batch_completion(batch_control* bctl);
+static void add_batch_error(batch_control* bctl, grpc_error* error,
+                            bool has_cancelled);
 
 static void add_init_error(grpc_error** composite, grpc_error* new_err) {
   if (new_err == GRPC_ERROR_NONE) return;
@@ -311,7 +306,8 @@
   if (p == nullptr) {
     p = (parent_call*)gpr_arena_alloc(call->arena, sizeof(*p));
     gpr_mu_init(&p->child_list_mu);
-    if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm)NULL, (gpr_atm)p)) {
+    if (!gpr_atm_rel_cas(&call->parent_call_atm, (gpr_atm) nullptr,
+                         (gpr_atm)p)) {
       gpr_mu_destroy(&p->child_list_mu);
       p = (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
     }
@@ -323,8 +319,7 @@
   return (parent_call*)gpr_atm_acq_load(&call->parent_call_atm);
 }
 
-grpc_error* grpc_call_create(grpc_exec_ctx* exec_ctx,
-                             const grpc_call_create_args* args,
+grpc_error* grpc_call_create(const grpc_call_create_args* args,
                              grpc_call** out_call) {
   size_t i, j;
   grpc_error* error = GRPC_ERROR_NONE;
@@ -333,7 +328,7 @@
   grpc_call* call;
   GPR_TIMER_BEGIN("grpc_call_create", 0);
   size_t initial_size = grpc_channel_get_call_size_estimate(args->channel);
-  GRPC_STATS_INC_CALL_INITIAL_SIZE(exec_ctx, initial_size);
+  GRPC_STATS_INC_CALL_INITIAL_SIZE(initial_size);
   gpr_arena* arena = gpr_arena_create(initial_size);
   call = (grpc_call*)gpr_arena_alloc(
       arena, sizeof(grpc_call) + channel_stack->call_stack_size);
@@ -348,9 +343,9 @@
   GPR_BITSET(&call->encodings_accepted_by_peer, GRPC_COMPRESS_NONE);
   call->is_client = args->server_transport_data == nullptr;
   if (call->is_client) {
-    GRPC_STATS_INC_CLIENT_CALLS_CREATED(exec_ctx);
+    GRPC_STATS_INC_CLIENT_CALLS_CREATED();
   } else {
-    GRPC_STATS_INC_SERVER_CALLS_CREATED(exec_ctx);
+    GRPC_STATS_INC_SERVER_CALLS_CREATED();
   }
   call->stream_op_payload.context = call->context;
   grpc_slice path = grpc_empty_slice();
@@ -445,15 +440,13 @@
                                       send_deadline,
                                       call->arena,
                                       &call->call_combiner};
-  add_init_error(&error, grpc_call_stack_init(exec_ctx, channel_stack, 1,
-                                              destroy_call, call, &call_args));
+  add_init_error(&error, grpc_call_stack_init(channel_stack, 1, destroy_call,
+                                              call, &call_args));
   if (error != GRPC_ERROR_NONE) {
-    cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE,
-                      GRPC_ERROR_REF(error));
+    cancel_with_error(call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error));
   }
   if (immediately_cancel) {
-    cancel_with_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE,
-                      GRPC_ERROR_CANCELLED);
+    cancel_with_error(call, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED);
   }
   if (args->cq != nullptr) {
     GPR_ASSERT(
@@ -468,17 +461,17 @@
         args->pollset_set_alternative);
   }
   if (!grpc_polling_entity_is_empty(&call->pollent)) {
-    grpc_call_stack_set_pollset_or_pollset_set(
-        exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
+    grpc_call_stack_set_pollset_or_pollset_set(CALL_STACK_FROM_CALL(call),
+                                               &call->pollent);
   }
 
-  grpc_slice_unref_internal(exec_ctx, path);
+  grpc_slice_unref_internal(path);
 
   GPR_TIMER_END("grpc_call_create", 0);
   return error;
 }
 
-void grpc_call_set_completion_queue(grpc_exec_ctx* exec_ctx, grpc_call* call,
+void grpc_call_set_completion_queue(grpc_call* call,
                                     grpc_completion_queue* cq) {
   GPR_ASSERT(cq);
 
@@ -489,8 +482,8 @@
   call->cq = cq;
   GRPC_CQ_INTERNAL_REF(cq, "bind");
   call->pollent = grpc_polling_entity_create_from_pollset(grpc_cq_pollset(cq));
-  grpc_call_stack_set_pollset_or_pollset_set(
-      exec_ctx, CALL_STACK_FROM_CALL(call), &call->pollent);
+  grpc_call_stack_set_pollset_or_pollset_set(CALL_STACK_FROM_CALL(call),
+                                             &call->pollent);
 }
 
 #ifndef NDEBUG
@@ -503,40 +496,38 @@
 void grpc_call_internal_ref(grpc_call* c REF_ARG) {
   GRPC_CALL_STACK_REF(CALL_STACK_FROM_CALL(c), REF_REASON);
 }
-void grpc_call_internal_unref(grpc_exec_ctx* exec_ctx, grpc_call* c REF_ARG) {
-  GRPC_CALL_STACK_UNREF(exec_ctx, CALL_STACK_FROM_CALL(c), REF_REASON);
+void grpc_call_internal_unref(grpc_call* c REF_ARG) {
+  GRPC_CALL_STACK_UNREF(CALL_STACK_FROM_CALL(c), REF_REASON);
 }
 
-static void release_call(grpc_exec_ctx* exec_ctx, void* call,
-                         grpc_error* error) {
+static void release_call(void* call, grpc_error* error) {
   grpc_call* c = (grpc_call*)call;
   grpc_channel* channel = c->channel;
   grpc_call_combiner_destroy(&c->call_combiner);
   gpr_free((char*)c->peer_string);
   grpc_channel_update_call_size_estimate(channel, gpr_arena_destroy(c->arena));
-  GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, "call");
+  GRPC_CHANNEL_INTERNAL_UNREF(channel, "call");
 }
 
 static void set_status_value_directly(grpc_status_code status, void* dest);
-static void destroy_call(grpc_exec_ctx* exec_ctx, void* call,
-                         grpc_error* error) {
+static void destroy_call(void* call, grpc_error* error) {
   size_t i;
   int ii;
   grpc_call* c = (grpc_call*)call;
   GPR_TIMER_BEGIN("destroy_call", 0);
   for (i = 0; i < 2; i++) {
     grpc_metadata_batch_destroy(
-        exec_ctx, &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]);
+        &c->metadata_batch[1 /* is_receiving */][i /* is_initial */]);
   }
   if (c->receiving_stream != nullptr) {
-    grpc_byte_stream_destroy(exec_ctx, c->receiving_stream);
+    grpc_byte_stream_destroy(c->receiving_stream);
   }
   parent_call* pc = get_parent_call(c);
   if (pc != nullptr) {
     gpr_mu_destroy(&pc->child_list_mu);
   }
   for (ii = 0; ii < c->send_extra_metadata_count; ii++) {
-    GRPC_MDELEM_UNREF(exec_ctx, c->send_extra_metadata[ii].md);
+    GRPC_MDELEM_UNREF(c->send_extra_metadata[ii].md);
   }
   for (i = 0; i < GRPC_CONTEXT_COUNT; i++) {
     if (c->context[i].destroy) {
@@ -544,12 +535,11 @@
     }
   }
   if (c->cq) {
-    GRPC_CQ_INTERNAL_UNREF(exec_ctx, c->cq, "bind");
+    GRPC_CQ_INTERNAL_UNREF(c->cq, "bind");
   }
 
-  get_final_status(exec_ctx, c, set_status_value_directly,
-                   &c->final_info.final_status, nullptr,
-                   c->final_info.error_string);
+  get_final_status(c, set_status_value_directly, &c->final_info.final_status,
+                   nullptr, c->final_info.error_string);
   c->final_info.stats.latency =
       gpr_time_sub(gpr_now(GPR_CLOCK_MONOTONIC), c->start_time);
 
@@ -558,7 +548,7 @@
         unpack_received_status(gpr_atm_acq_load(&c->status[i])).error);
   }
 
-  grpc_call_stack_destroy(exec_ctx, CALL_STACK_FROM_CALL(c), &c->final_info,
+  grpc_call_stack_destroy(CALL_STACK_FROM_CALL(c), &c->final_info,
                           GRPC_CLOSURE_INIT(&c->release_call, release_call, c,
                                             grpc_schedule_on_exec_ctx));
   GPR_TIMER_END("destroy_call", 0);
@@ -570,7 +560,7 @@
   if (!gpr_unref(&c->ext_ref)) return;
 
   child_call* cc = c->child;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   GPR_TIMER_BEGIN("grpc_call_unref", 0);
   GRPC_API_TRACE("grpc_call_unref(c=%p)", 1, (c));
@@ -587,7 +577,7 @@
     cc->sibling_prev->child->sibling_next = cc->sibling_next;
     cc->sibling_next->child->sibling_prev = cc->sibling_prev;
     gpr_mu_unlock(&pc->child_list_mu);
-    GRPC_CALL_INTERNAL_UNREF(&exec_ctx, cc->parent, "child");
+    GRPC_CALL_INTERNAL_UNREF(cc->parent, "child");
   }
 
   GPR_ASSERT(!c->destroy_called);
@@ -595,53 +585,49 @@
   bool cancel = gpr_atm_acq_load(&c->any_ops_sent_atm) != 0 &&
                 gpr_atm_acq_load(&c->received_final_op_atm) == 0;
   if (cancel) {
-    cancel_with_error(&exec_ctx, c, STATUS_FROM_API_OVERRIDE,
-                      GRPC_ERROR_CANCELLED);
+    cancel_with_error(c, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED);
   } else {
     // Unset the call combiner cancellation closure.  This has the
     // effect of scheduling the previously set cancellation closure, if
     // any, so that it can release any internal references it may be
     // holding to the call stack.
-    grpc_call_combiner_set_notify_on_cancel(&exec_ctx, &c->call_combiner,
-                                            nullptr);
+    grpc_call_combiner_set_notify_on_cancel(&c->call_combiner, nullptr);
   }
-  GRPC_CALL_INTERNAL_UNREF(&exec_ctx, c, "destroy");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_CALL_INTERNAL_UNREF(c, "destroy");
+
   GPR_TIMER_END("grpc_call_unref", 0);
 }
 
 grpc_call_error grpc_call_cancel(grpc_call* call, void* reserved) {
   GRPC_API_TRACE("grpc_call_cancel(call=%p, reserved=%p)", 2, (call, reserved));
   GPR_ASSERT(!reserved);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  cancel_with_error(&exec_ctx, call, STATUS_FROM_API_OVERRIDE,
-                    GRPC_ERROR_CANCELLED);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  cancel_with_error(call, STATUS_FROM_API_OVERRIDE, GRPC_ERROR_CANCELLED);
+
   return GRPC_CALL_OK;
 }
 
 // This is called via the call combiner to start sending a batch down
 // the filter stack.
-static void execute_batch_in_call_combiner(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* ignored) {
+static void execute_batch_in_call_combiner(void* arg, grpc_error* ignored) {
   grpc_transport_stream_op_batch* batch = (grpc_transport_stream_op_batch*)arg;
   grpc_call* call = (grpc_call*)batch->handler_private.extra_arg;
   GPR_TIMER_BEGIN("execute_batch", 0);
   grpc_call_element* elem = CALL_ELEM_FROM_CALL(call, 0);
   GRPC_CALL_LOG_OP(GPR_INFO, elem, batch);
-  elem->filter->start_transport_stream_op_batch(exec_ctx, elem, batch);
+  elem->filter->start_transport_stream_op_batch(elem, batch);
   GPR_TIMER_END("execute_batch", 0);
 }
 
 // start_batch_closure points to a caller-allocated closure to be used
 // for entering the call combiner.
-static void execute_batch(grpc_exec_ctx* exec_ctx, grpc_call* call,
+static void execute_batch(grpc_call* call,
                           grpc_transport_stream_op_batch* batch,
                           grpc_closure* start_batch_closure) {
   batch->handler_private.extra_arg = call;
   GRPC_CLOSURE_INIT(start_batch_closure, execute_batch_in_call_combiner, batch,
                     grpc_schedule_on_exec_ctx);
-  GRPC_CALL_COMBINER_START(exec_ctx, &call->call_combiner, start_batch_closure,
+  GRPC_CALL_COMBINER_START(&call->call_combiner, start_batch_closure,
                            GRPC_ERROR_NONE, "executing batch");
 }
 
@@ -665,15 +651,14 @@
                                              grpc_status_code status,
                                              const char* description,
                                              void* reserved) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GRPC_API_TRACE(
       "grpc_call_cancel_with_status("
       "c=%p, status=%d, description=%s, reserved=%p)",
       4, (c, (int)status, description, reserved));
   GPR_ASSERT(reserved == nullptr);
-  cancel_with_status(&exec_ctx, c, STATUS_FROM_API_OVERRIDE, status,
-                     description);
-  grpc_exec_ctx_finish(&exec_ctx);
+  cancel_with_status(c, STATUS_FROM_API_OVERRIDE, status, description);
+
   return GRPC_CALL_OK;
 }
 
@@ -685,24 +670,23 @@
 
 // The on_complete callback used when sending a cancel_stream batch down
 // the filter stack.  Yields the call combiner when the batch is done.
-static void done_termination(grpc_exec_ctx* exec_ctx, void* arg,
-                             grpc_error* error) {
+static void done_termination(void* arg, grpc_error* error) {
   cancel_state* state = (cancel_state*)arg;
-  GRPC_CALL_COMBINER_STOP(exec_ctx, &state->call->call_combiner,
+  GRPC_CALL_COMBINER_STOP(&state->call->call_combiner,
                           "on_complete for cancel_stream op");
-  GRPC_CALL_INTERNAL_UNREF(exec_ctx, state->call, "termination");
+  GRPC_CALL_INTERNAL_UNREF(state->call, "termination");
   gpr_free(state);
 }
 
-static void cancel_with_error(grpc_exec_ctx* exec_ctx, grpc_call* c,
-                              status_source source, grpc_error* error) {
+static void cancel_with_error(grpc_call* c, status_source source,
+                              grpc_error* error) {
   GRPC_CALL_INTERNAL_REF(c, "termination");
   // Inform the call combiner of the cancellation, so that it can cancel
   // any in-flight asynchronous actions that may be holding the call
   // combiner.  This ensures that the cancel_stream batch can be sent
   // down the filter stack in a timely manner.
-  grpc_call_combiner_cancel(exec_ctx, &c->call_combiner, GRPC_ERROR_REF(error));
-  set_status_from_error(exec_ctx, c, source, GRPC_ERROR_REF(error));
+  grpc_call_combiner_cancel(&c->call_combiner, GRPC_ERROR_REF(error));
+  set_status_from_error(c, source, GRPC_ERROR_REF(error));
   cancel_state* state = (cancel_state*)gpr_malloc(sizeof(*state));
   state->call = c;
   GRPC_CLOSURE_INIT(&state->finish_batch, done_termination, state,
@@ -711,7 +695,7 @@
       grpc_make_transport_stream_op(&state->finish_batch);
   op->cancel_stream = true;
   op->payload->cancel_stream.cancel_error = error;
-  execute_batch(exec_ctx, c, op, &state->start_batch);
+  execute_batch(c, op, &state->start_batch);
 }
 
 static grpc_error* error_from_status(grpc_status_code status,
@@ -725,11 +709,10 @@
       GRPC_ERROR_INT_GRPC_STATUS, status);
 }
 
-static void cancel_with_status(grpc_exec_ctx* exec_ctx, grpc_call* c,
-                               status_source source, grpc_status_code status,
+static void cancel_with_status(grpc_call* c, status_source source,
+                               grpc_status_code status,
                                const char* description) {
-  cancel_with_error(exec_ctx, c, source,
-                    error_from_status(status, description));
+  cancel_with_error(c, source, error_from_status(status, description));
 }
 
 /*******************************************************************************
@@ -737,14 +720,13 @@
  */
 
 static bool get_final_status_from(
-    grpc_exec_ctx* exec_ctx, grpc_call* call, grpc_error* error,
-    bool allow_ok_status,
+    grpc_call* call, grpc_error* error, bool allow_ok_status,
     void (*set_value)(grpc_status_code code, void* user_data),
     void* set_value_user_data, grpc_slice* details, const char** error_string) {
   grpc_status_code code;
   grpc_slice slice = grpc_empty_slice();
-  grpc_error_get_status(exec_ctx, error, call->send_deadline, &code, &slice,
-                        nullptr, error_string);
+  grpc_error_get_status(error, call->send_deadline, &code, &slice, nullptr,
+                        error_string);
   if (code == GRPC_STATUS_OK && !allow_ok_status) {
     return false;
   }
@@ -756,11 +738,9 @@
   return true;
 }
 
-static void get_final_status(grpc_exec_ctx* exec_ctx, grpc_call* call,
-                             void (*set_value)(grpc_status_code code,
-                                               void* user_data),
-                             void* set_value_user_data, grpc_slice* details,
-                             const char** error_string) {
+static void get_final_status(
+    grpc_call* call, void (*set_value)(grpc_status_code code, void* user_data),
+    void* set_value_user_data, grpc_slice* details, const char** error_string) {
   int i;
   received_status status[STATUS_SOURCE_COUNT];
   for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
@@ -782,9 +762,9 @@
     for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
       if (status[i].is_set &&
           grpc_error_has_clear_grpc_status(status[i].error)) {
-        if (get_final_status_from(exec_ctx, call, status[i].error,
-                                  allow_ok_status != 0, set_value,
-                                  set_value_user_data, details, error_string)) {
+        if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
+                                  set_value, set_value_user_data, details,
+                                  error_string)) {
           return;
         }
       }
@@ -792,9 +772,9 @@
     /* If no clearly defined status exists, search for 'anything' */
     for (i = 0; i < STATUS_SOURCE_COUNT; i++) {
       if (status[i].is_set) {
-        if (get_final_status_from(exec_ctx, call, status[i].error,
-                                  allow_ok_status != 0, set_value,
-                                  set_value_user_data, details, error_string)) {
+        if (get_final_status_from(call, status[i].error, allow_ok_status != 0,
+                                  set_value, set_value_user_data, details,
+                                  error_string)) {
           return;
         }
       }
@@ -808,8 +788,8 @@
   }
 }
 
-static void set_status_from_error(grpc_exec_ctx* exec_ctx, grpc_call* call,
-                                  status_source source, grpc_error* error) {
+static void set_status_from_error(grpc_call* call, status_source source,
+                                  grpc_error* error) {
   if (!gpr_atm_rel_cas(&call->status[source],
                        pack_received_status({false, GRPC_ERROR_NONE}),
                        pack_received_status({true, error}))) {
@@ -861,8 +841,7 @@
 
 static void destroy_encodings_accepted_by_peer(void* p) { return; }
 
-static void set_encodings_accepted_by_peer(grpc_exec_ctx* exec_ctx,
-                                           grpc_call* call, grpc_mdelem mdel) {
+static void set_encodings_accepted_by_peer(grpc_call* call, grpc_mdelem mdel) {
   size_t i;
   grpc_compression_algorithm algorithm;
   grpc_slice_buffer accept_encoding_parts;
@@ -900,15 +879,14 @@
     }
   }
 
-  grpc_slice_buffer_destroy_internal(exec_ctx, &accept_encoding_parts);
+  grpc_slice_buffer_destroy_internal(&accept_encoding_parts);
 
   grpc_mdelem_set_user_data(
       mdel, destroy_encodings_accepted_by_peer,
       (void*)(((uintptr_t)call->encodings_accepted_by_peer) + 1));
 }
 
-static void set_stream_encodings_accepted_by_peer(grpc_exec_ctx* exec_ctx,
-                                                  grpc_call* call,
+static void set_stream_encodings_accepted_by_peer(grpc_call* call,
                                                   grpc_mdelem mdel) {
   size_t i;
   grpc_stream_compression_algorithm algorithm;
@@ -946,7 +924,7 @@
     }
   }
 
-  grpc_slice_buffer_destroy_internal(exec_ctx, &accept_encoding_parts);
+  grpc_slice_buffer_destroy_internal(&accept_encoding_parts);
 
   grpc_mdelem_set_user_data(
       mdel, destroy_encodings_accepted_by_peer,
@@ -984,10 +962,12 @@
   return res;
 }
 
-static int prepare_application_metadata(
-    grpc_exec_ctx* exec_ctx, grpc_call* call, int count,
-    grpc_metadata* metadata, int is_trailing, int prepend_extra_metadata,
-    grpc_metadata* additional_metadata, int additional_metadata_count) {
+static int prepare_application_metadata(grpc_call* call, int count,
+                                        grpc_metadata* metadata,
+                                        int is_trailing,
+                                        int prepend_extra_metadata,
+                                        grpc_metadata* additional_metadata,
+                                        int additional_metadata_count) {
   int total_count = count + additional_metadata_count;
   int i;
   grpc_metadata_batch* batch =
@@ -1006,14 +986,14 @@
                    grpc_validate_header_nonbin_value_is_legal(md->value))) {
       break;
     }
-    l->md = grpc_mdelem_from_grpc_metadata(exec_ctx, (grpc_metadata*)md);
+    l->md = grpc_mdelem_from_grpc_metadata((grpc_metadata*)md);
   }
   if (i != total_count) {
     for (int j = 0; j < i; j++) {
       const grpc_metadata* md =
           get_md_elem(metadata, additional_metadata, j, count);
       grpc_linked_mdelem* l = linked_from_md(md);
-      GRPC_MDELEM_UNREF(exec_ctx, l->md);
+      GRPC_MDELEM_UNREF(l->md);
     }
     return 0;
   }
@@ -1024,16 +1004,16 @@
       for (i = 0; i < call->send_extra_metadata_count; i++) {
         GRPC_LOG_IF_ERROR("prepare_application_metadata",
                           grpc_metadata_batch_link_tail(
-                              exec_ctx, batch, &call->send_extra_metadata[i]));
+                              batch, &call->send_extra_metadata[i]));
       }
     }
   }
   for (i = 0; i < total_count; i++) {
     grpc_metadata* md = get_md_elem(metadata, additional_metadata, i, count);
     grpc_linked_mdelem* l = linked_from_md(md);
-    grpc_error* error = grpc_metadata_batch_link_tail(exec_ctx, batch, l);
+    grpc_error* error = grpc_metadata_batch_link_tail(batch, l);
     if (error != GRPC_ERROR_NONE) {
-      GRPC_MDELEM_UNREF(exec_ctx, l->md);
+      GRPC_MDELEM_UNREF(l->md);
     }
     GRPC_LOG_IF_ERROR("prepare_application_metadata", error);
   }
@@ -1120,46 +1100,43 @@
   GPR_TIMER_END("publish_app_metadata", 0);
 }
 
-static void recv_initial_filter(grpc_exec_ctx* exec_ctx, grpc_call* call,
-                                grpc_metadata_batch* b) {
+static void recv_initial_filter(grpc_call* call, grpc_metadata_batch* b) {
   if (b->idx.named.content_encoding != nullptr) {
     if (b->idx.named.grpc_encoding != nullptr) {
       gpr_log(GPR_ERROR,
               "Received both content-encoding and grpc-encoding header. "
               "Ignoring grpc-encoding.");
-      grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding);
+      grpc_metadata_batch_remove(b, b->idx.named.grpc_encoding);
     }
     GPR_TIMER_BEGIN("incoming_stream_compression_algorithm", 0);
     set_incoming_stream_compression_algorithm(
         call, decode_stream_compression(b->idx.named.content_encoding->md));
     GPR_TIMER_END("incoming_stream_compression_algorithm", 0);
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.content_encoding);
+    grpc_metadata_batch_remove(b, b->idx.named.content_encoding);
   } else if (b->idx.named.grpc_encoding != nullptr) {
     GPR_TIMER_BEGIN("incoming_compression_algorithm", 0);
     set_incoming_compression_algorithm(
         call, decode_compression(b->idx.named.grpc_encoding->md));
     GPR_TIMER_END("incoming_compression_algorithm", 0);
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_encoding);
+    grpc_metadata_batch_remove(b, b->idx.named.grpc_encoding);
   }
   if (b->idx.named.grpc_accept_encoding != nullptr) {
     GPR_TIMER_BEGIN("encodings_accepted_by_peer", 0);
-    set_encodings_accepted_by_peer(exec_ctx, call,
-                                   b->idx.named.grpc_accept_encoding->md);
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_accept_encoding);
+    set_encodings_accepted_by_peer(call, b->idx.named.grpc_accept_encoding->md);
+    grpc_metadata_batch_remove(b, b->idx.named.grpc_accept_encoding);
     GPR_TIMER_END("encodings_accepted_by_peer", 0);
   }
   if (b->idx.named.accept_encoding != nullptr) {
     GPR_TIMER_BEGIN("stream_encodings_accepted_by_peer", 0);
-    set_stream_encodings_accepted_by_peer(exec_ctx, call,
+    set_stream_encodings_accepted_by_peer(call,
                                           b->idx.named.accept_encoding->md);
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.accept_encoding);
+    grpc_metadata_batch_remove(b, b->idx.named.accept_encoding);
     GPR_TIMER_END("stream_encodings_accepted_by_peer", 0);
   }
   publish_app_metadata(call, b, false);
 }
 
-static void recv_trailing_filter(grpc_exec_ctx* exec_ctx, void* args,
-                                 grpc_metadata_batch* b) {
+static void recv_trailing_filter(void* args, grpc_metadata_batch* b) {
   grpc_call* call = (grpc_call*)args;
   if (b->idx.named.grpc_status != nullptr) {
     uint32_t status_code = decode_status(b->idx.named.grpc_status->md);
@@ -1174,13 +1151,13 @@
       error = grpc_error_set_str(
           error, GRPC_ERROR_STR_GRPC_MESSAGE,
           grpc_slice_ref_internal(GRPC_MDVALUE(b->idx.named.grpc_message->md)));
-      grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_message);
+      grpc_metadata_batch_remove(b, b->idx.named.grpc_message);
     } else if (error != GRPC_ERROR_NONE) {
       error = grpc_error_set_str(error, GRPC_ERROR_STR_GRPC_MESSAGE,
                                  grpc_empty_slice());
     }
-    set_status_from_error(exec_ctx, call, STATUS_FROM_WIRE, error);
-    grpc_metadata_batch_remove(exec_ctx, b, b->idx.named.grpc_status);
+    set_status_from_error(call, STATUS_FROM_WIRE, error);
+    grpc_metadata_batch_remove(b, b->idx.named.grpc_status);
   }
   publish_app_metadata(call, b, true);
 }
@@ -1257,12 +1234,12 @@
   return bctl;
 }
 
-static void finish_batch_completion(grpc_exec_ctx* exec_ctx, void* user_data,
+static void finish_batch_completion(void* user_data,
                                     grpc_cq_completion* storage) {
   batch_control* bctl = (batch_control*)user_data;
   grpc_call* call = bctl->call;
   bctl->call = nullptr;
-  GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
+  GRPC_CALL_INTERNAL_UNREF(call, "completion");
 }
 
 static grpc_error* consolidate_batch_errors(batch_control* bctl) {
@@ -1286,15 +1263,13 @@
   }
 }
 
-static void post_batch_completion(grpc_exec_ctx* exec_ctx,
-                                  batch_control* bctl) {
+static void post_batch_completion(batch_control* bctl) {
   grpc_call* next_child_call;
   grpc_call* call = bctl->call;
   grpc_error* error = consolidate_batch_errors(bctl);
 
   if (bctl->op.send_initial_metadata) {
     grpc_metadata_batch_destroy(
-        exec_ctx,
         &call->metadata_batch[0 /* is_receiving */][0 /* is_trailing */]);
   }
   if (bctl->op.send_message) {
@@ -1302,13 +1277,12 @@
   }
   if (bctl->op.send_trailing_metadata) {
     grpc_metadata_batch_destroy(
-        exec_ctx,
         &call->metadata_batch[0 /* is_receiving */][1 /* is_trailing */]);
   }
   if (bctl->op.recv_trailing_metadata) {
     grpc_metadata_batch* md =
         &call->metadata_batch[1 /* is_receiving */][1 /* is_trailing */];
-    recv_trailing_filter(exec_ctx, call, md);
+    recv_trailing_filter(call, md);
 
     /* propagate cancellation to any interested children */
     gpr_atm_rel_store(&call->received_final_op_atm, 1);
@@ -1322,9 +1296,9 @@
           next_child_call = child->child->sibling_next;
           if (child->cancellation_is_inherited) {
             GRPC_CALL_INTERNAL_REF(child, "propagate_cancel");
-            cancel_with_error(exec_ctx, child, STATUS_FROM_API_OVERRIDE,
+            cancel_with_error(child, STATUS_FROM_API_OVERRIDE,
                               GRPC_ERROR_CANCELLED);
-            GRPC_CALL_INTERNAL_UNREF(exec_ctx, child, "propagate_cancel");
+            GRPC_CALL_INTERNAL_UNREF(child, "propagate_cancel");
           }
           child = next_child_call;
         } while (child != pc->first_child);
@@ -1333,12 +1307,12 @@
     }
 
     if (call->is_client) {
-      get_final_status(exec_ctx, call, set_status_value_directly,
+      get_final_status(call, set_status_value_directly,
                        call->final_op.client.status,
                        call->final_op.client.status_details,
                        call->final_op.client.error_string);
     } else {
-      get_final_status(exec_ctx, call, set_cancelled_value,
+      get_final_status(call, set_cancelled_value,
                        call->final_op.server.cancelled, nullptr, nullptr);
     }
 
@@ -1354,25 +1328,24 @@
   if (bctl->completion_data.notify_tag.is_closure) {
     /* unrefs bctl->error */
     bctl->call = nullptr;
-    GRPC_CLOSURE_RUN(
-        exec_ctx, (grpc_closure*)bctl->completion_data.notify_tag.tag, error);
-    GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, "completion");
+    GRPC_CLOSURE_RUN((grpc_closure*)bctl->completion_data.notify_tag.tag,
+                     error);
+    GRPC_CALL_INTERNAL_UNREF(call, "completion");
   } else {
     /* unrefs bctl->error */
-    grpc_cq_end_op(
-        exec_ctx, bctl->call->cq, bctl->completion_data.notify_tag.tag, error,
-        finish_batch_completion, bctl, &bctl->completion_data.cq_completion);
+    grpc_cq_end_op(bctl->call->cq, bctl->completion_data.notify_tag.tag, error,
+                   finish_batch_completion, bctl,
+                   &bctl->completion_data.cq_completion);
   }
 }
 
-static void finish_batch_step(grpc_exec_ctx* exec_ctx, batch_control* bctl) {
+static void finish_batch_step(batch_control* bctl) {
   if (gpr_unref(&bctl->steps_to_complete)) {
-    post_batch_completion(exec_ctx, bctl);
+    post_batch_completion(bctl);
   }
 }
 
-static void continue_receiving_slices(grpc_exec_ctx* exec_ctx,
-                                      batch_control* bctl) {
+static void continue_receiving_slices(batch_control* bctl) {
   grpc_error* error;
   grpc_call* call = bctl->call;
   for (;;) {
@@ -1380,25 +1353,25 @@
                        (*call->receiving_buffer)->data.raw.slice_buffer.length;
     if (remaining == 0) {
       call->receiving_message = 0;
-      grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+      grpc_byte_stream_destroy(call->receiving_stream);
       call->receiving_stream = nullptr;
-      finish_batch_step(exec_ctx, bctl);
+      finish_batch_step(bctl);
       return;
     }
-    if (grpc_byte_stream_next(exec_ctx, call->receiving_stream, remaining,
+    if (grpc_byte_stream_next(call->receiving_stream, remaining,
                               &call->receiving_slice_ready)) {
-      error = grpc_byte_stream_pull(exec_ctx, call->receiving_stream,
-                                    &call->receiving_slice);
+      error =
+          grpc_byte_stream_pull(call->receiving_stream, &call->receiving_slice);
       if (error == GRPC_ERROR_NONE) {
         grpc_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
                               call->receiving_slice);
       } else {
-        grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+        grpc_byte_stream_destroy(call->receiving_stream);
         call->receiving_stream = nullptr;
         grpc_byte_buffer_destroy(*call->receiving_buffer);
         *call->receiving_buffer = nullptr;
         call->receiving_message = 0;
-        finish_batch_step(exec_ctx, bctl);
+        finish_batch_step(bctl);
         return;
       }
     } else {
@@ -1407,8 +1380,7 @@
   }
 }
 
-static void receiving_slice_ready(grpc_exec_ctx* exec_ctx, void* bctlp,
-                                  grpc_error* error) {
+static void receiving_slice_ready(void* bctlp, grpc_error* error) {
   batch_control* bctl = (batch_control*)bctlp;
   grpc_call* call = bctl->call;
   grpc_byte_stream* bs = call->receiving_stream;
@@ -1416,11 +1388,11 @@
 
   if (error == GRPC_ERROR_NONE) {
     grpc_slice slice;
-    error = grpc_byte_stream_pull(exec_ctx, bs, &slice);
+    error = grpc_byte_stream_pull(bs, &slice);
     if (error == GRPC_ERROR_NONE) {
       grpc_slice_buffer_add(&(*call->receiving_buffer)->data.raw.slice_buffer,
                             slice);
-      continue_receiving_slices(exec_ctx, bctl);
+      continue_receiving_slices(bctl);
     } else {
       /* Error returned by grpc_byte_stream_pull needs to be released manually
        */
@@ -1432,25 +1404,24 @@
     if (grpc_trace_operation_failures.enabled()) {
       GRPC_LOG_IF_ERROR("receiving_slice_ready", GRPC_ERROR_REF(error));
     }
-    grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+    grpc_byte_stream_destroy(call->receiving_stream);
     call->receiving_stream = nullptr;
     grpc_byte_buffer_destroy(*call->receiving_buffer);
     *call->receiving_buffer = nullptr;
     call->receiving_message = 0;
-    finish_batch_step(exec_ctx, bctl);
+    finish_batch_step(bctl);
     if (release_error) {
       GRPC_ERROR_UNREF(error);
     }
   }
 }
 
-static void process_data_after_md(grpc_exec_ctx* exec_ctx,
-                                  batch_control* bctl) {
+static void process_data_after_md(batch_control* bctl) {
   grpc_call* call = bctl->call;
   if (call->receiving_stream == nullptr) {
     *call->receiving_buffer = nullptr;
     call->receiving_message = 0;
-    finish_batch_step(exec_ctx, bctl);
+    finish_batch_step(bctl);
   } else {
     call->test_only_last_message_flags = call->receiving_stream->flags;
     if ((call->receiving_stream->flags & GRPC_WRITE_INTERNAL_COMPRESS) &&
@@ -1462,46 +1433,42 @@
     }
     GRPC_CLOSURE_INIT(&call->receiving_slice_ready, receiving_slice_ready, bctl,
                       grpc_schedule_on_exec_ctx);
-    continue_receiving_slices(exec_ctx, bctl);
+    continue_receiving_slices(bctl);
   }
 }
 
-static void receiving_stream_ready(grpc_exec_ctx* exec_ctx, void* bctlp,
-                                   grpc_error* error) {
+static void receiving_stream_ready(void* bctlp, grpc_error* error) {
   batch_control* bctl = (batch_control*)bctlp;
   grpc_call* call = bctl->call;
   if (error != GRPC_ERROR_NONE) {
     if (call->receiving_stream != nullptr) {
-      grpc_byte_stream_destroy(exec_ctx, call->receiving_stream);
+      grpc_byte_stream_destroy(call->receiving_stream);
       call->receiving_stream = nullptr;
     }
-    add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), true);
-    cancel_with_error(exec_ctx, call, STATUS_FROM_SURFACE,
-                      GRPC_ERROR_REF(error));
+    add_batch_error(bctl, GRPC_ERROR_REF(error), true);
+    cancel_with_error(call, STATUS_FROM_SURFACE, GRPC_ERROR_REF(error));
   }
   /* If recv_state is RECV_NONE, we will save the batch_control
    * object with rel_cas, and will not use it after the cas. Its corresponding
    * acq_load is in receiving_initial_metadata_ready() */
   if (error != GRPC_ERROR_NONE || call->receiving_stream == nullptr ||
       !gpr_atm_rel_cas(&call->recv_state, RECV_NONE, (gpr_atm)bctlp)) {
-    process_data_after_md(exec_ctx, bctl);
+    process_data_after_md(bctl);
   }
 }
 
 // The recv_message_ready callback used when sending a batch containing
 // a recv_message op down the filter stack.  Yields the call combiner
 // before processing the received message.
-static void receiving_stream_ready_in_call_combiner(grpc_exec_ctx* exec_ctx,
-                                                    void* bctlp,
+static void receiving_stream_ready_in_call_combiner(void* bctlp,
                                                     grpc_error* error) {
   batch_control* bctl = (batch_control*)bctlp;
   grpc_call* call = bctl->call;
-  GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "recv_message_ready");
-  receiving_stream_ready(exec_ctx, bctlp, error);
+  GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_message_ready");
+  receiving_stream_ready(bctlp, error);
 }
 
-static void validate_filtered_metadata(grpc_exec_ctx* exec_ctx,
-                                       batch_control* bctl) {
+static void validate_filtered_metadata(batch_control* bctl) {
   grpc_call* call = bctl->call;
   /* validate compression algorithms */
   if (call->incoming_stream_compression_algorithm !=
@@ -1515,8 +1482,8 @@
       gpr_asprintf(&error_msg,
                    "Invalid stream compression algorithm value '%d'.", algo);
       gpr_log(GPR_ERROR, "%s", error_msg);
-      cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE,
-                         GRPC_STATUS_UNIMPLEMENTED, error_msg);
+      cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED,
+                         error_msg);
     } else if (grpc_compression_options_is_stream_compression_algorithm_enabled(
                    &compression_options, algo) == 0) {
       /* check if algorithm is supported by current channel config */
@@ -1525,8 +1492,8 @@
       gpr_asprintf(&error_msg, "Stream compression algorithm '%s' is disabled.",
                    algo_name);
       gpr_log(GPR_ERROR, "%s", error_msg);
-      cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE,
-                         GRPC_STATUS_UNIMPLEMENTED, error_msg);
+      cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED,
+                         error_msg);
     }
     gpr_free(error_msg);
 
@@ -1556,8 +1523,8 @@
       gpr_asprintf(&error_msg, "Invalid compression algorithm value '%d'.",
                    algo);
       gpr_log(GPR_ERROR, "%s", error_msg);
-      cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE,
-                         GRPC_STATUS_UNIMPLEMENTED, error_msg);
+      cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED,
+                         error_msg);
     } else if (grpc_compression_options_is_algorithm_enabled(
                    &compression_options, algo) == 0) {
       /* check if algorithm is supported by current channel config */
@@ -1566,8 +1533,8 @@
       gpr_asprintf(&error_msg, "Compression algorithm '%s' is disabled.",
                    algo_name);
       gpr_log(GPR_ERROR, "%s", error_msg);
-      cancel_with_status(exec_ctx, call, STATUS_FROM_SURFACE,
-                         GRPC_STATUS_UNIMPLEMENTED, error_msg);
+      cancel_with_status(call, STATUS_FROM_SURFACE, GRPC_STATUS_UNIMPLEMENTED,
+                         error_msg);
     } else {
       call->incoming_compression_algorithm = algo;
     }
@@ -1590,34 +1557,31 @@
   }
 }
 
-static void add_batch_error(grpc_exec_ctx* exec_ctx, batch_control* bctl,
-                            grpc_error* error, bool has_cancelled) {
+static void add_batch_error(batch_control* bctl, grpc_error* error,
+                            bool has_cancelled) {
   if (error == GRPC_ERROR_NONE) return;
   int idx = (int)gpr_atm_full_fetch_add(&bctl->num_errors, 1);
   if (idx == 0 && !has_cancelled) {
-    cancel_with_error(exec_ctx, bctl->call, STATUS_FROM_CORE,
-                      GRPC_ERROR_REF(error));
+    cancel_with_error(bctl->call, STATUS_FROM_CORE, GRPC_ERROR_REF(error));
   }
   bctl->errors[idx] = error;
 }
 
-static void receiving_initial_metadata_ready(grpc_exec_ctx* exec_ctx,
-                                             void* bctlp, grpc_error* error) {
+static void receiving_initial_metadata_ready(void* bctlp, grpc_error* error) {
   batch_control* bctl = (batch_control*)bctlp;
   grpc_call* call = bctl->call;
 
-  GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner,
-                          "recv_initial_metadata_ready");
+  GRPC_CALL_COMBINER_STOP(&call->call_combiner, "recv_initial_metadata_ready");
 
-  add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
+  add_batch_error(bctl, GRPC_ERROR_REF(error), false);
   if (error == GRPC_ERROR_NONE) {
     grpc_metadata_batch* md =
         &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
-    recv_initial_filter(exec_ctx, call, md);
+    recv_initial_filter(call, md);
 
     /* TODO(ctiller): this could be moved into recv_initial_filter now */
     GPR_TIMER_BEGIN("validate_filtered_metadata", 0);
-    validate_filtered_metadata(exec_ctx, bctl);
+    validate_filtered_metadata(bctl);
     GPR_TIMER_END("validate_filtered_metadata", 0);
 
     if (md->deadline != GRPC_MILLIS_INF_FUTURE && !call->is_client) {
@@ -1650,28 +1614,25 @@
     }
   }
   if (saved_rsr_closure != nullptr) {
-    GRPC_CLOSURE_RUN(exec_ctx, saved_rsr_closure, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_RUN(saved_rsr_closure, GRPC_ERROR_REF(error));
   }
 
-  finish_batch_step(exec_ctx, bctl);
+  finish_batch_step(bctl);
 }
 
-static void finish_batch(grpc_exec_ctx* exec_ctx, void* bctlp,
-                         grpc_error* error) {
+static void finish_batch(void* bctlp, grpc_error* error) {
   batch_control* bctl = (batch_control*)bctlp;
   grpc_call* call = bctl->call;
-  GRPC_CALL_COMBINER_STOP(exec_ctx, &call->call_combiner, "on_complete");
-  add_batch_error(exec_ctx, bctl, GRPC_ERROR_REF(error), false);
-  finish_batch_step(exec_ctx, bctl);
+  GRPC_CALL_COMBINER_STOP(&call->call_combiner, "on_complete");
+  add_batch_error(bctl, GRPC_ERROR_REF(error), false);
+  finish_batch_step(bctl);
 }
 
-static void free_no_op_completion(grpc_exec_ctx* exec_ctx, void* p,
-                                  grpc_cq_completion* completion) {
+static void free_no_op_completion(void* p, grpc_cq_completion* completion) {
   gpr_free(completion);
 }
 
-static grpc_call_error call_start_batch(grpc_exec_ctx* exec_ctx,
-                                        grpc_call* call, const grpc_op* ops,
+static grpc_call_error call_start_batch(grpc_call* call, const grpc_op* ops,
                                         size_t nops, void* notify_tag,
                                         int is_notify_tag_closure) {
   size_t i;
@@ -1689,11 +1650,10 @@
     if (!is_notify_tag_closure) {
       GPR_ASSERT(grpc_cq_begin_op(call->cq, notify_tag));
       grpc_cq_end_op(
-          exec_ctx, call->cq, notify_tag, GRPC_ERROR_NONE,
-          free_no_op_completion, nullptr,
+          call->cq, notify_tag, GRPC_ERROR_NONE, free_no_op_completion, nullptr,
           (grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
     } else {
-      GRPC_CLOSURE_SCHED(exec_ctx, (grpc_closure*)notify_tag, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED((grpc_closure*)notify_tag, GRPC_ERROR_NONE);
     }
     error = GRPC_CALL_OK;
     goto done;
@@ -1793,7 +1753,7 @@
         stream_op->send_initial_metadata = true;
         call->sent_initial_metadata = true;
         if (!prepare_application_metadata(
-                exec_ctx, call, (int)op->data.send_initial_metadata.count,
+                call, (int)op->data.send_initial_metadata.count,
                 op->data.send_initial_metadata.metadata, 0, call->is_client,
                 &call->compression_md, (int)additional_metadata_count)) {
           error = GRPC_CALL_ERROR_INVALID_METADATA;
@@ -1887,16 +1847,17 @@
         GPR_ASSERT(call->send_extra_metadata_count == 0);
         call->send_extra_metadata_count = 1;
         call->send_extra_metadata[0].md = grpc_channel_get_reffed_status_elem(
-            exec_ctx, call->channel, op->data.send_status_from_server.status);
+            call->channel, op->data.send_status_from_server.status);
         {
           grpc_error* override_error = GRPC_ERROR_NONE;
           if (op->data.send_status_from_server.status != GRPC_STATUS_OK) {
-            override_error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(
-                "Error from server send status");
+            override_error =
+                error_from_status(op->data.send_status_from_server.status,
+                                  "Returned non-ok status");
           }
           if (op->data.send_status_from_server.status_details != nullptr) {
             call->send_extra_metadata[1].md = grpc_mdelem_from_slices(
-                exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
+                GRPC_MDSTR_GRPC_MESSAGE,
                 grpc_slice_ref_internal(
                     *op->data.send_status_from_server.status_details));
             call->send_extra_metadata_count++;
@@ -1907,16 +1868,15 @@
                                    grpc_slice_from_copied_string(msg));
             gpr_free(msg);
           }
-          set_status_from_error(exec_ctx, call, STATUS_FROM_API_OVERRIDE,
-                                override_error);
+          set_status_from_error(call, STATUS_FROM_API_OVERRIDE, override_error);
         }
         if (!prepare_application_metadata(
-                exec_ctx, call,
+                call,
                 (int)op->data.send_status_from_server.trailing_metadata_count,
                 op->data.send_status_from_server.trailing_metadata, 1, 1,
                 nullptr, 0)) {
           for (int n = 0; n < call->send_extra_metadata_count; n++) {
-            GRPC_MDELEM_UNREF(exec_ctx, call->send_extra_metadata[n].md);
+            GRPC_MDELEM_UNREF(call->send_extra_metadata[n].md);
           }
           call->send_extra_metadata_count = 0;
           error = GRPC_CALL_ERROR_INVALID_METADATA;
@@ -2045,7 +2005,7 @@
   stream_op->on_complete = &bctl->finish_batch;
   gpr_atm_rel_store(&call->any_ops_sent_atm, 1);
 
-  execute_batch(exec_ctx, call, stream_op, &bctl->start_batch);
+  execute_batch(call, stream_op, &bctl->start_batch);
 
 done:
   GPR_TIMER_END("grpc_call_start_batch", 0);
@@ -2055,15 +2015,15 @@
   /* reverse any mutations that occurred */
   if (stream_op->send_initial_metadata) {
     call->sent_initial_metadata = false;
-    grpc_metadata_batch_clear(exec_ctx, &call->metadata_batch[0][0]);
+    grpc_metadata_batch_clear(&call->metadata_batch[0][0]);
   }
   if (stream_op->send_message) {
     call->sending_message = false;
-    grpc_byte_stream_destroy(exec_ctx, &call->sending_stream.base);
+    grpc_byte_stream_destroy(&call->sending_stream.base);
   }
   if (stream_op->send_trailing_metadata) {
     call->sent_final_op = false;
-    grpc_metadata_batch_clear(exec_ctx, &call->metadata_batch[0][1]);
+    grpc_metadata_batch_clear(&call->metadata_batch[0][1]);
   }
   if (stream_op->recv_initial_metadata) {
     call->received_initial_metadata = false;
@@ -2079,7 +2039,7 @@
 
 grpc_call_error grpc_call_start_batch(grpc_call* call, const grpc_op* ops,
                                       size_t nops, void* tag, void* reserved) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_call_error err;
 
   GRPC_API_TRACE(
@@ -2090,19 +2050,17 @@
   if (reserved != nullptr) {
     err = GRPC_CALL_ERROR;
   } else {
-    err = call_start_batch(&exec_ctx, call, ops, nops, tag, 0);
+    err = call_start_batch(call, ops, nops, tag, 0);
   }
 
-  grpc_exec_ctx_finish(&exec_ctx);
   return err;
 }
 
-grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx* exec_ctx,
-                                                  grpc_call* call,
+grpc_call_error grpc_call_start_batch_and_execute(grpc_call* call,
                                                   const grpc_op* ops,
                                                   size_t nops,
                                                   grpc_closure* closure) {
-  return call_start_batch(exec_ctx, call, ops, nops, closure, 1);
+  return call_start_batch(call, ops, nops, closure, 1);
 }
 
 void grpc_call_context_set(grpc_call* call, grpc_context_index elem,
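
The change repeated throughout call.cc above is mechanical: the explicitly threaded grpc_exec_ctx* parameter is dropped, and public entry points instead place a grpc_core::ExecCtx on the stack. A minimal sketch of the before/after shape follows; the names do_internal_work and grpc_api_entry_point are hypothetical stand-ins for the real functions shown in the hunks, and the sketch assumes the grpc_core::ExecCtx type introduced by this refactor.

// Before: the exec_ctx was threaded by hand through every helper.
//   static void do_internal_work(grpc_exec_ctx* exec_ctx, grpc_call* call);
//   void grpc_api_entry_point(grpc_call* call) {
//     grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
//     do_internal_work(&exec_ctx, call);
//     grpc_exec_ctx_finish(&exec_ctx);  // easy to miss on early returns
//   }
//
// After: ExecCtx is an RAII object, so helpers lose the parameter and any
// pending closures are flushed when it goes out of scope.
static void do_internal_work(grpc_call* call);
void grpc_api_entry_point(grpc_call* call) {
  grpc_core::ExecCtx exec_ctx;  // flushed by the destructor
  do_internal_work(call);
}
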
diff --git a/src/core/lib/surface/call.h b/src/core/lib/surface/call.h
index 07c4e48..189329c 100644
--- a/src/core/lib/surface/call.h
+++ b/src/core/lib/surface/call.h
@@ -19,10 +19,6 @@
 #ifndef GRPC_CORE_LIB_SURFACE_CALL_H
 #define GRPC_CORE_LIB_SURFACE_CALL_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/channel/context.h"
 #include "src/core/lib/surface/api_trace.h"
@@ -30,8 +26,7 @@
 #include <grpc/grpc.h>
 #include <grpc/impl/codegen/compression_types.h>
 
-typedef void (*grpc_ioreq_completion_func)(grpc_exec_ctx* exec_ctx,
-                                           grpc_call* call, int success,
+typedef void (*grpc_ioreq_completion_func)(grpc_call* call, int success,
                                            void* user_data);
 
 typedef struct grpc_call_create_args {
@@ -55,33 +50,28 @@
 /* Create a new call based on \a args.
    Regardless of success or failure, always returns a valid new call into *call
    */
-grpc_error* grpc_call_create(grpc_exec_ctx* exec_ctx,
-                             const grpc_call_create_args* args,
+grpc_error* grpc_call_create(const grpc_call_create_args* args,
                              grpc_call** call);
 
-void grpc_call_set_completion_queue(grpc_exec_ctx* exec_ctx, grpc_call* call,
-                                    grpc_completion_queue* cq);
+void grpc_call_set_completion_queue(grpc_call* call, grpc_completion_queue* cq);
 
 #ifndef NDEBUG
 void grpc_call_internal_ref(grpc_call* call, const char* reason);
-void grpc_call_internal_unref(grpc_exec_ctx* exec_ctx, grpc_call* call,
-                              const char* reason);
+void grpc_call_internal_unref(grpc_call* call, const char* reason);
 #define GRPC_CALL_INTERNAL_REF(call, reason) \
   grpc_call_internal_ref(call, reason)
-#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
-  grpc_call_internal_unref(exec_ctx, call, reason)
+#define GRPC_CALL_INTERNAL_UNREF(call, reason) \
+  grpc_call_internal_unref(call, reason)
 #else
 void grpc_call_internal_ref(grpc_call* call);
-void grpc_call_internal_unref(grpc_exec_ctx* exec_ctx, grpc_call* call);
+void grpc_call_internal_unref(grpc_call* call);
 #define GRPC_CALL_INTERNAL_REF(call, reason) grpc_call_internal_ref(call)
-#define GRPC_CALL_INTERNAL_UNREF(exec_ctx, call, reason) \
-  grpc_call_internal_unref(exec_ctx, call)
+#define GRPC_CALL_INTERNAL_UNREF(call, reason) grpc_call_internal_unref(call)
 #endif
 
 grpc_call_stack* grpc_call_get_call_stack(grpc_call* call);
 
-grpc_call_error grpc_call_start_batch_and_execute(grpc_exec_ctx* exec_ctx,
-                                                  grpc_call* call,
+grpc_call_error grpc_call_start_batch_and_execute(grpc_call* call,
                                                   const grpc_op* ops,
                                                   size_t nops,
                                                   grpc_closure* closure);
@@ -114,8 +104,4 @@
 extern grpc_core::TraceFlag grpc_call_error_trace;
 extern grpc_core::TraceFlag grpc_compression_trace;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_CALL_H */
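
The #ifndef NDEBUG split above keeps the ref/unref reason strings only in debug builds. A small illustrative expansion of a call site, where "some_reason" is a hypothetical label rather than one taken from this patch:

  GRPC_CALL_INTERNAL_REF(call, "some_reason");
  GRPC_CALL_INTERNAL_UNREF(call, "some_reason");
  // Debug build (NDEBUG not defined):
  //   grpc_call_internal_ref(call, "some_reason");
  //   grpc_call_internal_unref(call, "some_reason");
  // Release build (NDEBUG defined): the reason argument is compiled away:
  //   grpc_call_internal_ref(call);
  //   grpc_call_internal_unref(call);
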
diff --git a/src/core/lib/surface/call_details.cc b/src/core/lib/surface/call_details.cc
index ea9208c..cd0b145 100644
--- a/src/core/lib/surface/call_details.cc
+++ b/src/core/lib/surface/call_details.cc
@@ -34,8 +34,7 @@
 
 void grpc_call_details_destroy(grpc_call_details* cd) {
   GRPC_API_TRACE("grpc_call_details_destroy(cd=%p)", 1, (cd));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice_unref_internal(&exec_ctx, cd->method);
-  grpc_slice_unref_internal(&exec_ctx, cd->host);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_slice_unref_internal(cd->method);
+  grpc_slice_unref_internal(cd->host);
 }
diff --git a/src/core/lib/surface/call_test_only.h b/src/core/lib/surface/call_test_only.h
index 2ff4a48..90444f8 100644
--- a/src/core/lib/surface/call_test_only.h
+++ b/src/core/lib/surface/call_test_only.h
@@ -21,10 +21,6 @@
 
 #include <grpc/grpc.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /** Return the compression algorithm from \a call.
  *
  * \warning This function should \b only be used in test code. */
@@ -54,8 +50,4 @@
 grpc_stream_compression_algorithm
 grpc_call_test_only_get_incoming_stream_encodings(grpc_call* call);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_CALL_TEST_ONLY_H */
diff --git a/src/core/lib/surface/channel.cc b/src/core/lib/surface/channel.cc
index 1be734c..cf5e8c2 100644
--- a/src/core/lib/surface/channel.cc
+++ b/src/core/lib/surface/channel.cc
@@ -69,23 +69,22 @@
 #define CHANNEL_FROM_TOP_ELEM(top_elem) \
   CHANNEL_FROM_CHANNEL_STACK(grpc_channel_stack_from_top_element(top_elem))
 
-static void destroy_channel(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error);
+static void destroy_channel(void* arg, grpc_error* error);
 
 grpc_channel* grpc_channel_create_with_builder(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
+    grpc_channel_stack_builder* builder,
     grpc_channel_stack_type channel_stack_type) {
   char* target = gpr_strdup(grpc_channel_stack_builder_get_target(builder));
   grpc_channel_args* args = grpc_channel_args_copy(
       grpc_channel_stack_builder_get_channel_arguments(builder));
   grpc_channel* channel;
   if (channel_stack_type == GRPC_SERVER_CHANNEL) {
-    GRPC_STATS_INC_SERVER_CHANNELS_CREATED(exec_ctx);
+    GRPC_STATS_INC_SERVER_CHANNELS_CREATED();
   } else {
-    GRPC_STATS_INC_CLIENT_CHANNELS_CREATED(exec_ctx);
+    GRPC_STATS_INC_CLIENT_CHANNELS_CREATED();
   }
   grpc_error* error = grpc_channel_stack_builder_finish(
-      exec_ctx, builder, sizeof(grpc_channel), 1, destroy_channel, nullptr,
+      builder, sizeof(grpc_channel), 1, destroy_channel, nullptr,
       (void**)&channel);
   if (error != GRPC_ERROR_NONE) {
     gpr_log(GPR_ERROR, "channel stack builder failed: %s",
@@ -114,10 +113,10 @@
       } else {
         if (!GRPC_MDISNULL(channel->default_authority)) {
           /* setting this takes precedence over anything else */
-          GRPC_MDELEM_UNREF(exec_ctx, channel->default_authority);
+          GRPC_MDELEM_UNREF(channel->default_authority);
         }
         channel->default_authority = grpc_mdelem_from_slices(
-            exec_ctx, GRPC_MDSTR_AUTHORITY,
+            GRPC_MDSTR_AUTHORITY,
             grpc_slice_intern(
                 grpc_slice_from_static_string(args->args[i].value.string)));
       }
@@ -134,7 +133,7 @@
                   GRPC_SSL_TARGET_NAME_OVERRIDE_ARG);
         } else {
           channel->default_authority = grpc_mdelem_from_slices(
-              exec_ctx, GRPC_MDSTR_AUTHORITY,
+              GRPC_MDSTR_AUTHORITY,
               grpc_slice_intern(
                   grpc_slice_from_static_string(args->args[i].value.string)));
         }
@@ -191,25 +190,23 @@
   }
 
 done:
-  grpc_channel_args_destroy(exec_ctx, args);
+  grpc_channel_args_destroy(args);
   return channel;
 }
 
-grpc_channel* grpc_channel_create(grpc_exec_ctx* exec_ctx, const char* target,
+grpc_channel* grpc_channel_create(const char* target,
                                   const grpc_channel_args* input_args,
                                   grpc_channel_stack_type channel_stack_type,
                                   grpc_transport* optional_transport) {
   grpc_channel_stack_builder* builder = grpc_channel_stack_builder_create();
-  grpc_channel_stack_builder_set_channel_arguments(exec_ctx, builder,
-                                                   input_args);
+  grpc_channel_stack_builder_set_channel_arguments(builder, input_args);
   grpc_channel_stack_builder_set_target(builder, target);
   grpc_channel_stack_builder_set_transport(builder, optional_transport);
-  if (!grpc_channel_init_create_stack(exec_ctx, builder, channel_stack_type)) {
-    grpc_channel_stack_builder_destroy(exec_ctx, builder);
+  if (!grpc_channel_init_create_stack(builder, channel_stack_type)) {
+    grpc_channel_stack_builder_destroy(builder);
     return nullptr;
   }
-  return grpc_channel_create_with_builder(exec_ctx, builder,
-                                          channel_stack_type);
+  return grpc_channel_create_with_builder(builder, channel_stack_type);
 }
 
 size_t grpc_channel_get_call_size_estimate(grpc_channel* channel) {
@@ -251,18 +248,17 @@
 
 void grpc_channel_get_info(grpc_channel* channel,
                            const grpc_channel_info* channel_info) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_channel_element* elem =
       grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
-  elem->filter->get_channel_info(&exec_ctx, elem, channel_info);
-  grpc_exec_ctx_finish(&exec_ctx);
+  elem->filter->get_channel_info(elem, channel_info);
 }
 
 static grpc_call* grpc_channel_create_call_internal(
-    grpc_exec_ctx* exec_ctx, grpc_channel* channel, grpc_call* parent_call,
-    uint32_t propagation_mask, grpc_completion_queue* cq,
-    grpc_pollset_set* pollset_set_alternative, grpc_mdelem path_mdelem,
-    grpc_mdelem authority_mdelem, grpc_millis deadline) {
+    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
+    grpc_completion_queue* cq, grpc_pollset_set* pollset_set_alternative,
+    grpc_mdelem path_mdelem, grpc_mdelem authority_mdelem,
+    grpc_millis deadline) {
   grpc_mdelem send_metadata[2];
   size_t num_metadata = 0;
 
@@ -289,7 +285,7 @@
   args.send_deadline = deadline;
 
   grpc_call* call;
-  GRPC_LOG_IF_ERROR("call_create", grpc_call_create(exec_ctx, &args, &call));
+  GRPC_LOG_IF_ERROR("call_create", grpc_call_create(&args, &call));
   return call;
 }
 
@@ -300,29 +296,27 @@
                                     grpc_slice method, const grpc_slice* host,
                                     gpr_timespec deadline, void* reserved) {
   GPR_ASSERT(!reserved);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_call* call = grpc_channel_create_call_internal(
-      &exec_ctx, channel, parent_call, propagation_mask, cq, nullptr,
-      grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_PATH,
-                              grpc_slice_ref_internal(method)),
-      host != nullptr ? grpc_mdelem_from_slices(&exec_ctx, GRPC_MDSTR_AUTHORITY,
+      channel, parent_call, propagation_mask, cq, nullptr,
+      grpc_mdelem_from_slices(GRPC_MDSTR_PATH, grpc_slice_ref_internal(method)),
+      host != nullptr ? grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
                                                 grpc_slice_ref_internal(*host))
                       : GRPC_MDNULL,
       grpc_timespec_to_millis_round_up(deadline));
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return call;
 }
 
 grpc_call* grpc_channel_create_pollset_set_call(
-    grpc_exec_ctx* exec_ctx, grpc_channel* channel, grpc_call* parent_call,
-    uint32_t propagation_mask, grpc_pollset_set* pollset_set, grpc_slice method,
-    const grpc_slice* host, grpc_millis deadline, void* reserved) {
+    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
+    grpc_pollset_set* pollset_set, grpc_slice method, const grpc_slice* host,
+    grpc_millis deadline, void* reserved) {
   GPR_ASSERT(!reserved);
   return grpc_channel_create_call_internal(
-      exec_ctx, channel, parent_call, propagation_mask, nullptr, pollset_set,
-      grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH,
-                              grpc_slice_ref_internal(method)),
-      host != nullptr ? grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_AUTHORITY,
+      channel, parent_call, propagation_mask, nullptr, pollset_set,
+      grpc_mdelem_from_slices(GRPC_MDSTR_PATH, grpc_slice_ref_internal(method)),
+      host != nullptr ? grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
                                                 grpc_slice_ref_internal(*host))
                       : GRPC_MDNULL,
       deadline);
@@ -335,21 +329,21 @@
       "grpc_channel_register_call(channel=%p, method=%s, host=%s, reserved=%p)",
       4, (channel, method, host, reserved));
   GPR_ASSERT(!reserved);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   rc->path = grpc_mdelem_from_slices(
-      &exec_ctx, GRPC_MDSTR_PATH,
+      GRPC_MDSTR_PATH,
       grpc_slice_intern(grpc_slice_from_static_string(method)));
   rc->authority =
       host ? grpc_mdelem_from_slices(
-                 &exec_ctx, GRPC_MDSTR_AUTHORITY,
+                 GRPC_MDSTR_AUTHORITY,
                  grpc_slice_intern(grpc_slice_from_static_string(host)))
            : GRPC_MDNULL;
   gpr_mu_lock(&channel->registered_call_mu);
   rc->next = channel->registered_calls;
   channel->registered_calls = rc;
   gpr_mu_unlock(&channel->registered_call_mu);
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return rc;
 }
 
@@ -370,12 +364,12 @@
        registered_call_handle, deadline.tv_sec, deadline.tv_nsec,
        (int)deadline.clock_type, reserved));
   GPR_ASSERT(!reserved);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_call* call = grpc_channel_create_call_internal(
-      &exec_ctx, channel, parent_call, propagation_mask, completion_queue,
-      nullptr, GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority),
+      channel, parent_call, propagation_mask, completion_queue, nullptr,
+      GRPC_MDELEM_REF(rc->path), GRPC_MDELEM_REF(rc->authority),
       grpc_timespec_to_millis_round_up(deadline));
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return call;
 }
 
@@ -390,23 +384,21 @@
   GRPC_CHANNEL_STACK_REF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
 }
 
-void grpc_channel_internal_unref(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel* c REF_ARG) {
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
+void grpc_channel_internal_unref(grpc_channel* c REF_ARG) {
+  GRPC_CHANNEL_STACK_UNREF(CHANNEL_STACK_FROM_CHANNEL(c), REF_REASON);
 }
 
-static void destroy_channel(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+static void destroy_channel(void* arg, grpc_error* error) {
   grpc_channel* channel = (grpc_channel*)arg;
-  grpc_channel_stack_destroy(exec_ctx, CHANNEL_STACK_FROM_CHANNEL(channel));
+  grpc_channel_stack_destroy(CHANNEL_STACK_FROM_CHANNEL(channel));
   while (channel->registered_calls) {
     registered_call* rc = channel->registered_calls;
     channel->registered_calls = rc->next;
-    GRPC_MDELEM_UNREF(exec_ctx, rc->path);
-    GRPC_MDELEM_UNREF(exec_ctx, rc->authority);
+    GRPC_MDELEM_UNREF(rc->path);
+    GRPC_MDELEM_UNREF(rc->authority);
     gpr_free(rc);
   }
-  GRPC_MDELEM_UNREF(exec_ctx, channel->default_authority);
+  GRPC_MDELEM_UNREF(channel->default_authority);
   gpr_mu_destroy(&channel->registered_call_mu);
   gpr_free(channel->target);
   gpr_free(channel);
@@ -415,16 +407,14 @@
 void grpc_channel_destroy(grpc_channel* channel) {
   grpc_transport_op* op = grpc_make_transport_op(nullptr);
   grpc_channel_element* elem;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GRPC_API_TRACE("grpc_channel_destroy(channel=%p)", 1, (channel));
   op->disconnect_with_error =
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel Destroyed");
   elem = grpc_channel_stack_element(CHANNEL_STACK_FROM_CHANNEL(channel), 0);
-  elem->filter->start_transport_op(&exec_ctx, elem, op);
+  elem->filter->start_transport_op(elem, op);
 
-  GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, channel, "channel");
-
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_CHANNEL_INTERNAL_UNREF(channel, "channel");
 }
 
 grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel) {
@@ -436,8 +426,7 @@
   return channel->compression_options;
 }
 
-grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx* exec_ctx,
-                                                grpc_channel* channel, int i) {
+grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_channel* channel, int i) {
   char tmp[GPR_LTOA_MIN_BUFSIZE];
   switch (i) {
     case 0:
@@ -448,6 +437,6 @@
       return GRPC_MDELEM_GRPC_STATUS_2;
   }
   gpr_ltoa(i, tmp);
-  return grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_GRPC_STATUS,
+  return grpc_mdelem_from_slices(GRPC_MDSTR_GRPC_STATUS,
                                  grpc_slice_from_copied_string(tmp));
 }
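
The hunks above show the pattern this whole merge applies to the surface layer: the explicit grpc_exec_ctx* argument that used to be threaded through every call is replaced by a grpc_core::ExecCtx constructed on the stack, and the trailing grpc_exec_ctx_finish() calls disappear because the destructor flushes pending work. A rough, self-contained sketch of that RAII/thread-local idea (the class below is a toy stand-in, not the real grpc_core::ExecCtx):

    // Minimal standalone sketch of the RAII/thread-local pattern behind
    // grpc_core::ExecCtx in this refactor. Names here are illustrative only.
    #include <functional>
    #include <iostream>
    #include <vector>

    class ExecCtx {
     public:
      ExecCtx() { current_ = this; }                // publish on construction
      ~ExecCtx() { Flush(); current_ = nullptr; }   // drain pending work on scope exit

      static ExecCtx* Get() { return current_; }    // replaces passing exec_ctx* around

      void Run(std::function<void()> closure) { pending_.push_back(std::move(closure)); }

      void Flush() {
        while (!pending_.empty()) {
          auto closure = std::move(pending_.back());
          pending_.pop_back();
          closure();
        }
      }

     private:
      static thread_local ExecCtx* current_;
      std::vector<std::function<void()>> pending_;
    };

    thread_local ExecCtx* ExecCtx::current_ = nullptr;

    // A callee no longer needs an exec_ctx parameter; it reaches the
    // thread-local instance instead.
    void schedule_hello() {
      ExecCtx::Get()->Run([] { std::cout << "closure ran at scope exit\n"; });
    }

    int main() {
      ExecCtx exec_ctx;   // like the `grpc_core::ExecCtx exec_ctx;` lines in the diff
      schedule_hello();   // closures queued here...
    }                     // ...run when exec_ctx is destroyed (no grpc_exec_ctx_finish)

Callees reach the context through a static accessor instead of a parameter, which is why so many signatures in this diff simply lose their first argument.
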
diff --git a/src/core/lib/surface/channel.h b/src/core/lib/surface/channel.h
index 063e685..26d8fce 100644
--- a/src/core/lib/surface/channel.h
+++ b/src/core/lib/surface/channel.h
@@ -23,17 +23,13 @@
 #include "src/core/lib/channel/channel_stack_builder.h"
 #include "src/core/lib/surface/channel_stack_type.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-grpc_channel* grpc_channel_create(grpc_exec_ctx* exec_ctx, const char* target,
+grpc_channel* grpc_channel_create(const char* target,
                                   const grpc_channel_args* args,
                                   grpc_channel_stack_type channel_stack_type,
                                   grpc_transport* optional_transport);
 
 grpc_channel* grpc_channel_create_with_builder(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder,
+    grpc_channel_stack_builder* builder,
     grpc_channel_stack_type channel_stack_type);
 
 /** Create a call given a grpc_channel, in order to call \a method.
@@ -45,9 +41,9 @@
     properties from the server call to this new client call, depending on the
     value of \a propagation_mask (see propagation_bits.h for possible values) */
 grpc_call* grpc_channel_create_pollset_set_call(
-    grpc_exec_ctx* exec_ctx, grpc_channel* channel, grpc_call* parent_call,
-    uint32_t propagation_mask, grpc_pollset_set* pollset_set, grpc_slice method,
-    const grpc_slice* host, grpc_millis deadline, void* reserved);
+    grpc_channel* channel, grpc_call* parent_call, uint32_t propagation_mask,
+    grpc_pollset_set* pollset_set, grpc_slice method, const grpc_slice* host,
+    grpc_millis deadline, void* reserved);
 
 /** Get a (borrowed) pointer to this channel's underlying channel stack */
 grpc_channel_stack* grpc_channel_get_channel_stack(grpc_channel* channel);
@@ -56,8 +52,7 @@
     status_code.
 
     The returned elem is owned by the caller. */
-grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_exec_ctx* exec_ctx,
-                                                grpc_channel* channel,
+grpc_mdelem grpc_channel_get_reffed_status_elem(grpc_channel* channel,
                                                 int status_code);
 
 size_t grpc_channel_get_call_size_estimate(grpc_channel* channel);
@@ -65,28 +60,22 @@
 
 #ifndef NDEBUG
 void grpc_channel_internal_ref(grpc_channel* channel, const char* reason);
-void grpc_channel_internal_unref(grpc_exec_ctx* exec_ctx, grpc_channel* channel,
-                                 const char* reason);
+void grpc_channel_internal_unref(grpc_channel* channel, const char* reason);
 #define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
   grpc_channel_internal_ref(channel, reason)
-#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
-  grpc_channel_internal_unref(exec_ctx, channel, reason)
+#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
+  grpc_channel_internal_unref(channel, reason)
 #else
 void grpc_channel_internal_ref(grpc_channel* channel);
-void grpc_channel_internal_unref(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel* channel);
+void grpc_channel_internal_unref(grpc_channel* channel);
 #define GRPC_CHANNEL_INTERNAL_REF(channel, reason) \
   grpc_channel_internal_ref(channel)
-#define GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, channel, reason) \
-  grpc_channel_internal_unref(exec_ctx, channel)
+#define GRPC_CHANNEL_INTERNAL_UNREF(channel, reason) \
+  grpc_channel_internal_unref(channel)
 #endif
 
 /** Return the channel's compression options. */
 grpc_compression_options grpc_channel_compression_options(
     const grpc_channel* channel);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_H */
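
channel.h also drops its #ifdef __cplusplus / extern "C" { ... } wrappers, a change repeated in most headers below: with the core now built only as C++, the guards are unnecessary and the declarations take C++ linkage. A small hypothetical before/after sketch of that pattern (not an actual gRPC header):

    #include <iostream>

    // Before (commented): a header meant to be included from both C and C++
    // needed extern "C" guards to keep C linkage:
    //
    //   #ifdef __cplusplus
    //   extern "C" {
    //   #endif
    //   void widget_init(void);
    //   #ifdef __cplusplus
    //   }
    //   #endif
    //
    // After: once the header is consumed only by C++ translation units, the
    // guards can go, and the declarations may freely use C++ features.
    // Hypothetical example, not a gRPC header.
    namespace widget {
    void init() { std::cout << "C++ linkage, no extern \"C\" needed\n"; }
    }  // namespace widget

    int main() { widget::init(); }
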
diff --git a/src/core/lib/surface/channel_init.cc b/src/core/lib/surface/channel_init.cc
index b563537..95cbbbd 100644
--- a/src/core/lib/surface/channel_init.cc
+++ b/src/core/lib/surface/channel_init.cc
@@ -89,8 +89,7 @@
   }
 }
 
-bool grpc_channel_init_create_stack(grpc_exec_ctx* exec_ctx,
-                                    grpc_channel_stack_builder* builder,
+bool grpc_channel_init_create_stack(grpc_channel_stack_builder* builder,
                                     grpc_channel_stack_type type) {
   GPR_ASSERT(g_finalized);
 
@@ -99,7 +98,7 @@
 
   for (size_t i = 0; i < g_slots[type].num_slots; i++) {
     const stage_slot* slot = &g_slots[type].slots[i];
-    if (!slot->fn(exec_ctx, builder, slot->arg)) {
+    if (!slot->fn(builder, slot->arg)) {
       return false;
     }
   }
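
grpc_channel_init_create_stack keeps its shape here — walk the registered stage slots in order and let any stage veto construction — it just calls each stage without the context argument. A standalone sketch of that registry-of-stages idea, with illustrative names rather than the real grpc_channel_stack_builder API:

    #include <functional>
    #include <iostream>
    #include <vector>

    // Sketch of the "registered stage" pattern that create_stack iterates:
    // each stage gets the builder plus its own arg, and a false return
    // aborts stack construction. Names are illustrative.
    struct StackBuilder { std::vector<const char*> filters; };

    using InitStage = bool (*)(StackBuilder* builder, void* arg);

    struct StageSlot { InitStage fn; void* arg; };

    bool append_filter(StackBuilder* builder, void* arg) {
      builder->filters.push_back(static_cast<const char*>(arg));
      return true;
    }

    bool create_stack(const std::vector<StageSlot>& slots, StackBuilder* builder) {
      for (const StageSlot& slot : slots) {
        if (!slot.fn(builder, slot.arg)) return false;  // any stage can veto
      }
      return true;
    }

    int main() {
      StackBuilder builder;
      std::vector<StageSlot> slots = {
          {append_filter, (void*)"client-channel"},
          {append_filter, (void*)"http-filter"},
      };
      if (create_stack(slots, &builder)) {
        for (const char* f : builder.filters) std::cout << f << "\n";
      }
    }
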
diff --git a/src/core/lib/surface/channel_init.h b/src/core/lib/surface/channel_init.h
index 9932781..d702f0f 100644
--- a/src/core/lib/surface/channel_init.h
+++ b/src/core/lib/surface/channel_init.h
@@ -25,10 +25,6 @@
 
 #define GRPC_CHANNEL_INIT_BUILTIN_PRIORITY 10000
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /// This module provides a way for plugins (and the grpc core library itself)
 /// to register mutators for channel stacks.
 /// It also provides a universal entry path to run those mutators to build
@@ -36,8 +32,7 @@
 
 /// One stage of mutation: call functions against \a builder to influence the
 /// finally constructed channel stack
-typedef bool (*grpc_channel_init_stage)(grpc_exec_ctx* exec_ctx,
-                                        grpc_channel_stack_builder* builder,
+typedef bool (*grpc_channel_init_stage)(grpc_channel_stack_builder* builder,
                                         void* arg);
 
 /// Global initialization of the system
@@ -70,12 +65,7 @@
 /// \a optional_transport is either NULL or a constructed transport object
 /// Returns a pointer to the base of the memory allocated (the actual channel
 /// stack object will be prefix_bytes past that pointer)
-bool grpc_channel_init_create_stack(grpc_exec_ctx* exec_ctx,
-                                    grpc_channel_stack_builder* builder,
+bool grpc_channel_init_create_stack(grpc_channel_stack_builder* builder,
                                     grpc_channel_stack_type type);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_INIT_H */
diff --git a/src/core/lib/surface/channel_ping.cc b/src/core/lib/surface/channel_ping.cc
index e8f47f0..a030d8d 100644
--- a/src/core/lib/surface/channel_ping.cc
+++ b/src/core/lib/surface/channel_ping.cc
@@ -33,15 +33,14 @@
   grpc_cq_completion completion_storage;
 } ping_result;
 
-static void ping_destroy(grpc_exec_ctx* exec_ctx, void* arg,
-                         grpc_cq_completion* storage) {
+static void ping_destroy(void* arg, grpc_cq_completion* storage) {
   gpr_free(arg);
 }
 
-static void ping_done(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void ping_done(void* arg, grpc_error* error) {
   ping_result* pr = (ping_result*)arg;
-  grpc_cq_end_op(exec_ctx, pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy,
-                 pr, &pr->completion_storage);
+  grpc_cq_end_op(pr->cq, pr->tag, GRPC_ERROR_REF(error), ping_destroy, pr,
+                 &pr->completion_storage);
 }
 
 void grpc_channel_ping(grpc_channel* channel, grpc_completion_queue* cq,
@@ -52,14 +51,13 @@
   ping_result* pr = (ping_result*)gpr_malloc(sizeof(*pr));
   grpc_channel_element* top_elem =
       grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GPR_ASSERT(reserved == nullptr);
   pr->tag = tag;
   pr->cq = cq;
   GRPC_CLOSURE_INIT(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
-  op->send_ping = &pr->closure;
+  op->send_ping.on_ack = &pr->closure;
   op->bind_pollset = grpc_cq_pollset(cq);
   GPR_ASSERT(grpc_cq_begin_op(cq, tag));
-  top_elem->filter->start_transport_op(&exec_ctx, top_elem, op);
-  grpc_exec_ctx_finish(&exec_ctx);
+  top_elem->filter->start_transport_op(top_elem, op);
 }
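
channel_ping.cc also picks up an unrelated API change visible in this hunk: op->send_ping is no longer a single closure but carries separate on_initiate and on_ack callbacks, and the completion-queue notification is now attached to on_ack. A minimal sketch of that two-callback shape (the PingOp type below is illustrative, not the real grpc_transport_op):

    #include <cstdio>
    #include <functional>

    // Illustrative stand-in for the two-callback ping op: one notification
    // when the ping is written, one when the peer acknowledges it.
    struct PingOp {
      std::function<void()> on_initiate;  // ping has been sent
      std::function<void()> on_ack;       // ping has been acknowledged
    };

    void start_ping(PingOp& op) {
      if (op.on_initiate) op.on_initiate();
      // ... a transport would wait for the peer's ack here ...
      if (op.on_ack) op.on_ack();
    }

    int main() {
      PingOp op;
      // grpc_channel_ping now hangs its completion off on_ack, so the
      // completion queue event fires only once the ack arrives.
      op.on_ack = [] { std::puts("ping acked -> cq event would be posted"); };
      start_ping(op);
    }
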
diff --git a/src/core/lib/surface/channel_stack_type.h b/src/core/lib/surface/channel_stack_type.h
index feecd3a..52f85a6 100644
--- a/src/core/lib/surface/channel_stack_type.h
+++ b/src/core/lib/surface/channel_stack_type.h
@@ -21,10 +21,6 @@
 
 #include <stdbool.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef enum {
   // normal top-half client channel with load-balancing, connection management
   GRPC_CLIENT_CHANNEL,
@@ -46,8 +42,4 @@
 
 const char* grpc_channel_stack_type_string(grpc_channel_stack_type type);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_CHANNEL_STACK_TYPE_H */
diff --git a/src/core/lib/surface/completion_queue.cc b/src/core/lib/surface/completion_queue.cc
index 98d7e35..aa5808d 100644
--- a/src/core/lib/surface/completion_queue.cc
+++ b/src/core/lib/surface/completion_queue.cc
@@ -62,13 +62,12 @@
   bool can_listen;
   size_t (*size)(void);
   void (*init)(grpc_pollset* pollset, gpr_mu** mu);
-  grpc_error* (*kick)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+  grpc_error* (*kick)(grpc_pollset* pollset,
                       grpc_pollset_worker* specific_worker);
-  grpc_error* (*work)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                      grpc_pollset_worker** worker, grpc_millis deadline);
-  void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                   grpc_closure* closure);
-  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset);
+  grpc_error* (*work)(grpc_pollset* pollset, grpc_pollset_worker** worker,
+                      grpc_millis deadline);
+  void (*shutdown)(grpc_pollset* pollset, grpc_closure* closure);
+  void (*destroy)(grpc_pollset* pollset);
 } cq_poller_vtable;
 
 typedef struct non_polling_worker {
@@ -94,14 +93,12 @@
   *mu = &npp->mu;
 }
 
-static void non_polling_poller_destroy(grpc_exec_ctx* exec_ctx,
-                                       grpc_pollset* pollset) {
+static void non_polling_poller_destroy(grpc_pollset* pollset) {
   non_polling_poller* npp = (non_polling_poller*)pollset;
   gpr_mu_destroy(&npp->mu);
 }
 
-static grpc_error* non_polling_poller_work(grpc_exec_ctx* exec_ctx,
-                                           grpc_pollset* pollset,
+static grpc_error* non_polling_poller_work(grpc_pollset* pollset,
                                            grpc_pollset_worker** worker,
                                            grpc_millis deadline) {
   non_polling_poller* npp = (non_polling_poller*)pollset;
@@ -118,16 +115,16 @@
   }
   w.kicked = false;
   gpr_timespec deadline_ts =
-      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME);
+      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC);
   while (!npp->shutdown && !w.kicked &&
          !gpr_cv_wait(&w.cv, &npp->mu, deadline_ts))
     ;
-  grpc_exec_ctx_invalidate_now(exec_ctx);
+  grpc_core::ExecCtx::Get()->InvalidateNow();
   if (&w == npp->root) {
     npp->root = w.next;
     if (&w == npp->root) {
       if (npp->shutdown) {
-        GRPC_CLOSURE_SCHED(exec_ctx, npp->shutdown, GRPC_ERROR_NONE);
+        GRPC_CLOSURE_SCHED(npp->shutdown, GRPC_ERROR_NONE);
       }
       npp->root = nullptr;
     }
@@ -140,8 +137,7 @@
 }
 
 static grpc_error* non_polling_poller_kick(
-    grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-    grpc_pollset_worker* specific_worker) {
+    grpc_pollset* pollset, grpc_pollset_worker* specific_worker) {
   non_polling_poller* p = (non_polling_poller*)pollset;
   if (specific_worker == nullptr)
     specific_worker = (grpc_pollset_worker*)p->root;
@@ -155,14 +151,13 @@
   return GRPC_ERROR_NONE;
 }
 
-static void non_polling_poller_shutdown(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset* pollset,
+static void non_polling_poller_shutdown(grpc_pollset* pollset,
                                         grpc_closure* closure) {
   non_polling_poller* p = (non_polling_poller*)pollset;
   GPR_ASSERT(closure != nullptr);
   p->shutdown = closure;
   if (p->root == nullptr) {
-    GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
   } else {
     non_polling_worker* w = p->root;
     do {
@@ -189,13 +184,11 @@
   grpc_cq_completion_type cq_completion_type;
   size_t data_size;
   void (*init)(void* data);
-  void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cq);
+  void (*shutdown)(grpc_completion_queue* cq);
   void (*destroy)(void* data);
   bool (*begin_op)(grpc_completion_queue* cq, void* tag);
-  void (*end_op)(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cq, void* tag,
-                 grpc_error* error,
-                 void (*done)(grpc_exec_ctx* exec_ctx, void* done_arg,
-                              grpc_cq_completion* storage),
+  void (*end_op)(grpc_completion_queue* cq, void* tag, grpc_error* error,
+                 void (*done)(void* done_arg, grpc_cq_completion* storage),
                  void* done_arg, grpc_cq_completion* storage);
   grpc_event (*next)(grpc_completion_queue* cq, gpr_timespec deadline,
                      void* reserved);
@@ -280,31 +273,23 @@
 };
 
 /* Forward declarations */
-static void cq_finish_shutdown_next(grpc_exec_ctx* exec_ctx,
-                                    grpc_completion_queue* cq);
-static void cq_finish_shutdown_pluck(grpc_exec_ctx* exec_ctx,
-                                     grpc_completion_queue* cq);
-static void cq_shutdown_next(grpc_exec_ctx* exec_ctx,
-                             grpc_completion_queue* cq);
-static void cq_shutdown_pluck(grpc_exec_ctx* exec_ctx,
-                              grpc_completion_queue* cq);
+static void cq_finish_shutdown_next(grpc_completion_queue* cq);
+static void cq_finish_shutdown_pluck(grpc_completion_queue* cq);
+static void cq_shutdown_next(grpc_completion_queue* cq);
+static void cq_shutdown_pluck(grpc_completion_queue* cq);
 
 static bool cq_begin_op_for_next(grpc_completion_queue* cq, void* tag);
 static bool cq_begin_op_for_pluck(grpc_completion_queue* cq, void* tag);
 
-static void cq_end_op_for_next(grpc_exec_ctx* exec_ctx,
-                               grpc_completion_queue* cq, void* tag,
+static void cq_end_op_for_next(grpc_completion_queue* cq, void* tag,
                                grpc_error* error,
-                               void (*done)(grpc_exec_ctx* exec_ctx,
-                                            void* done_arg,
+                               void (*done)(void* done_arg,
                                             grpc_cq_completion* storage),
                                void* done_arg, grpc_cq_completion* storage);
 
-static void cq_end_op_for_pluck(grpc_exec_ctx* exec_ctx,
-                                grpc_completion_queue* cq, void* tag,
+static void cq_end_op_for_pluck(grpc_completion_queue* cq, void* tag,
                                 grpc_error* error,
-                                void (*done)(grpc_exec_ctx* exec_ctx,
-                                             void* done_arg,
+                                void (*done)(void* done_arg,
                                              grpc_cq_completion* storage),
                                 void* done_arg, grpc_cq_completion* storage);
 
@@ -346,8 +331,7 @@
     gpr_free(_ev);                                                         \
   }
 
-static void on_pollset_shutdown_done(grpc_exec_ctx* exec_ctx, void* cq,
-                                     grpc_error* error);
+static void on_pollset_shutdown_done(void* cq, grpc_error* error);
 
 void grpc_cq_global_init() {
   gpr_tls_init(&g_cached_event);
@@ -369,19 +353,18 @@
   if (storage != nullptr &&
       (grpc_completion_queue*)gpr_tls_get(&g_cached_cq) == cq) {
     *tag = storage->tag;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     *ok = (storage->next & (uintptr_t)(1)) == 1;
-    storage->done(&exec_ctx, storage->done_arg, storage);
+    storage->done(storage->done_arg, storage);
     ret = 1;
     cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
     if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
       GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
       gpr_mu_lock(cq->mu);
-      cq_finish_shutdown_next(&exec_ctx, cq);
+      cq_finish_shutdown_next(cq);
       gpr_mu_unlock(cq->mu);
-      GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "shutting_down");
+      GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
     }
-    grpc_exec_ctx_finish(&exec_ctx);
   }
   gpr_tls_set(&g_cached_event, (intptr_t)0);
   gpr_tls_set(&g_cached_cq, (intptr_t)0);
@@ -406,24 +389,22 @@
 
 static grpc_cq_completion* cq_event_queue_pop(grpc_cq_event_queue* q) {
   grpc_cq_completion* c = nullptr;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   if (gpr_spinlock_trylock(&q->queue_lock)) {
-    GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES(&exec_ctx);
+    GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_SUCCESSES();
 
     bool is_empty = false;
     c = (grpc_cq_completion*)gpr_mpscq_pop_and_check_end(&q->queue, &is_empty);
     gpr_spinlock_unlock(&q->queue_lock);
 
     if (c == nullptr && !is_empty) {
-      GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES(&exec_ctx);
+      GRPC_STATS_INC_CQ_EV_QUEUE_TRANSIENT_POP_FAILURES();
     }
   } else {
-    GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES(&exec_ctx);
+    GRPC_STATS_INC_CQ_EV_QUEUE_TRYLOCK_FAILURES();
   }
 
-  grpc_exec_ctx_finish(&exec_ctx);
-
   if (c) {
     gpr_atm_no_barrier_fetch_add(&q->num_queue_items, -1);
   }
@@ -453,9 +434,8 @@
   const cq_poller_vtable* poller_vtable =
       &g_poller_vtable_by_poller_type[polling_type];
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GRPC_STATS_INC_CQS_CREATED(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GRPC_STATS_INC_CQS_CREATED();
 
   cq = (grpc_completion_queue*)gpr_zalloc(sizeof(grpc_completion_queue) +
                                           vtable->data_size +
@@ -537,15 +517,14 @@
   gpr_ref(&cq->owning_refs);
 }
 
-static void on_pollset_shutdown_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                     grpc_error* error) {
+static void on_pollset_shutdown_done(void* arg, grpc_error* error) {
   grpc_completion_queue* cq = (grpc_completion_queue*)arg;
-  GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "pollset_destroy");
+  GRPC_CQ_INTERNAL_UNREF(cq, "pollset_destroy");
 }
 
 #ifndef NDEBUG
-void grpc_cq_internal_unref(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cq,
-                            const char* reason, const char* file, int line) {
+void grpc_cq_internal_unref(grpc_completion_queue* cq, const char* reason,
+                            const char* file, int line) {
   if (grpc_trace_cq_refcount.enabled()) {
     gpr_atm val = gpr_atm_no_barrier_load(&cq->owning_refs.count);
     gpr_log(file, line, GPR_LOG_SEVERITY_DEBUG,
@@ -553,12 +532,11 @@
             reason);
   }
 #else
-void grpc_cq_internal_unref(grpc_exec_ctx* exec_ctx,
-                            grpc_completion_queue* cq) {
+void grpc_cq_internal_unref(grpc_completion_queue* cq) {
 #endif
   if (gpr_unref(&cq->owning_refs)) {
     cq->vtable->destroy(DATA_FROM_CQ(cq));
-    cq->poller_vtable->destroy(exec_ctx, POLLSET_FROM_CQ(cq));
+    cq->poller_vtable->destroy(POLLSET_FROM_CQ(cq));
 #ifndef NDEBUG
     gpr_free(cq->outstanding_tags);
 #endif
@@ -639,11 +617,9 @@
 /* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a
  * completion
  * type of GRPC_CQ_NEXT) */
-static void cq_end_op_for_next(grpc_exec_ctx* exec_ctx,
-                               grpc_completion_queue* cq, void* tag,
+static void cq_end_op_for_next(grpc_completion_queue* cq, void* tag,
                                grpc_error* error,
-                               void (*done)(grpc_exec_ctx* exec_ctx,
-                                            void* done_arg,
+                               void (*done)(void* done_arg,
                                             grpc_cq_completion* storage),
                                void* done_arg, grpc_cq_completion* storage) {
   GPR_TIMER_BEGIN("cq_end_op_for_next", 0);
@@ -652,9 +628,9 @@
       (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE)) {
     const char* errmsg = grpc_error_string(error);
     GRPC_API_TRACE(
-        "cq_end_op_for_next(exec_ctx=%p, cq=%p, tag=%p, error=%s, "
+        "cq_end_op_for_next(cq=%p, tag=%p, error=%s, "
         "done=%p, done_arg=%p, storage=%p)",
-        7, (exec_ctx, cq, tag, errmsg, done, done_arg, storage));
+        6, (cq, tag, errmsg, done, done_arg, storage));
     if (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE) {
       gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
     }
@@ -689,7 +665,7 @@
       if (is_first) {
         gpr_mu_lock(cq->mu);
         grpc_error* kick_error =
-            cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), nullptr);
+            cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), nullptr);
         gpr_mu_unlock(cq->mu);
 
         if (kick_error != GRPC_ERROR_NONE) {
@@ -701,17 +677,17 @@
       if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
         GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
         gpr_mu_lock(cq->mu);
-        cq_finish_shutdown_next(exec_ctx, cq);
+        cq_finish_shutdown_next(cq);
         gpr_mu_unlock(cq->mu);
-        GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
+        GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
       }
     } else {
       GRPC_CQ_INTERNAL_REF(cq, "shutting_down");
       gpr_atm_rel_store(&cqd->pending_events, 0);
       gpr_mu_lock(cq->mu);
-      cq_finish_shutdown_next(exec_ctx, cq);
+      cq_finish_shutdown_next(cq);
       gpr_mu_unlock(cq->mu);
-      GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
+      GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
     }
   }
 
@@ -723,11 +699,9 @@
 /* Queue a GRPC_OP_COMPLETED operation to a completion queue (with a
  * completion
  * type of GRPC_CQ_PLUCK) */
-static void cq_end_op_for_pluck(grpc_exec_ctx* exec_ctx,
-                                grpc_completion_queue* cq, void* tag,
+static void cq_end_op_for_pluck(grpc_completion_queue* cq, void* tag,
                                 grpc_error* error,
-                                void (*done)(grpc_exec_ctx* exec_ctx,
-                                             void* done_arg,
+                                void (*done)(void* done_arg,
                                              grpc_cq_completion* storage),
                                 void* done_arg, grpc_cq_completion* storage) {
   cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
@@ -739,9 +713,9 @@
       (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE)) {
     const char* errmsg = grpc_error_string(error);
     GRPC_API_TRACE(
-        "cq_end_op_for_pluck(exec_ctx=%p, cq=%p, tag=%p, error=%s, "
+        "cq_end_op_for_pluck(cq=%p, tag=%p, error=%s, "
         "done=%p, done_arg=%p, storage=%p)",
-        7, (exec_ctx, cq, tag, errmsg, done, done_arg, storage));
+        6, (cq, tag, errmsg, done, done_arg, storage));
     if (grpc_trace_operation_failures.enabled() && error != GRPC_ERROR_NONE) {
       gpr_log(GPR_ERROR, "Operation failed: tag=%p, error=%s", tag, errmsg);
     }
@@ -762,7 +736,7 @@
   cqd->completed_tail = storage;
 
   if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
-    cq_finish_shutdown_pluck(exec_ctx, cq);
+    cq_finish_shutdown_pluck(cq);
     gpr_mu_unlock(cq->mu);
   } else {
     grpc_pollset_worker* pluck_worker = nullptr;
@@ -774,7 +748,7 @@
     }
 
     grpc_error* kick_error =
-        cq->poller_vtable->kick(exec_ctx, POLLSET_FROM_CQ(cq), pluck_worker);
+        cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), pluck_worker);
 
     gpr_mu_unlock(cq->mu);
 
@@ -791,12 +765,10 @@
   GRPC_ERROR_UNREF(error);
 }
 
-void grpc_cq_end_op(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cq,
-                    void* tag, grpc_error* error,
-                    void (*done)(grpc_exec_ctx* exec_ctx, void* done_arg,
-                                 grpc_cq_completion* storage),
+void grpc_cq_end_op(grpc_completion_queue* cq, void* tag, grpc_error* error,
+                    void (*done)(void* done_arg, grpc_cq_completion* storage),
                     void* done_arg, grpc_cq_completion* storage) {
-  cq->vtable->end_op(exec_ctx, cq, tag, error, done, done_arg, storage);
+  cq->vtable->end_op(cq, tag, error, done, done_arg, storage);
 }
 
 typedef struct {
@@ -808,31 +780,40 @@
   bool first_loop;
 } cq_is_finished_arg;
 
-static bool cq_is_next_finished(grpc_exec_ctx* exec_ctx, void* arg) {
-  cq_is_finished_arg* a = (cq_is_finished_arg*)arg;
-  grpc_completion_queue* cq = a->cq;
-  cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
-  GPR_ASSERT(a->stolen_completion == nullptr);
+class ExecCtxNext : public grpc_core::ExecCtx {
+ public:
+  ExecCtxNext(void* arg) : ExecCtx(0), check_ready_to_finish_arg_(arg) {}
 
-  gpr_atm current_last_seen_things_queued_ever =
-      gpr_atm_no_barrier_load(&cqd->things_queued_ever);
+  bool CheckReadyToFinish() override {
+    cq_is_finished_arg* a = (cq_is_finished_arg*)check_ready_to_finish_arg_;
+    grpc_completion_queue* cq = a->cq;
+    cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
+    GPR_ASSERT(a->stolen_completion == nullptr);
 
-  if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
-    a->last_seen_things_queued_ever =
+    gpr_atm current_last_seen_things_queued_ever =
         gpr_atm_no_barrier_load(&cqd->things_queued_ever);
 
-    /* Pop a cq_completion from the queue. Returns NULL if the queue is empty
-     * might return NULL in some cases even if the queue is not empty; but
-     * that
-     * is ok and doesn't affect correctness. Might effect the tail latencies a
-     * bit) */
-    a->stolen_completion = cq_event_queue_pop(&cqd->queue);
-    if (a->stolen_completion != nullptr) {
-      return true;
+    if (current_last_seen_things_queued_ever !=
+        a->last_seen_things_queued_ever) {
+      a->last_seen_things_queued_ever =
+          gpr_atm_no_barrier_load(&cqd->things_queued_ever);
+
+      /* Pop a cq_completion from the queue. Returns NULL if the queue
+       * is empty; it might also return NULL in some cases even when
+       * the queue is not empty, but that is ok and does not affect
+       * correctness, though it might affect the tail latencies a
+       * bit. */
+      a->stolen_completion = cq_event_queue_pop(&cqd->queue);
+      if (a->stolen_completion != nullptr) {
+        return true;
+      }
     }
+    return !a->first_loop && a->deadline < grpc_core::ExecCtx::Get()->Now();
   }
-  return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx);
-}
+
+ private:
+  void* check_ready_to_finish_arg_;
+};
 
 #ifndef NDEBUG
 static void dump_pending_tags(grpc_completion_queue* cq) {
@@ -887,8 +868,7 @@
       nullptr,
       nullptr,
       true};
-  grpc_exec_ctx exec_ctx =
-      GRPC_EXEC_CTX_INITIALIZER(0, cq_is_next_finished, &is_finished_arg);
+  ExecCtxNext exec_ctx(&is_finished_arg);
   for (;;) {
     grpc_millis iteration_deadline = deadline_millis;
 
@@ -898,7 +878,7 @@
       ret.type = GRPC_OP_COMPLETE;
       ret.success = c->next & 1u;
       ret.tag = c->tag;
-      c->done(&exec_ctx, c->done_arg, c);
+      c->done(c->done_arg, c);
       break;
     }
 
@@ -908,7 +888,7 @@
       ret.type = GRPC_OP_COMPLETE;
       ret.success = c->next & 1u;
       ret.tag = c->tag;
-      c->done(&exec_ctx, c->done_arg, c);
+      c->done(c->done_arg, c);
       break;
     } else {
       /* If c == NULL it means either the queue is empty OR in a transient
@@ -939,7 +919,7 @@
     }
 
     if (!is_finished_arg.first_loop &&
-        grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) {
+        grpc_core::ExecCtx::Get()->Now() >= deadline_millis) {
       memset(&ret, 0, sizeof(ret));
       ret.type = GRPC_QUEUE_TIMEOUT;
       dump_pending_tags(cq);
@@ -949,8 +929,8 @@
     /* The main polling work happens in grpc_pollset_work */
     gpr_mu_lock(cq->mu);
     cq->num_polls++;
-    grpc_error* err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
-                                              nullptr, iteration_deadline);
+    grpc_error* err = cq->poller_vtable->work(POLLSET_FROM_CQ(cq), nullptr,
+                                              iteration_deadline);
     gpr_mu_unlock(cq->mu);
 
     if (err != GRPC_ERROR_NONE) {
@@ -969,13 +949,13 @@
   if (cq_event_queue_num_items(&cqd->queue) > 0 &&
       gpr_atm_acq_load(&cqd->pending_events) > 0) {
     gpr_mu_lock(cq->mu);
-    cq->poller_vtable->kick(&exec_ctx, POLLSET_FROM_CQ(cq), nullptr);
+    cq->poller_vtable->kick(POLLSET_FROM_CQ(cq), nullptr);
     gpr_mu_unlock(cq->mu);
   }
 
   GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret);
-  GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "next");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_CQ_INTERNAL_UNREF(cq, "next");
+
   GPR_ASSERT(is_finished_arg.stolen_completion == nullptr);
 
   GPR_TIMER_END("grpc_completion_queue_next", 0);
@@ -989,19 +969,16 @@
    - Must be called only once in completion queue's lifetime
    - grpc_completion_queue_shutdown() MUST have been called before calling
    this function */
-static void cq_finish_shutdown_next(grpc_exec_ctx* exec_ctx,
-                                    grpc_completion_queue* cq) {
+static void cq_finish_shutdown_next(grpc_completion_queue* cq) {
   cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
 
   GPR_ASSERT(cqd->shutdown_called);
   GPR_ASSERT(gpr_atm_no_barrier_load(&cqd->pending_events) == 0);
 
-  cq->poller_vtable->shutdown(exec_ctx, POLLSET_FROM_CQ(cq),
-                              &cq->pollset_shutdown_done);
+  cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
 }
 
-static void cq_shutdown_next(grpc_exec_ctx* exec_ctx,
-                             grpc_completion_queue* cq) {
+static void cq_shutdown_next(grpc_completion_queue* cq) {
   cq_next_data* cqd = (cq_next_data*)DATA_FROM_CQ(cq);
 
   /* Need an extra ref for cq here because:
@@ -1014,7 +991,7 @@
   gpr_mu_lock(cq->mu);
   if (cqd->shutdown_called) {
     gpr_mu_unlock(cq->mu);
-    GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
+    GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
     return;
   }
   cqd->shutdown_called = true;
@@ -1022,10 +999,10 @@
   * cq_begin_op_for_next and cq_end_op_for_next functions which read/write
    * on this counter without necessarily holding a lock on cq */
   if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
-    cq_finish_shutdown_next(exec_ctx, cq);
+    cq_finish_shutdown_next(cq);
   }
   gpr_mu_unlock(cq->mu);
-  GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down");
+  GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down");
 }
 
 grpc_event grpc_completion_queue_next(grpc_completion_queue* cq,
@@ -1058,37 +1035,46 @@
   GPR_UNREACHABLE_CODE(return );
 }
 
-static bool cq_is_pluck_finished(grpc_exec_ctx* exec_ctx, void* arg) {
-  cq_is_finished_arg* a = (cq_is_finished_arg*)arg;
-  grpc_completion_queue* cq = a->cq;
-  cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
+class ExecCtxPluck : public grpc_core::ExecCtx {
+ public:
+  ExecCtxPluck(void* arg) : ExecCtx(0), check_ready_to_finish_arg_(arg) {}
 
-  GPR_ASSERT(a->stolen_completion == nullptr);
-  gpr_atm current_last_seen_things_queued_ever =
-      gpr_atm_no_barrier_load(&cqd->things_queued_ever);
-  if (current_last_seen_things_queued_ever != a->last_seen_things_queued_ever) {
-    gpr_mu_lock(cq->mu);
-    a->last_seen_things_queued_ever =
+  bool CheckReadyToFinish() override {
+    cq_is_finished_arg* a = (cq_is_finished_arg*)check_ready_to_finish_arg_;
+    grpc_completion_queue* cq = a->cq;
+    cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
+
+    GPR_ASSERT(a->stolen_completion == nullptr);
+    gpr_atm current_last_seen_things_queued_ever =
         gpr_atm_no_barrier_load(&cqd->things_queued_ever);
-    grpc_cq_completion* c;
-    grpc_cq_completion* prev = &cqd->completed_head;
-    while ((c = (grpc_cq_completion*)(prev->next & ~(uintptr_t)1)) !=
-           &cqd->completed_head) {
-      if (c->tag == a->tag) {
-        prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
-        if (c == cqd->completed_tail) {
-          cqd->completed_tail = prev;
+    if (current_last_seen_things_queued_ever !=
+        a->last_seen_things_queued_ever) {
+      gpr_mu_lock(cq->mu);
+      a->last_seen_things_queued_ever =
+          gpr_atm_no_barrier_load(&cqd->things_queued_ever);
+      grpc_cq_completion* c;
+      grpc_cq_completion* prev = &cqd->completed_head;
+      while ((c = (grpc_cq_completion*)(prev->next & ~(uintptr_t)1)) !=
+             &cqd->completed_head) {
+        if (c->tag == a->tag) {
+          prev->next = (prev->next & (uintptr_t)1) | (c->next & ~(uintptr_t)1);
+          if (c == cqd->completed_tail) {
+            cqd->completed_tail = prev;
+          }
+          gpr_mu_unlock(cq->mu);
+          a->stolen_completion = c;
+          return true;
         }
-        gpr_mu_unlock(cq->mu);
-        a->stolen_completion = c;
-        return true;
+        prev = c;
       }
-      prev = c;
+      gpr_mu_unlock(cq->mu);
     }
-    gpr_mu_unlock(cq->mu);
+    return !a->first_loop && a->deadline < grpc_core::ExecCtx::Get()->Now();
   }
-  return !a->first_loop && a->deadline < grpc_exec_ctx_now(exec_ctx);
-}
+
+ private:
+  void* check_ready_to_finish_arg_;
+};
 
 static grpc_event cq_pluck(grpc_completion_queue* cq, void* tag,
                            gpr_timespec deadline, void* reserved) {
@@ -1125,8 +1111,7 @@
       nullptr,
       tag,
       true};
-  grpc_exec_ctx exec_ctx =
-      GRPC_EXEC_CTX_INITIALIZER(0, cq_is_pluck_finished, &is_finished_arg);
+  ExecCtxPluck exec_ctx(&is_finished_arg);
   for (;;) {
     if (is_finished_arg.stolen_completion != nullptr) {
       gpr_mu_unlock(cq->mu);
@@ -1135,7 +1120,7 @@
       ret.type = GRPC_OP_COMPLETE;
       ret.success = c->next & 1u;
       ret.tag = c->tag;
-      c->done(&exec_ctx, c->done_arg, c);
+      c->done(c->done_arg, c);
       break;
     }
     prev = &cqd->completed_head;
@@ -1150,7 +1135,7 @@
         ret.type = GRPC_OP_COMPLETE;
         ret.success = c->next & 1u;
         ret.tag = c->tag;
-        c->done(&exec_ctx, c->done_arg, c);
+        c->done(c->done_arg, c);
         goto done;
       }
       prev = c;
@@ -1174,7 +1159,7 @@
       break;
     }
     if (!is_finished_arg.first_loop &&
-        grpc_exec_ctx_now(&exec_ctx) >= deadline_millis) {
+        grpc_core::ExecCtx::Get()->Now() >= deadline_millis) {
       del_plucker(cq, tag, &worker);
       gpr_mu_unlock(cq->mu);
       memset(&ret, 0, sizeof(ret));
@@ -1183,8 +1168,8 @@
       break;
     }
     cq->num_polls++;
-    grpc_error* err = cq->poller_vtable->work(&exec_ctx, POLLSET_FROM_CQ(cq),
-                                              &worker, deadline_millis);
+    grpc_error* err =
+        cq->poller_vtable->work(POLLSET_FROM_CQ(cq), &worker, deadline_millis);
     if (err != GRPC_ERROR_NONE) {
       del_plucker(cq, tag, &worker);
       gpr_mu_unlock(cq->mu);
@@ -1202,8 +1187,8 @@
   }
 done:
   GRPC_SURFACE_TRACE_RETURNED_EVENT(cq, &ret);
-  GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "pluck");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_CQ_INTERNAL_UNREF(cq, "pluck");
+
   GPR_ASSERT(is_finished_arg.stolen_completion == nullptr);
 
   GPR_TIMER_END("grpc_completion_queue_pluck", 0);
@@ -1216,22 +1201,19 @@
   return cq->vtable->pluck(cq, tag, deadline, reserved);
 }
 
-static void cq_finish_shutdown_pluck(grpc_exec_ctx* exec_ctx,
-                                     grpc_completion_queue* cq) {
+static void cq_finish_shutdown_pluck(grpc_completion_queue* cq) {
   cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
 
   GPR_ASSERT(cqd->shutdown_called);
   GPR_ASSERT(!gpr_atm_no_barrier_load(&cqd->shutdown));
   gpr_atm_no_barrier_store(&cqd->shutdown, 1);
 
-  cq->poller_vtable->shutdown(exec_ctx, POLLSET_FROM_CQ(cq),
-                              &cq->pollset_shutdown_done);
+  cq->poller_vtable->shutdown(POLLSET_FROM_CQ(cq), &cq->pollset_shutdown_done);
 }
 
 /* NOTE: This function is almost exactly identical to cq_shutdown_next() but
  * merging them is a bit tricky and probably not worth it */
-static void cq_shutdown_pluck(grpc_exec_ctx* exec_ctx,
-                              grpc_completion_queue* cq) {
+static void cq_shutdown_pluck(grpc_completion_queue* cq) {
   cq_pluck_data* cqd = (cq_pluck_data*)DATA_FROM_CQ(cq);
 
   /* Need an extra ref for cq here because:
@@ -1244,25 +1226,25 @@
   gpr_mu_lock(cq->mu);
   if (cqd->shutdown_called) {
     gpr_mu_unlock(cq->mu);
-    GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down (pluck cq)");
+    GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)");
     return;
   }
   cqd->shutdown_called = true;
   if (gpr_atm_full_fetch_add(&cqd->pending_events, -1) == 1) {
-    cq_finish_shutdown_pluck(exec_ctx, cq);
+    cq_finish_shutdown_pluck(cq);
   }
   gpr_mu_unlock(cq->mu);
-  GRPC_CQ_INTERNAL_UNREF(exec_ctx, cq, "shutting_down (pluck cq)");
+  GRPC_CQ_INTERNAL_UNREF(cq, "shutting_down (pluck cq)");
 }
 
 /* Shutdown simply drops a ref that we reserved at creation time; if we drop
    to zero here, then enter shutdown mode and wake up any waiters */
 void grpc_completion_queue_shutdown(grpc_completion_queue* cq) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   GPR_TIMER_BEGIN("grpc_completion_queue_shutdown", 0);
   GRPC_API_TRACE("grpc_completion_queue_shutdown(cq=%p)", 1, (cq));
-  cq->vtable->shutdown(&exec_ctx, cq);
-  grpc_exec_ctx_finish(&exec_ctx);
+  cq->vtable->shutdown(cq);
+
   GPR_TIMER_END("grpc_completion_queue_shutdown", 0);
 }
 
@@ -1271,9 +1253,9 @@
   GPR_TIMER_BEGIN("grpc_completion_queue_destroy", 0);
   grpc_completion_queue_shutdown(cq);
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GRPC_CQ_INTERNAL_UNREF(&exec_ctx, cq, "destroy");
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GRPC_CQ_INTERNAL_UNREF(cq, "destroy");
+
   GPR_TIMER_END("grpc_completion_queue_destroy", 0);
 }
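
The GRPC_EXEC_CTX_INITIALIZER(0, cq_is_next_finished, &is_finished_arg) form, which bundled a ready-to-finish predicate into the context, becomes a small ExecCtx subclass (ExecCtxNext / ExecCtxPluck) that overrides CheckReadyToFinish(). A toy, self-contained sketch of that virtual-hook pattern (illustrative names, not the real classes):

    #include <iostream>

    // Toy version of the pattern: the base context exposes a virtual
    // "am I allowed to stop polling?" hook, and callers that used to pass a
    // function pointer + arg now subclass and override it.
    class ExecCtx {
     public:
      virtual ~ExecCtx() = default;
      virtual bool CheckReadyToFinish() { return false; }
    };

    struct is_finished_arg {
      int events_seen;
      int events_wanted;
    };

    class ExecCtxNext : public ExecCtx {
     public:
      explicit ExecCtxNext(void* arg) : arg_(arg) {}
      bool CheckReadyToFinish() override {
        auto* a = static_cast<is_finished_arg*>(arg_);
        return a->events_seen >= a->events_wanted;  // stand-in for the cq checks
      }

     private:
      void* arg_;
    };

    int main() {
      is_finished_arg arg{0, 3};
      ExecCtxNext exec_ctx(&arg);
      while (!exec_ctx.CheckReadyToFinish()) {
        ++arg.events_seen;  // stand-in for popping completions / polling
        std::cout << "polled, events_seen=" << arg.events_seen << "\n";
      }
    }
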
 
diff --git a/src/core/lib/surface/completion_queue.h b/src/core/lib/surface/completion_queue.h
index 9fdb48d..aea47af 100644
--- a/src/core/lib/surface/completion_queue.h
+++ b/src/core/lib/surface/completion_queue.h
@@ -33,10 +33,6 @@
 extern grpc_core::DebugOnlyTraceFlag grpc_trace_pending_tags;
 extern grpc_core::DebugOnlyTraceFlag grpc_trace_cq_refcount;
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_cq_completion {
   gpr_mpscq_node node;
 
@@ -44,8 +40,7 @@
   void* tag;
   /** done callback - called when this queue element is no longer
       needed by the completion queue */
-  void (*done)(grpc_exec_ctx* exec_ctx, void* done_arg,
-               struct grpc_cq_completion* c);
+  void (*done)(void* done_arg, struct grpc_cq_completion* c);
   void* done_arg;
   /** next pointer; low bit is used to indicate success or not */
   uintptr_t next;
@@ -54,17 +49,17 @@
 #ifndef NDEBUG
 void grpc_cq_internal_ref(grpc_completion_queue* cc, const char* reason,
                           const char* file, int line);
-void grpc_cq_internal_unref(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cc,
-                            const char* reason, const char* file, int line);
+void grpc_cq_internal_unref(grpc_completion_queue* cc, const char* reason,
+                            const char* file, int line);
 #define GRPC_CQ_INTERNAL_REF(cc, reason) \
   grpc_cq_internal_ref(cc, reason, __FILE__, __LINE__)
-#define GRPC_CQ_INTERNAL_UNREF(ec, cc, reason) \
-  grpc_cq_internal_unref(ec, cc, reason, __FILE__, __LINE__)
+#define GRPC_CQ_INTERNAL_UNREF(cc, reason) \
+  grpc_cq_internal_unref(cc, reason, __FILE__, __LINE__)
 #else
 void grpc_cq_internal_ref(grpc_completion_queue* cc);
-void grpc_cq_internal_unref(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cc);
+void grpc_cq_internal_unref(grpc_completion_queue* cc);
 #define GRPC_CQ_INTERNAL_REF(cc, reason) grpc_cq_internal_ref(cc)
-#define GRPC_CQ_INTERNAL_UNREF(ec, cc, reason) grpc_cq_internal_unref(ec, cc)
+#define GRPC_CQ_INTERNAL_UNREF(cc, reason) grpc_cq_internal_unref(cc)
 #endif
 
 /* Initializes global variables used by completion queues */
@@ -78,10 +73,8 @@
 
 /* Queue a GRPC_OP_COMPLETED operation; tag must correspond to the tag passed to
    grpc_cq_begin_op */
-void grpc_cq_end_op(grpc_exec_ctx* exec_ctx, grpc_completion_queue* cc,
-                    void* tag, grpc_error* error,
-                    void (*done)(grpc_exec_ctx* exec_ctx, void* done_arg,
-                                 grpc_cq_completion* storage),
+void grpc_cq_end_op(grpc_completion_queue* cc, void* tag, grpc_error* error,
+                    void (*done)(void* done_arg, grpc_cq_completion* storage),
                     void* done_arg, grpc_cq_completion* storage);
 
 grpc_pollset* grpc_cq_pollset(grpc_completion_queue* cc);
@@ -95,8 +88,4 @@
 grpc_completion_queue* grpc_completion_queue_create_internal(
     grpc_cq_completion_type completion_type, grpc_cq_polling_type polling_type);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_H */
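
completion_queue.h keeps the debug/release split for refcount tracing, just without the exec_ctx argument: in debug builds the UNREF macro forwards a reason plus __FILE__/__LINE__, in release builds it collapses to a bare unref while call sites stay identical. A generic sketch of that macro shape (hypothetical Thing/THING_UNREF names):

    #include <cstdio>

    // Generic sketch of the debug/release ref-count macro pattern, as used
    // for GRPC_CQ_INTERNAL_UNREF once the exec_ctx parameter was dropped.
    struct Thing {
      int refs = 1;
    };

    #ifndef NDEBUG
    inline void thing_unref(Thing* t, const char* reason, const char* file, int line) {
      std::printf("%s:%d unref(%s) refs=%d->%d\n", file, line, reason, t->refs, t->refs - 1);
      --t->refs;
    }
    #define THING_UNREF(t, reason) thing_unref((t), (reason), __FILE__, __LINE__)
    #else
    inline void thing_unref(Thing* t) { --t->refs; }
    // The reason argument is accepted but discarded so call sites look the same.
    #define THING_UNREF(t, reason) thing_unref((t))
    #endif

    int main() {
      Thing t;
      THING_UNREF(&t, "destroy");  // traces in debug builds, silent in release
    }
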
diff --git a/src/core/lib/surface/completion_queue_factory.h b/src/core/lib/surface/completion_queue_factory.h
index af8f3d6..89be8f8 100644
--- a/src/core/lib/surface/completion_queue_factory.h
+++ b/src/core/lib/surface/completion_queue_factory.h
@@ -22,10 +22,6 @@
 #include <grpc/grpc.h>
 #include "src/core/lib/surface/completion_queue.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_completion_queue_factory_vtable {
   grpc_completion_queue* (*create)(const grpc_completion_queue_factory*,
                                    const grpc_completion_queue_attributes*);
@@ -37,8 +33,4 @@
   grpc_completion_queue_factory_vtable* vtable;
 };
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_COMPLETION_QUEUE_FACTORY_H */
diff --git a/src/core/lib/surface/event_string.h b/src/core/lib/surface/event_string.h
index 4bdb11f..cbf96da 100644
--- a/src/core/lib/surface/event_string.h
+++ b/src/core/lib/surface/event_string.h
@@ -21,15 +21,7 @@
 
 #include <grpc/grpc.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Returns a string describing an event. Must be later freed with gpr_free() */
 char* grpc_event_string(grpc_event* ev);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_EVENT_STRING_H */
diff --git a/src/core/lib/surface/init.cc b/src/core/lib/surface/init.cc
index 8ee1383..0f40965 100644
--- a/src/core/lib/surface/init.cc
+++ b/src/core/lib/surface/init.cc
@@ -73,14 +73,12 @@
   grpc_fork_handlers_auto_register();
 }
 
-static bool append_filter(grpc_exec_ctx* exec_ctx,
-                          grpc_channel_stack_builder* builder, void* arg) {
+static bool append_filter(grpc_channel_stack_builder* builder, void* arg) {
   return grpc_channel_stack_builder_append_filter(
       builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
 }
 
-static bool prepend_filter(grpc_exec_ctx* exec_ctx,
-                           grpc_channel_stack_builder* builder, void* arg) {
+static bool prepend_filter(grpc_channel_stack_builder* builder, void* arg) {
   return grpc_channel_stack_builder_prepend_filter(
       builder, (const grpc_channel_filter*)arg, nullptr, nullptr);
 }
@@ -123,7 +121,6 @@
   int i;
   gpr_once_init(&g_basic_init, do_basic_init);
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   gpr_mu_lock(&g_init_mu);
   if (++g_initializations == 1) {
     gpr_time_init();
@@ -133,7 +130,8 @@
     grpc_mdctx_global_init();
     grpc_channel_init_init();
     grpc_security_pre_init();
-    grpc_iomgr_init(&exec_ctx);
+    grpc_core::ExecCtx::GlobalInit();
+    grpc_iomgr_init();
     gpr_timers_global_init();
     grpc_handshaker_factory_registry_init();
     grpc_security_init();
@@ -149,37 +147,41 @@
     grpc_tracer_init("GRPC_TRACE");
     /* no more changes to channel init pipelines */
     grpc_channel_init_finalize();
-    grpc_iomgr_start(&exec_ctx);
+    grpc_iomgr_start();
   }
   gpr_mu_unlock(&g_init_mu);
-  grpc_exec_ctx_finish(&exec_ctx);
+
   GRPC_API_TRACE("grpc_init(void)", 0, ());
 }
 
 void grpc_shutdown(void) {
   int i;
   GRPC_API_TRACE("grpc_shutdown(void)", 0, ());
-  grpc_exec_ctx exec_ctx =
-      GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, nullptr);
   gpr_mu_lock(&g_init_mu);
   if (--g_initializations == 0) {
-    grpc_executor_shutdown(&exec_ctx);
-    grpc_timer_manager_set_threading(false);  // shutdown timer_manager thread
-    for (i = g_number_of_plugins; i >= 0; i--) {
-      if (g_all_of_the_plugins[i].destroy != nullptr) {
-        g_all_of_the_plugins[i].destroy();
+    {
+      grpc_core::ExecCtx exec_ctx(0);
+      {
+        grpc_executor_shutdown();
+        grpc_timer_manager_set_threading(
+            false);  // shutdown timer_manager thread
+        for (i = g_number_of_plugins; i >= 0; i--) {
+          if (g_all_of_the_plugins[i].destroy != nullptr) {
+            g_all_of_the_plugins[i].destroy();
+          }
+        }
       }
+      grpc_iomgr_shutdown();
+      gpr_timers_global_destroy();
+      grpc_tracer_shutdown();
+      grpc_mdctx_global_shutdown();
+      grpc_handshaker_factory_registry_shutdown();
+      grpc_slice_intern_shutdown();
+      grpc_stats_shutdown();
     }
-    grpc_iomgr_shutdown(&exec_ctx);
-    gpr_timers_global_destroy();
-    grpc_tracer_shutdown();
-    grpc_mdctx_global_shutdown(&exec_ctx);
-    grpc_handshaker_factory_registry_shutdown(&exec_ctx);
-    grpc_slice_intern_shutdown();
-    grpc_stats_shutdown();
+    grpc_core::ExecCtx::GlobalShutdown();
   }
   gpr_mu_unlock(&g_init_mu);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 int grpc_is_initialized(void) {
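
The grpc_shutdown() change above is ordering-sensitive: the ExecCtx now lives in an inner scope so its destructor flushes any queued work while iomgr and friends are still alive, and only then does grpc_core::ExecCtx::GlobalShutdown() tear the mechanism down. A compact sketch of that scoping, using a toy stand-in rather than the real class:

    #include <cstdio>

    // Standalone sketch of the shutdown ordering shown above: scoped work is
    // flushed by the context destructor *before* the global teardown runs.
    // Illustrative only; not the real grpc_core::ExecCtx.
    namespace toy {

    struct ExecCtx {
      ~ExecCtx() { std::puts("ExecCtx destroyed: pending closures flushed"); }
      static void GlobalInit() { std::puts("ExecCtx::GlobalInit"); }
      static void GlobalShutdown() { std::puts("ExecCtx::GlobalShutdown"); }
    };

    }  // namespace toy

    void toy_shutdown() {
      {
        toy::ExecCtx exec_ctx;  // inner scope, as in grpc_shutdown()
        std::puts("run executor/iomgr/metadata shutdown under the context");
      }  // <- flush happens here, while the machinery is still alive
      toy::ExecCtx::GlobalShutdown();  // only now tear the machinery down
    }

    int main() {
      toy::ExecCtx::GlobalInit();
      toy_shutdown();
    }
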
diff --git a/src/core/lib/surface/init.h b/src/core/lib/surface/init.h
index d429026..9353208 100644
--- a/src/core/lib/surface/init.h
+++ b/src/core/lib/surface/init.h
@@ -19,17 +19,9 @@
 #ifndef GRPC_CORE_LIB_SURFACE_INIT_H
 #define GRPC_CORE_LIB_SURFACE_INIT_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 void grpc_register_security_filters(void);
 void grpc_security_pre_init(void);
 void grpc_security_init(void);
 int grpc_is_initialized(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_INIT_H */
diff --git a/src/core/lib/surface/init_secure.cc b/src/core/lib/surface/init_secure.cc
index 3eee570..75ed9fa 100644
--- a/src/core/lib/surface/init_secure.cc
+++ b/src/core/lib/surface/init_secure.cc
@@ -37,7 +37,7 @@
 void grpc_security_pre_init(void) {}
 
 static bool maybe_prepend_client_auth_filter(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* arg) {
   const grpc_channel_args* args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   if (args) {
@@ -52,7 +52,7 @@
 }
 
 static bool maybe_prepend_server_auth_filter(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* arg) {
   const grpc_channel_args* args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   if (args) {
diff --git a/src/core/lib/surface/lame_client.cc b/src/core/lib/surface/lame_client.cc
index d1cf4d7..08611ff 100644
--- a/src/core/lib/surface/lame_client.cc
+++ b/src/core/lib/surface/lame_client.cc
@@ -49,8 +49,7 @@
   const char* error_message;
 };
 
-static void fill_metadata(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-                          grpc_metadata_batch* mdb) {
+static void fill_metadata(grpc_call_element* elem, grpc_metadata_batch* mdb) {
   CallData* calld = reinterpret_cast<CallData*>(elem->call_data);
   bool expected = false;
   if (!calld->filled_metadata.compare_exchange_strong(
@@ -62,9 +61,9 @@
   char tmp[GPR_LTOA_MIN_BUFSIZE];
   gpr_ltoa(chand->error_code, tmp);
   calld->status.md = grpc_mdelem_from_slices(
-      exec_ctx, GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(tmp));
+      GRPC_MDSTR_GRPC_STATUS, grpc_slice_from_copied_string(tmp));
   calld->details.md = grpc_mdelem_from_slices(
-      exec_ctx, GRPC_MDSTR_GRPC_MESSAGE,
+      GRPC_MDSTR_GRPC_MESSAGE,
       grpc_slice_from_copied_string(chand->error_message));
   calld->status.prev = calld->details.next = nullptr;
   calld->status.next = &calld->details;
@@ -76,75 +75,73 @@
 }
 
 static void lame_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
   CallData* calld = reinterpret_cast<CallData*>(elem->call_data);
   if (op->recv_initial_metadata) {
-    fill_metadata(exec_ctx, elem,
+    fill_metadata(elem,
                   op->payload->recv_initial_metadata.recv_initial_metadata);
   } else if (op->recv_trailing_metadata) {
-    fill_metadata(exec_ctx, elem,
+    fill_metadata(elem,
                   op->payload->recv_trailing_metadata.recv_trailing_metadata);
   }
   grpc_transport_stream_op_batch_finish_with_failure(
-      exec_ctx, op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"),
+      op, GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"),
       calld->call_combiner);
 }
 
-static void lame_get_channel_info(grpc_exec_ctx* exec_ctx,
-                                  grpc_channel_element* elem,
+static void lame_get_channel_info(grpc_channel_element* elem,
                                   const grpc_channel_info* channel_info) {}
 
-static void lame_start_transport_op(grpc_exec_ctx* exec_ctx,
-                                    grpc_channel_element* elem,
+static void lame_start_transport_op(grpc_channel_element* elem,
                                     grpc_transport_op* op) {
   if (op->on_connectivity_state_change) {
     GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN);
     *op->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
-    GRPC_CLOSURE_SCHED(exec_ctx, op->on_connectivity_state_change,
-                       GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(op->on_connectivity_state_change, GRPC_ERROR_NONE);
   }
-  if (op->send_ping != nullptr) {
+  if (op->send_ping.on_initiate != nullptr) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx, op->send_ping,
+        op->send_ping.on_initiate,
+        GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
+  }
+  if (op->send_ping.on_ack != nullptr) {
+    GRPC_CLOSURE_SCHED(
+        op->send_ping.on_ack,
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("lame client channel"));
   }
   GRPC_ERROR_UNREF(op->disconnect_with_error);
   if (op->on_consumed != nullptr) {
-    GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
   }
 }
 
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   CallData* calld = reinterpret_cast<CallData*>(elem->call_data);
   calld->call_combiner = args->call_combiner;
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* then_schedule_closure) {
-  GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
 }
 
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   GPR_ASSERT(args->is_first);
   GPR_ASSERT(args->is_last);
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 }  // namespace
 
 }  // namespace grpc_core
 
-extern "C" const grpc_channel_filter grpc_lame_filter = {
+const grpc_channel_filter grpc_lame_filter = {
     grpc_core::lame_start_transport_stream_op_batch,
     grpc_core::lame_start_transport_op,
     sizeof(grpc_core::CallData),
@@ -163,10 +160,10 @@
 grpc_channel* grpc_lame_client_channel_create(const char* target,
                                               grpc_status_code error_code,
                                               const char* error_message) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_channel_element* elem;
-  grpc_channel* channel = grpc_channel_create(
-      &exec_ctx, target, nullptr, GRPC_CLIENT_LAME_CHANNEL, nullptr);
+  grpc_channel* channel =
+      grpc_channel_create(target, nullptr, GRPC_CLIENT_LAME_CHANNEL, nullptr);
   elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
   GRPC_API_TRACE(
       "grpc_lame_client_channel_create(target=%s, error_code=%d, "
@@ -176,6 +173,6 @@
   auto chand = reinterpret_cast<grpc_core::ChannelData*>(elem->channel_data);
   chand->error_code = error_code;
   chand->error_message = error_message;
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return channel;
 }
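
With the extern "C" gone, grpc_lame_filter becomes an ordinary C++ global: a table of plain function pointers whose hooks no longer take a context argument. A trimmed-down, hypothetical analogue of that vtable shape (not the real grpc_channel_filter layout):

    #include <iostream>

    // Hypothetical, trimmed-down analogue of a channel filter vtable after
    // the refactor: each hook is a plain function pointer with no context arg.
    struct CallElem { const char* name; };

    struct ChannelFilter {
      void (*start_transport_stream_op_batch)(CallElem* elem);
      int (*init_call_elem)(CallElem* elem);
      void (*destroy_call_elem)(CallElem* elem);
      const char* name;
    };

    namespace lame {
    void start_batch(CallElem* elem) {
      std::cout << elem->name << ": batch failed immediately (lame channel)\n";
    }
    int init_call(CallElem*) { return 0; }   // nothing to set up
    void destroy_call(CallElem*) {}          // nothing to tear down
    }  // namespace lame

    // Like `const grpc_channel_filter grpc_lame_filter = {...}` in the diff,
    // minus the extern "C" that is no longer needed.
    const ChannelFilter lame_filter = {
        lame::start_batch,
        lame::init_call,
        lame::destroy_call,
        "lame-client",
    };

    int main() {
      CallElem elem{"call-1"};
      lame_filter.init_call_elem(&elem);
      lame_filter.start_transport_stream_op_batch(&elem);
      lame_filter.destroy_call_elem(&elem);
    }
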
diff --git a/src/core/lib/surface/lame_client.h b/src/core/lib/surface/lame_client.h
index 2f6f9cd..3ce353f 100644
--- a/src/core/lib/surface/lame_client.h
+++ b/src/core/lib/surface/lame_client.h
@@ -21,14 +21,6 @@
 
 #include "src/core/lib/channel/channel_stack.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_lame_filter;
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_LAME_CLIENT_H */
diff --git a/src/core/lib/surface/server.cc b/src/core/lib/surface/server.cc
index 0f8a057..f1d428f 100644
--- a/src/core/lib/surface/server.cc
+++ b/src/core/lib/surface/server.cc
@@ -46,10 +46,9 @@
 
 typedef struct listener {
   void* arg;
-  void (*start)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
-                grpc_pollset** pollsets, size_t pollset_count);
-  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
-                  grpc_closure* closure);
+  void (*start)(grpc_server* server, void* arg, grpc_pollset** pollsets,
+                size_t pollset_count);
+  void (*destroy)(grpc_server* server, void* arg, grpc_closure* closure);
   struct listener* next;
   grpc_closure destroy_done;
 } listener;
@@ -224,13 +223,12 @@
 #define SERVER_FROM_CALL_ELEM(elem) \
   (((channel_data*)(elem)->channel_data)->server)
 
-static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* calld,
-                            grpc_error* error);
-static void fail_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
-                      size_t cq_idx, requested_call* rc, grpc_error* error);
+static void publish_new_rpc(void* calld, grpc_error* error);
+static void fail_call(grpc_server* server, size_t cq_idx, requested_call* rc,
+                      grpc_error* error);
 /* Before calling maybe_finish_shutdown, we must hold mu_global and not
    hold mu_call */
-static void maybe_finish_shutdown(grpc_exec_ctx* exec_ctx, grpc_server* server);
+static void maybe_finish_shutdown(grpc_server* server);
 
 /*
  * channel broadcaster
@@ -258,15 +256,14 @@
   grpc_slice slice;
 };
 
-static void shutdown_cleanup(grpc_exec_ctx* exec_ctx, void* arg,
-                             grpc_error* error) {
+static void shutdown_cleanup(void* arg, grpc_error* error) {
   struct shutdown_cleanup_args* a = (struct shutdown_cleanup_args*)arg;
-  grpc_slice_unref_internal(exec_ctx, a->slice);
+  grpc_slice_unref_internal(a->slice);
   gpr_free(a);
 }
 
-static void send_shutdown(grpc_exec_ctx* exec_ctx, grpc_channel* channel,
-                          bool send_goaway, grpc_error* send_disconnect) {
+static void send_shutdown(grpc_channel* channel, bool send_goaway,
+                          grpc_error* send_disconnect) {
   struct shutdown_cleanup_args* sc =
       (struct shutdown_cleanup_args*)gpr_malloc(sizeof(*sc));
   GRPC_CLOSURE_INIT(&sc->closure, shutdown_cleanup, sc,
@@ -284,19 +281,18 @@
   op->disconnect_with_error = send_disconnect;
 
   elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
-  elem->filter->start_transport_op(exec_ctx, elem, op);
+  elem->filter->start_transport_op(elem, op);
 }
 
-static void channel_broadcaster_shutdown(grpc_exec_ctx* exec_ctx,
-                                         channel_broadcaster* cb,
+static void channel_broadcaster_shutdown(channel_broadcaster* cb,
                                          bool send_goaway,
                                          grpc_error* force_disconnect) {
   size_t i;
 
   for (i = 0; i < cb->num_channels; i++) {
-    send_shutdown(exec_ctx, cb->channels[i], send_goaway,
+    send_shutdown(cb->channels[i], send_goaway,
                   GRPC_ERROR_REF(force_disconnect));
-    GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, cb->channels[i], "broadcast");
+    GRPC_CHANNEL_INTERNAL_UNREF(cb->channels[i], "broadcast");
   }
   gpr_free(cb->channels);
   GRPC_ERROR_UNREF(force_disconnect);
@@ -324,13 +320,11 @@
   gpr_free(rm->requests_per_cq);
 }
 
-static void kill_zombie(grpc_exec_ctx* exec_ctx, void* elem,
-                        grpc_error* error) {
+static void kill_zombie(void* elem, grpc_error* error) {
   grpc_call_unref(grpc_call_from_top_element((grpc_call_element*)elem));
 }
 
-static void request_matcher_zombify_all_pending_calls(grpc_exec_ctx* exec_ctx,
-                                                      request_matcher* rm) {
+static void request_matcher_zombify_all_pending_calls(request_matcher* rm) {
   while (rm->pending_head) {
     call_data* calld = rm->pending_head;
     rm->pending_head = calld->pending_next;
@@ -339,19 +333,18 @@
         &calld->kill_zombie_closure, kill_zombie,
         grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
         grpc_schedule_on_exec_ctx);
-    GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
   }
 }
 
-static void request_matcher_kill_requests(grpc_exec_ctx* exec_ctx,
-                                          grpc_server* server,
+static void request_matcher_kill_requests(grpc_server* server,
                                           request_matcher* rm,
                                           grpc_error* error) {
   requested_call* rc;
   for (size_t i = 0; i < server->cq_count; i++) {
     while ((rc = (requested_call*)gpr_locked_mpscq_pop(
                 &rm->requests_per_cq[i])) != nullptr) {
-      fail_call(exec_ctx, server, i, rc, GRPC_ERROR_REF(error));
+      fail_call(server, i, rc, GRPC_ERROR_REF(error));
     }
   }
   GRPC_ERROR_UNREF(error);
@@ -365,10 +358,10 @@
   gpr_ref(&server->internal_refcount);
 }
 
-static void server_delete(grpc_exec_ctx* exec_ctx, grpc_server* server) {
+static void server_delete(grpc_server* server) {
   registered_method* rm;
   size_t i;
-  grpc_channel_args_destroy(exec_ctx, server->channel_args);
+  grpc_channel_args_destroy(server->channel_args);
   gpr_mu_destroy(&server->mu_global);
   gpr_mu_destroy(&server->mu_call);
   gpr_cv_destroy(&server->starting_cv);
@@ -385,7 +378,7 @@
     request_matcher_destroy(&server->unregistered_request_matcher);
   }
   for (i = 0; i < server->cq_count; i++) {
-    GRPC_CQ_INTERNAL_UNREF(exec_ctx, server->cqs[i], "server");
+    GRPC_CQ_INTERNAL_UNREF(server->cqs[i], "server");
   }
   gpr_free(server->cqs);
   gpr_free(server->pollsets);
@@ -393,9 +386,9 @@
   gpr_free(server);
 }
 
-static void server_unref(grpc_exec_ctx* exec_ctx, grpc_server* server) {
+static void server_unref(grpc_server* server) {
   if (gpr_unref(&server->internal_refcount)) {
-    server_delete(exec_ctx, server);
+    server_delete(server);
   }
 }
 
@@ -409,21 +402,19 @@
   chand->next = chand->prev = chand;
 }
 
-static void finish_destroy_channel(grpc_exec_ctx* exec_ctx, void* cd,
-                                   grpc_error* error) {
+static void finish_destroy_channel(void* cd, grpc_error* error) {
   channel_data* chand = (channel_data*)cd;
   grpc_server* server = chand->server;
-  GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "server");
-  server_unref(exec_ctx, server);
+  GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "server");
+  server_unref(server);
 }
 
-static void destroy_channel(grpc_exec_ctx* exec_ctx, channel_data* chand,
-                            grpc_error* error) {
+static void destroy_channel(channel_data* chand, grpc_error* error) {
   if (is_channel_orphaned(chand)) return;
   GPR_ASSERT(chand->server != nullptr);
   orphan_channel(chand);
   server_ref(chand->server);
-  maybe_finish_shutdown(exec_ctx, chand->server);
+  maybe_finish_shutdown(chand->server);
   GRPC_CLOSURE_INIT(&chand->finish_destroy_channel_closure,
                     finish_destroy_channel, chand, grpc_schedule_on_exec_ctx);
 
@@ -436,20 +427,18 @@
   grpc_transport_op* op =
       grpc_make_transport_op(&chand->finish_destroy_channel_closure);
   op->set_accept_stream = true;
-  grpc_channel_next_op(exec_ctx,
-                       grpc_channel_stack_element(
+  grpc_channel_next_op(grpc_channel_stack_element(
                            grpc_channel_get_channel_stack(chand->channel), 0),
                        op);
 }
 
-static void done_request_event(grpc_exec_ctx* exec_ctx, void* req,
-                               grpc_cq_completion* c) {
+static void done_request_event(void* req, grpc_cq_completion* c) {
   gpr_free(req);
 }
 
-static void publish_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
-                         call_data* calld, size_t cq_idx, requested_call* rc) {
-  grpc_call_set_completion_queue(exec_ctx, calld->call, rc->cq_bound_to_call);
+static void publish_call(grpc_server* server, call_data* calld, size_t cq_idx,
+                         requested_call* rc) {
+  grpc_call_set_completion_queue(calld->call, rc->cq_bound_to_call);
   grpc_call* call = calld->call;
   *rc->call = call;
   calld->cq_new = server->cqs[cq_idx];
@@ -476,12 +465,11 @@
       GPR_UNREACHABLE_CODE(return );
   }
 
-  grpc_cq_end_op(exec_ctx, calld->cq_new, rc->tag, GRPC_ERROR_NONE,
-                 done_request_event, rc, &rc->completion);
+  grpc_cq_end_op(calld->cq_new, rc->tag, GRPC_ERROR_NONE, done_request_event,
+                 rc, &rc->completion);
 }
 
-static void publish_new_rpc(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+static void publish_new_rpc(void* arg, grpc_error* error) {
   grpc_call_element* call_elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)call_elem->call_data;
   channel_data* chand = (channel_data*)call_elem->channel_data;
@@ -494,8 +482,7 @@
         &calld->kill_zombie_closure, kill_zombie,
         grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
         grpc_schedule_on_exec_ctx);
-    GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
-                       GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_REF(error));
     return;
   }
 
@@ -506,15 +493,15 @@
     if (rc == nullptr) {
       continue;
     } else {
-      GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i);
+      GRPC_STATS_INC_SERVER_CQS_CHECKED(i);
       gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
-      publish_call(exec_ctx, server, calld, cq_idx, rc);
+      publish_call(server, calld, cq_idx, rc);
       return; /* early out */
     }
   }
 
   /* no cq to take the request found: queue it on the slow list */
-  GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED(exec_ctx);
+  GRPC_STATS_INC_SERVER_SLOWPATH_REQUESTS_QUEUED();
   gpr_mu_lock(&server->mu_call);
 
   // We need to ensure that all the queues are empty.  We do this under
@@ -529,9 +516,9 @@
       continue;
     } else {
       gpr_mu_unlock(&server->mu_call);
-      GRPC_STATS_INC_SERVER_CQS_CHECKED(exec_ctx, i + server->cq_count);
+      GRPC_STATS_INC_SERVER_CQS_CHECKED(i + server->cq_count);
       gpr_atm_no_barrier_store(&calld->state, ACTIVATED);
-      publish_call(exec_ctx, server, calld, cq_idx, rc);
+      publish_call(server, calld, cq_idx, rc);
       return; /* early out */
     }
   }
@@ -548,8 +535,7 @@
 }
 
 static void finish_start_new_rpc(
-    grpc_exec_ctx* exec_ctx, grpc_server* server, grpc_call_element* elem,
-    request_matcher* rm,
+    grpc_server* server, grpc_call_element* elem, request_matcher* rm,
     grpc_server_register_method_payload_handling payload_handling) {
   call_data* calld = (call_data*)elem->call_data;
 
@@ -557,7 +543,7 @@
     gpr_atm_no_barrier_store(&calld->state, ZOMBIED);
     GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
                       grpc_schedule_on_exec_ctx);
-    GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
     return;
   }
 
@@ -565,7 +551,7 @@
 
   switch (payload_handling) {
     case GRPC_SRM_PAYLOAD_NONE:
-      publish_new_rpc(exec_ctx, elem, GRPC_ERROR_NONE);
+      publish_new_rpc(elem, GRPC_ERROR_NONE);
       break;
     case GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER: {
       grpc_op op;
@@ -574,14 +560,13 @@
       op.data.recv_message.recv_message = &calld->payload;
       GRPC_CLOSURE_INIT(&calld->publish, publish_new_rpc, elem,
                         grpc_schedule_on_exec_ctx);
-      grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1,
-                                        &calld->publish);
+      grpc_call_start_batch_and_execute(calld->call, &op, 1, &calld->publish);
       break;
     }
   }
 }
 
-static void start_new_rpc(grpc_exec_ctx* exec_ctx, grpc_call_element* elem) {
+static void start_new_rpc(grpc_call_element* elem) {
   channel_data* chand = (channel_data*)elem->channel_data;
   call_data* calld = (call_data*)elem->call_data;
   grpc_server* server = chand->server;
@@ -606,8 +591,7 @@
                 GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) {
         continue;
       }
-      finish_start_new_rpc(exec_ctx, server, elem,
-                           &rm->server_registered_method->matcher,
+      finish_start_new_rpc(server, elem, &rm->server_registered_method->matcher,
                            rm->server_registered_method->payload_handling);
       return;
     }
@@ -624,14 +608,12 @@
                 GRPC_INITIAL_METADATA_IDEMPOTENT_REQUEST)) {
         continue;
       }
-      finish_start_new_rpc(exec_ctx, server, elem,
-                           &rm->server_registered_method->matcher,
+      finish_start_new_rpc(server, elem, &rm->server_registered_method->matcher,
                            rm->server_registered_method->payload_handling);
       return;
     }
   }
-  finish_start_new_rpc(exec_ctx, server, elem,
-                       &server->unregistered_request_matcher,
+  finish_start_new_rpc(server, elem, &server->unregistered_request_matcher,
                        GRPC_SRM_PAYLOAD_NONE);
 }
 
@@ -644,9 +626,8 @@
   return n;
 }
 
-static void done_shutdown_event(grpc_exec_ctx* exec_ctx, void* server,
-                                grpc_cq_completion* completion) {
-  server_unref(exec_ctx, (grpc_server*)server);
+static void done_shutdown_event(void* server, grpc_cq_completion* completion) {
+  server_unref((grpc_server*)server);
 }
 
 static int num_channels(grpc_server* server) {
@@ -659,34 +640,30 @@
   return n;
 }
 
-static void kill_pending_work_locked(grpc_exec_ctx* exec_ctx,
-                                     grpc_server* server, grpc_error* error) {
+static void kill_pending_work_locked(grpc_server* server, grpc_error* error) {
   if (server->started) {
-    request_matcher_kill_requests(exec_ctx, server,
-                                  &server->unregistered_request_matcher,
+    request_matcher_kill_requests(server, &server->unregistered_request_matcher,
                                   GRPC_ERROR_REF(error));
     request_matcher_zombify_all_pending_calls(
-        exec_ctx, &server->unregistered_request_matcher);
+        &server->unregistered_request_matcher);
     for (registered_method* rm = server->registered_methods; rm;
          rm = rm->next) {
-      request_matcher_kill_requests(exec_ctx, server, &rm->matcher,
+      request_matcher_kill_requests(server, &rm->matcher,
                                     GRPC_ERROR_REF(error));
-      request_matcher_zombify_all_pending_calls(exec_ctx, &rm->matcher);
+      request_matcher_zombify_all_pending_calls(&rm->matcher);
     }
   }
   GRPC_ERROR_UNREF(error);
 }
 
-static void maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
-                                  grpc_server* server) {
+static void maybe_finish_shutdown(grpc_server* server) {
   size_t i;
   if (!gpr_atm_acq_load(&server->shutdown_flag) || server->shutdown_published) {
     return;
   }
 
   kill_pending_work_locked(
-      exec_ctx, server,
-      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
+      server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
 
   if (server->root_channel_data.next != &server->root_channel_data ||
       server->listeners_destroyed < num_listeners(server)) {
@@ -706,15 +683,13 @@
   server->shutdown_published = 1;
   for (i = 0; i < server->num_shutdown_tags; i++) {
     server_ref(server);
-    grpc_cq_end_op(exec_ctx, server->shutdown_tags[i].cq,
-                   server->shutdown_tags[i].tag, GRPC_ERROR_NONE,
-                   done_shutdown_event, server,
+    grpc_cq_end_op(server->shutdown_tags[i].cq, server->shutdown_tags[i].tag,
+                   GRPC_ERROR_NONE, done_shutdown_event, server,
                    &server->shutdown_tags[i].completion);
   }
 }
 
-static void server_on_recv_initial_metadata(grpc_exec_ctx* exec_ctx, void* ptr,
-                                            grpc_error* error) {
+static void server_on_recv_initial_metadata(void* ptr, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)ptr;
   call_data* calld = (call_data*)elem->call_data;
   grpc_millis op_deadline;
@@ -728,10 +703,10 @@
         GRPC_MDVALUE(calld->recv_initial_metadata->idx.named.authority->md));
     calld->path_set = true;
     calld->host_set = true;
-    grpc_metadata_batch_remove(exec_ctx, calld->recv_initial_metadata,
+    grpc_metadata_batch_remove(calld->recv_initial_metadata,
                                calld->recv_initial_metadata->idx.named.path);
     grpc_metadata_batch_remove(
-        exec_ctx, calld->recv_initial_metadata,
+        calld->recv_initial_metadata,
         calld->recv_initial_metadata->idx.named.authority);
   } else {
     GRPC_ERROR_REF(error);
@@ -749,7 +724,7 @@
     GRPC_ERROR_UNREF(src_error);
   }
 
-  GRPC_CLOSURE_RUN(exec_ctx, calld->on_done_recv_initial_metadata, error);
+  GRPC_CLOSURE_RUN(calld->on_done_recv_initial_metadata, error);
 }
 
 static void server_mutate_op(grpc_call_element* elem,
@@ -770,24 +745,21 @@
 }
 
 static void server_start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
   server_mutate_op(elem, op);
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 }
 
-static void got_initial_metadata(grpc_exec_ctx* exec_ctx, void* ptr,
-                                 grpc_error* error) {
+static void got_initial_metadata(void* ptr, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)ptr;
   call_data* calld = (call_data*)elem->call_data;
   if (error == GRPC_ERROR_NONE) {
-    start_new_rpc(exec_ctx, elem);
+    start_new_rpc(elem);
   } else {
     if (gpr_atm_full_cas(&calld->state, NOT_STARTED, ZOMBIED)) {
       GRPC_CLOSURE_INIT(&calld->kill_zombie_closure, kill_zombie, elem,
                         grpc_schedule_on_exec_ctx);
-      GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
-                         GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
     } else if (gpr_atm_full_cas(&calld->state, PENDING, ZOMBIED)) {
       /* zombied call will be destroyed when it's removed from the pending
          queue... later */
@@ -795,8 +767,7 @@
   }
 }
 
-static void accept_stream(grpc_exec_ctx* exec_ctx, void* cd,
-                          grpc_transport* transport,
+static void accept_stream(void* cd, grpc_transport* transport,
                           const void* transport_server_data) {
   channel_data* chand = (channel_data*)cd;
   /* create a call */
@@ -806,11 +777,11 @@
   args.server_transport_data = transport_server_data;
   args.send_deadline = GRPC_MILLIS_INF_FUTURE;
   grpc_call* call;
-  grpc_error* error = grpc_call_create(exec_ctx, &args, &call);
+  grpc_error* error = grpc_call_create(&args, &call);
   grpc_call_element* elem =
       grpc_call_stack_element(grpc_call_get_call_stack(call), 0);
   if (error != GRPC_ERROR_NONE) {
-    got_initial_metadata(exec_ctx, elem, error);
+    got_initial_metadata(elem, error);
     GRPC_ERROR_UNREF(error);
     return;
   }
@@ -822,32 +793,28 @@
       &calld->initial_metadata;
   GRPC_CLOSURE_INIT(&calld->got_initial_metadata, got_initial_metadata, elem,
                     grpc_schedule_on_exec_ctx);
-  grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
-                                    &calld->got_initial_metadata);
+  grpc_call_start_batch_and_execute(call, &op, 1, &calld->got_initial_metadata);
 }
 
-static void channel_connectivity_changed(grpc_exec_ctx* exec_ctx, void* cd,
-                                         grpc_error* error) {
+static void channel_connectivity_changed(void* cd, grpc_error* error) {
   channel_data* chand = (channel_data*)cd;
   grpc_server* server = chand->server;
   if (chand->connectivity_state != GRPC_CHANNEL_SHUTDOWN) {
     grpc_transport_op* op = grpc_make_transport_op(nullptr);
     op->on_connectivity_state_change = &chand->channel_connectivity_changed;
     op->connectivity_state = &chand->connectivity_state;
-    grpc_channel_next_op(exec_ctx,
-                         grpc_channel_stack_element(
+    grpc_channel_next_op(grpc_channel_stack_element(
                              grpc_channel_get_channel_stack(chand->channel), 0),
                          op);
   } else {
     gpr_mu_lock(&server->mu_global);
-    destroy_channel(exec_ctx, chand, GRPC_ERROR_REF(error));
+    destroy_channel(chand, GRPC_ERROR_REF(error));
     gpr_mu_unlock(&server->mu_global);
-    GRPC_CHANNEL_INTERNAL_UNREF(exec_ctx, chand->channel, "connectivity");
+    GRPC_CHANNEL_INTERNAL_UNREF(chand->channel, "connectivity");
   }
 }
 
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   call_data* calld = (call_data*)elem->call_data;
   channel_data* chand = (channel_data*)elem->channel_data;
@@ -863,7 +830,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   channel_data* chand = (channel_data*)elem->channel_data;
@@ -872,19 +839,18 @@
   GPR_ASSERT(calld->state != PENDING);
 
   if (calld->host_set) {
-    grpc_slice_unref_internal(exec_ctx, calld->host);
+    grpc_slice_unref_internal(calld->host);
   }
   if (calld->path_set) {
-    grpc_slice_unref_internal(exec_ctx, calld->path);
+    grpc_slice_unref_internal(calld->path);
   }
   grpc_metadata_array_destroy(&calld->initial_metadata);
   grpc_byte_buffer_destroy(calld->payload);
 
-  server_unref(exec_ctx, chand->server);
+  server_unref(chand->server);
 }
 
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   channel_data* chand = (channel_data*)elem->channel_data;
   GPR_ASSERT(args->is_first);
@@ -900,15 +866,14 @@
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {
+static void destroy_channel_elem(grpc_channel_element* elem) {
   size_t i;
   channel_data* chand = (channel_data*)elem->channel_data;
   if (chand->registered_methods) {
     for (i = 0; i < chand->registered_method_slots; i++) {
-      grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].method);
+      grpc_slice_unref_internal(chand->registered_methods[i].method);
       if (chand->registered_methods[i].has_host) {
-        grpc_slice_unref_internal(exec_ctx, chand->registered_methods[i].host);
+        grpc_slice_unref_internal(chand->registered_methods[i].host);
       }
     }
     gpr_free(chand->registered_methods);
@@ -918,9 +883,9 @@
     chand->next->prev = chand->prev;
     chand->prev->next = chand->next;
     chand->next = chand->prev = chand;
-    maybe_finish_shutdown(exec_ctx, chand->server);
+    maybe_finish_shutdown(chand->server);
     gpr_mu_unlock(&chand->server->mu_global);
-    server_unref(exec_ctx, chand->server);
+    server_unref(chand->server);
   }
 }
 
@@ -1034,11 +999,10 @@
   return m;
 }
 
-static void start_listeners(grpc_exec_ctx* exec_ctx, void* s,
-                            grpc_error* error) {
+static void start_listeners(void* s, grpc_error* error) {
   grpc_server* server = (grpc_server*)s;
   for (listener* l = server->listeners; l; l = l->next) {
-    l->start(exec_ctx, server, l->arg, server->pollsets, server->pollset_count);
+    l->start(server, l->arg, server->pollsets, server->pollset_count);
   }
 
   gpr_mu_lock(&server->mu_global);
@@ -1046,12 +1010,12 @@
   gpr_cv_signal(&server->starting_cv);
   gpr_mu_unlock(&server->mu_global);
 
-  server_unref(exec_ctx, server);
+  server_unref(server);
 }
 
 void grpc_server_start(grpc_server* server) {
   size_t i;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   GRPC_API_TRACE("grpc_server_start(server=%p)", 1, (server));
 
@@ -1073,12 +1037,9 @@
   server_ref(server);
   server->starting = true;
   GRPC_CLOSURE_SCHED(
-      &exec_ctx,
       GRPC_CLOSURE_CREATE(start_listeners, server,
                           grpc_executor_scheduler(GRPC_EXECUTOR_SHORT)),
       GRPC_ERROR_NONE);
-
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets,
@@ -1087,8 +1048,7 @@
   *pollsets = server->pollsets;
 }
 
-void grpc_server_setup_transport(grpc_exec_ctx* exec_ctx, grpc_server* s,
-                                 grpc_transport* transport,
+void grpc_server_setup_transport(grpc_server* s, grpc_transport* transport,
                                  grpc_pollset* accepting_pollset,
                                  const grpc_channel_args* args) {
   size_t num_registered_methods;
@@ -1103,8 +1063,7 @@
   uint32_t max_probes = 0;
   grpc_transport_op* op = nullptr;
 
-  channel = grpc_channel_create(exec_ctx, nullptr, args, GRPC_SERVER_CHANNEL,
-                                transport);
+  channel = grpc_channel_create(nullptr, args, GRPC_SERVER_CHANNEL, transport);
   chand = (channel_data*)grpc_channel_stack_element(
               grpc_channel_get_channel_stack(channel), 0)
               ->channel_data;
@@ -1181,21 +1140,19 @@
     op->disconnect_with_error =
         GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server shutdown");
   }
-  grpc_transport_perform_op(exec_ctx, transport, op);
+  grpc_transport_perform_op(transport, op);
 }
 
-void done_published_shutdown(grpc_exec_ctx* exec_ctx, void* done_arg,
-                             grpc_cq_completion* storage) {
+void done_published_shutdown(void* done_arg, grpc_cq_completion* storage) {
   (void)done_arg;
   gpr_free(storage);
 }
 
-static void listener_destroy_done(grpc_exec_ctx* exec_ctx, void* s,
-                                  grpc_error* error) {
+static void listener_destroy_done(void* s, grpc_error* error) {
   grpc_server* server = (grpc_server*)s;
   gpr_mu_lock(&server->mu_global);
   server->listeners_destroyed++;
-  maybe_finish_shutdown(exec_ctx, server);
+  maybe_finish_shutdown(server);
   gpr_mu_unlock(&server->mu_global);
 }
 
@@ -1204,7 +1161,7 @@
   listener* l;
   shutdown_tag* sdt;
   channel_broadcaster broadcaster;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   GRPC_API_TRACE("grpc_server_shutdown_and_notify(server=%p, cq=%p, tag=%p)", 3,
                  (server, cq, tag));
@@ -1213,17 +1170,16 @@
   gpr_mu_lock(&server->mu_global);
   while (server->starting) {
     gpr_cv_wait(&server->starting_cv, &server->mu_global,
-                gpr_inf_future(GPR_CLOCK_REALTIME));
+                gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
 
   /* stay locked, and gather up some stuff to do */
   GPR_ASSERT(grpc_cq_begin_op(cq, tag));
   if (server->shutdown_published) {
-    grpc_cq_end_op(&exec_ctx, cq, tag, GRPC_ERROR_NONE, done_published_shutdown,
-                   nullptr,
+    grpc_cq_end_op(cq, tag, GRPC_ERROR_NONE, done_published_shutdown, nullptr,
                    (grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
     gpr_mu_unlock(&server->mu_global);
-    goto done;
+    return;
   }
   server->shutdown_tags = (shutdown_tag*)gpr_realloc(
       server->shutdown_tags,
@@ -1233,7 +1189,7 @@
   sdt->cq = cq;
   if (gpr_atm_acq_load(&server->shutdown_flag)) {
     gpr_mu_unlock(&server->mu_global);
-    goto done;
+    return;
   }
 
   server->last_shutdown_message_time = gpr_now(GPR_CLOCK_REALTIME);
@@ -1245,30 +1201,26 @@
   /* collect all unregistered then registered calls */
   gpr_mu_lock(&server->mu_call);
   kill_pending_work_locked(
-      &exec_ctx, server,
-      GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
+      server, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
   gpr_mu_unlock(&server->mu_call);
 
-  maybe_finish_shutdown(&exec_ctx, server);
+  maybe_finish_shutdown(server);
   gpr_mu_unlock(&server->mu_global);
 
   /* Shutdown listeners */
   for (l = server->listeners; l; l = l->next) {
     GRPC_CLOSURE_INIT(&l->destroy_done, listener_destroy_done, server,
                       grpc_schedule_on_exec_ctx);
-    l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
+    l->destroy(server, l->arg, &l->destroy_done);
   }
 
-  channel_broadcaster_shutdown(&exec_ctx, &broadcaster, true /* send_goaway */,
+  channel_broadcaster_shutdown(&broadcaster, true /* send_goaway */,
                                GRPC_ERROR_NONE);
-
-done:
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 void grpc_server_cancel_all_calls(grpc_server* server) {
   channel_broadcaster broadcaster;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   GRPC_API_TRACE("grpc_server_cancel_all_calls(server=%p)", 1, (server));
 
@@ -1277,14 +1229,13 @@
   gpr_mu_unlock(&server->mu_global);
 
   channel_broadcaster_shutdown(
-      &exec_ctx, &broadcaster, false /* send_goaway */,
+      &broadcaster, false /* send_goaway */,
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("Cancelling all calls"));
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 void grpc_server_destroy(grpc_server* server) {
   listener* l;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   GRPC_API_TRACE("grpc_server_destroy(server=%p)", 1, (server));
 
@@ -1300,16 +1251,15 @@
 
   gpr_mu_unlock(&server->mu_global);
 
-  server_unref(&exec_ctx, server);
-  grpc_exec_ctx_finish(&exec_ctx);
+  server_unref(server);
 }
 
-void grpc_server_add_listener(
-    grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
-    void (*start)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
-                  grpc_pollset** pollsets, size_t pollset_count),
-    void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
-                    grpc_closure* on_done)) {
+void grpc_server_add_listener(grpc_server* server, void* arg,
+                              void (*start)(grpc_server* server, void* arg,
+                                            grpc_pollset** pollsets,
+                                            size_t pollset_count),
+                              void (*destroy)(grpc_server* server, void* arg,
+                                              grpc_closure* on_done)) {
   listener* l = (listener*)gpr_malloc(sizeof(listener));
   l->arg = arg;
   l->start = start;
@@ -1318,13 +1268,12 @@
   server->listeners = l;
 }
 
-static grpc_call_error queue_call_request(grpc_exec_ctx* exec_ctx,
-                                          grpc_server* server, size_t cq_idx,
+static grpc_call_error queue_call_request(grpc_server* server, size_t cq_idx,
                                           requested_call* rc) {
   call_data* calld = nullptr;
   request_matcher* rm = nullptr;
   if (gpr_atm_acq_load(&server->shutdown_flag)) {
-    fail_call(exec_ctx, server, cq_idx, rc,
+    fail_call(server, cq_idx, rc,
               GRPC_ERROR_CREATE_FROM_STATIC_STRING("Server Shutdown"));
     return GRPC_CALL_OK;
   }
@@ -1351,10 +1300,9 @@
             &calld->kill_zombie_closure, kill_zombie,
             grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
             grpc_schedule_on_exec_ctx);
-        GRPC_CLOSURE_SCHED(exec_ctx, &calld->kill_zombie_closure,
-                           GRPC_ERROR_NONE);
+        GRPC_CLOSURE_SCHED(&calld->kill_zombie_closure, GRPC_ERROR_NONE);
       } else {
-        publish_call(exec_ctx, server, calld, cq_idx, rc);
+        publish_call(server, calld, cq_idx, rc);
       }
       gpr_mu_lock(&server->mu_call);
     }
@@ -1369,9 +1317,9 @@
     grpc_completion_queue* cq_bound_to_call,
     grpc_completion_queue* cq_for_notification, void* tag) {
   grpc_call_error error;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   requested_call* rc = (requested_call*)gpr_malloc(sizeof(*rc));
-  GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
+  GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
   GRPC_API_TRACE(
       "grpc_server_request_call("
       "server=%p, call=%p, details=%p, initial_metadata=%p, "
@@ -1404,9 +1352,9 @@
   rc->call = call;
   rc->data.batch.details = details;
   rc->initial_metadata = initial_metadata;
-  error = queue_call_request(&exec_ctx, server, cq_idx, rc);
+  error = queue_call_request(server, cq_idx, rc);
 done:
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return error;
 }
 
@@ -1416,10 +1364,10 @@
     grpc_completion_queue* cq_bound_to_call,
     grpc_completion_queue* cq_for_notification, void* tag) {
   grpc_call_error error;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   requested_call* rc = (requested_call*)gpr_malloc(sizeof(*rc));
   registered_method* rm = (registered_method*)rmp;
-  GRPC_STATS_INC_SERVER_REQUESTED_CALLS(&exec_ctx);
+  GRPC_STATS_INC_SERVER_REQUESTED_CALLS();
   GRPC_API_TRACE(
       "grpc_server_request_registered_call("
       "server=%p, rmp=%p, call=%p, deadline=%p, initial_metadata=%p, "
@@ -1461,20 +1409,20 @@
   rc->data.registered.deadline = deadline;
   rc->initial_metadata = initial_metadata;
   rc->data.registered.optional_payload = optional_payload;
-  error = queue_call_request(&exec_ctx, server, cq_idx, rc);
+  error = queue_call_request(server, cq_idx, rc);
 done:
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return error;
 }
 
-static void fail_call(grpc_exec_ctx* exec_ctx, grpc_server* server,
-                      size_t cq_idx, requested_call* rc, grpc_error* error) {
+static void fail_call(grpc_server* server, size_t cq_idx, requested_call* rc,
+                      grpc_error* error) {
   *rc->call = nullptr;
   rc->initial_metadata->count = 0;
   GPR_ASSERT(error != GRPC_ERROR_NONE);
 
-  grpc_cq_end_op(exec_ctx, server->cqs[cq_idx], rc->tag, error,
-                 done_request_event, rc, &rc->completion);
+  grpc_cq_end_op(server->cqs[cq_idx], rc->tag, error, done_request_event, rc,
+                 &rc->completion);
 }
 
 const grpc_channel_args* grpc_server_get_channel_args(grpc_server* server) {
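
Across server.cc every closure callback drops its leading grpc_exec_ctx* parameter, and GRPC_CLOSURE_SCHED/GRPC_CLOSURE_RUN take one argument fewer. A hedged sketch of the new shape, assuming an ambient grpc_core::ExecCtx is already on the calling thread's stack; my_state, on_done, and schedule_on_done are hypothetical names:

#include <grpc/support/alloc.h>
#include "src/core/lib/iomgr/closure.h"

typedef struct {
  grpc_closure closure;
} my_state;

// New callback signature: no exec_ctx, just (arg, error).
static void on_done(void* arg, grpc_error* error) {
  gpr_free(arg);
}

static void schedule_on_done(my_state* s) {
  GRPC_CLOSURE_INIT(&s->closure, on_done, s, grpc_schedule_on_exec_ctx);
  GRPC_CLOSURE_SCHED(&s->closure, GRPC_ERROR_NONE);  // was (exec_ctx, closure, error)
}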
diff --git a/src/core/lib/surface/server.h b/src/core/lib/surface/server.h
index d5c2b0f..63b6dff 100644
--- a/src/core/lib/surface/server.h
+++ b/src/core/lib/surface/server.h
@@ -24,10 +24,6 @@
 #include "src/core/lib/debug/trace.h"
 #include "src/core/lib/transport/transport.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const grpc_channel_filter grpc_server_top_filter;
 
 /** Lightweight tracing of server channel state */
@@ -35,17 +31,16 @@
 
 /* Add a listener to the server: when the server starts, it will call start,
    and when it shuts down, it will call destroy */
-void grpc_server_add_listener(
-    grpc_exec_ctx* exec_ctx, grpc_server* server, void* listener,
-    void (*start)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
-                  grpc_pollset** pollsets, size_t npollsets),
-    void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_server* server, void* arg,
-                    grpc_closure* on_done));
+void grpc_server_add_listener(grpc_server* server, void* listener,
+                              void (*start)(grpc_server* server, void* arg,
+                                            grpc_pollset** pollsets,
+                                            size_t npollsets),
+                              void (*destroy)(grpc_server* server, void* arg,
+                                              grpc_closure* on_done));
 
 /* Setup a transport - creates a channel stack, binds the transport to the
    server */
-void grpc_server_setup_transport(grpc_exec_ctx* exec_ctx, grpc_server* server,
-                                 grpc_transport* transport,
+void grpc_server_setup_transport(grpc_server* server, grpc_transport* transport,
                                  grpc_pollset* accepting_pollset,
                                  const grpc_channel_args* args);
 
@@ -58,8 +53,4 @@
 void grpc_server_get_pollsets(grpc_server* server, grpc_pollset*** pollsets,
                               size_t* pollset_count);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_SERVER_H */
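
The listener callbacks registered through grpc_server_add_listener follow the same narrowing: start and destroy no longer receive an exec_ctx. A hedged sketch of a listener satisfying the new signatures; the my_listener_* functions and the opaque state argument are hypothetical:

#include "src/core/lib/surface/server.h"

static void my_listener_start(grpc_server* server, void* arg,
                              grpc_pollset** pollsets, size_t npollsets) {
  // Start accepting connections on the server's pollsets.
}

static void my_listener_destroy(grpc_server* server, void* arg,
                                grpc_closure* on_done) {
  // Tear down, then signal on_done so the server can count this listener as
  // destroyed and finish shutdown (see listener_destroy_done in server.cc).
  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
}

static void register_my_listener(grpc_server* server, void* state) {
  grpc_server_add_listener(server, state, my_listener_start,
                           my_listener_destroy);
}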
diff --git a/src/core/lib/surface/validate_metadata.h b/src/core/lib/surface/validate_metadata.h
index 9ca2069..ff074b0 100644
--- a/src/core/lib/surface/validate_metadata.h
+++ b/src/core/lib/surface/validate_metadata.h
@@ -22,15 +22,7 @@
 #include <grpc/slice.h>
 #include "src/core/lib/iomgr/error.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 grpc_error* grpc_validate_header_key_is_legal(grpc_slice slice);
 grpc_error* grpc_validate_header_nonbin_value_is_legal(grpc_slice slice);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_SURFACE_VALIDATE_METADATA_H */
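
Beyond the linkage cleanup, the validation helpers keep their shape: each returns a grpc_error* describing why a slice is illegal, or GRPC_ERROR_NONE. A small usage sketch; header_key_ok is a hypothetical wrapper that collapses the result into a boolean:

#include "src/core/lib/surface/validate_metadata.h"

static bool header_key_ok(grpc_slice key) {
  grpc_error* err = grpc_validate_header_key_is_legal(key);
  if (err == GRPC_ERROR_NONE) return true;
  GRPC_ERROR_UNREF(err);  // the error carries the reason; dropped here
  return false;
}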
diff --git a/src/core/lib/surface/version.cc b/src/core/lib/surface/version.cc
index 5e4f620..7d36c6c 100644
--- a/src/core/lib/surface/version.cc
+++ b/src/core/lib/surface/version.cc
@@ -21,6 +21,6 @@
 
 #include <grpc/grpc.h>
 
-const char* grpc_version_string(void) { return "5.0.0"; }
+const char* grpc_version_string(void) { return "5.0.0-dev"; }
 
-const char* grpc_g_stands_for(void) { return "generous"; }
+const char* grpc_g_stands_for(void) { return "glossy"; }
diff --git a/src/core/lib/transport/bdp_estimator.cc b/src/core/lib/transport/bdp_estimator.cc
index bb0e583..5fcc62e 100644
--- a/src/core/lib/transport/bdp_estimator.cc
+++ b/src/core/lib/transport/bdp_estimator.cc
@@ -37,7 +37,7 @@
       bw_est_(0),
       name_(name) {}
 
-grpc_millis BdpEstimator::CompletePing(grpc_exec_ctx* exec_ctx) {
+grpc_millis BdpEstimator::CompletePing() {
   gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
   gpr_timespec dt_ts = gpr_time_sub(now, ping_start_time_);
   double dt = (double)dt_ts.tv_sec + 1e-9 * (double)dt_ts.tv_nsec;
@@ -78,7 +78,7 @@
   }
   ping_state_ = PingState::UNSCHEDULED;
   accumulator_ = 0;
-  return grpc_exec_ctx_now(exec_ctx) + inter_ping_delay_;
+  return grpc_core::ExecCtx::Get()->Now() + inter_ping_delay_;
 }
 
 }  // namespace grpc_core
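
CompletePing() now reads the cached clock from the thread-local context instead of an explicit exec_ctx. A hedged sketch of the same pattern in isolation; deadline_in_millis is a hypothetical helper:

#include "src/core/lib/iomgr/exec_ctx.h"

static grpc_millis deadline_in_millis(grpc_millis delay) {
  // ExecCtx::Get()->Now() is the cached "now" used above to schedule the
  // next BDP ping.
  return grpc_core::ExecCtx::Get()->Now() + delay;
}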
diff --git a/src/core/lib/transport/bdp_estimator.h b/src/core/lib/transport/bdp_estimator.h
index df3a86c..e703af1 100644
--- a/src/core/lib/transport/bdp_estimator.h
+++ b/src/core/lib/transport/bdp_estimator.h
@@ -73,7 +73,7 @@
   }
 
   // Completes a previously started ping, returns when to schedule the next one
-  grpc_millis CompletePing(grpc_exec_ctx* exec_ctx);
+  grpc_millis CompletePing();
 
  private:
   enum class PingState { UNSCHEDULED, SCHEDULED, STARTED };
diff --git a/src/core/lib/transport/byte_stream.cc b/src/core/lib/transport/byte_stream.cc
index b872025..8dcb1e0 100644
--- a/src/core/lib/transport/byte_stream.cc
+++ b/src/core/lib/transport/byte_stream.cc
@@ -25,34 +25,28 @@
 
 #include "src/core/lib/slice/slice_internal.h"
 
-bool grpc_byte_stream_next(grpc_exec_ctx* exec_ctx,
-                           grpc_byte_stream* byte_stream, size_t max_size_hint,
+bool grpc_byte_stream_next(grpc_byte_stream* byte_stream, size_t max_size_hint,
                            grpc_closure* on_complete) {
-  return byte_stream->vtable->next(exec_ctx, byte_stream, max_size_hint,
-                                   on_complete);
+  return byte_stream->vtable->next(byte_stream, max_size_hint, on_complete);
 }
 
-grpc_error* grpc_byte_stream_pull(grpc_exec_ctx* exec_ctx,
-                                  grpc_byte_stream* byte_stream,
+grpc_error* grpc_byte_stream_pull(grpc_byte_stream* byte_stream,
                                   grpc_slice* slice) {
-  return byte_stream->vtable->pull(exec_ctx, byte_stream, slice);
+  return byte_stream->vtable->pull(byte_stream, slice);
 }
 
-void grpc_byte_stream_shutdown(grpc_exec_ctx* exec_ctx,
-                               grpc_byte_stream* byte_stream,
+void grpc_byte_stream_shutdown(grpc_byte_stream* byte_stream,
                                grpc_error* error) {
-  byte_stream->vtable->shutdown(exec_ctx, byte_stream, error);
+  byte_stream->vtable->shutdown(byte_stream, error);
 }
 
-void grpc_byte_stream_destroy(grpc_exec_ctx* exec_ctx,
-                              grpc_byte_stream* byte_stream) {
-  byte_stream->vtable->destroy(exec_ctx, byte_stream);
+void grpc_byte_stream_destroy(grpc_byte_stream* byte_stream) {
+  byte_stream->vtable->destroy(byte_stream);
 }
 
 // grpc_slice_buffer_stream
 
-static bool slice_buffer_stream_next(grpc_exec_ctx* exec_ctx,
-                                     grpc_byte_stream* byte_stream,
+static bool slice_buffer_stream_next(grpc_byte_stream* byte_stream,
                                      size_t max_size_hint,
                                      grpc_closure* on_complete) {
   grpc_slice_buffer_stream* stream = (grpc_slice_buffer_stream*)byte_stream;
@@ -60,8 +54,7 @@
   return true;
 }
 
-static grpc_error* slice_buffer_stream_pull(grpc_exec_ctx* exec_ctx,
-                                            grpc_byte_stream* byte_stream,
+static grpc_error* slice_buffer_stream_pull(grpc_byte_stream* byte_stream,
                                             grpc_slice* slice) {
   grpc_slice_buffer_stream* stream = (grpc_slice_buffer_stream*)byte_stream;
   if (stream->shutdown_error != GRPC_ERROR_NONE) {
@@ -74,18 +67,16 @@
   return GRPC_ERROR_NONE;
 }
 
-static void slice_buffer_stream_shutdown(grpc_exec_ctx* exec_ctx,
-                                         grpc_byte_stream* byte_stream,
+static void slice_buffer_stream_shutdown(grpc_byte_stream* byte_stream,
                                          grpc_error* error) {
   grpc_slice_buffer_stream* stream = (grpc_slice_buffer_stream*)byte_stream;
   GRPC_ERROR_UNREF(stream->shutdown_error);
   stream->shutdown_error = error;
 }
 
-static void slice_buffer_stream_destroy(grpc_exec_ctx* exec_ctx,
-                                        grpc_byte_stream* byte_stream) {
+static void slice_buffer_stream_destroy(grpc_byte_stream* byte_stream) {
   grpc_slice_buffer_stream* stream = (grpc_slice_buffer_stream*)byte_stream;
-  grpc_slice_buffer_reset_and_unref_internal(exec_ctx, stream->backing_buffer);
+  grpc_slice_buffer_reset_and_unref_internal(stream->backing_buffer);
   GRPC_ERROR_UNREF(stream->shutdown_error);
 }
 
@@ -113,25 +104,22 @@
   grpc_slice_buffer_init(&cache->cache_buffer);
 }
 
-void grpc_byte_stream_cache_destroy(grpc_exec_ctx* exec_ctx,
-                                    grpc_byte_stream_cache* cache) {
-  grpc_byte_stream_destroy(exec_ctx, cache->underlying_stream);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &cache->cache_buffer);
+void grpc_byte_stream_cache_destroy(grpc_byte_stream_cache* cache) {
+  grpc_byte_stream_destroy(cache->underlying_stream);
+  grpc_slice_buffer_destroy_internal(&cache->cache_buffer);
 }
 
-static bool caching_byte_stream_next(grpc_exec_ctx* exec_ctx,
-                                     grpc_byte_stream* byte_stream,
+static bool caching_byte_stream_next(grpc_byte_stream* byte_stream,
                                      size_t max_size_hint,
                                      grpc_closure* on_complete) {
   grpc_caching_byte_stream* stream = (grpc_caching_byte_stream*)byte_stream;
   if (stream->shutdown_error != GRPC_ERROR_NONE) return true;
   if (stream->cursor < stream->cache->cache_buffer.count) return true;
-  return grpc_byte_stream_next(exec_ctx, stream->cache->underlying_stream,
-                               max_size_hint, on_complete);
+  return grpc_byte_stream_next(stream->cache->underlying_stream, max_size_hint,
+                               on_complete);
 }
 
-static grpc_error* caching_byte_stream_pull(grpc_exec_ctx* exec_ctx,
-                                            grpc_byte_stream* byte_stream,
+static grpc_error* caching_byte_stream_pull(grpc_byte_stream* byte_stream,
                                             grpc_slice* slice) {
   grpc_caching_byte_stream* stream = (grpc_caching_byte_stream*)byte_stream;
   if (stream->shutdown_error != GRPC_ERROR_NONE) {
@@ -144,7 +132,7 @@
     return GRPC_ERROR_NONE;
   }
   grpc_error* error =
-      grpc_byte_stream_pull(exec_ctx, stream->cache->underlying_stream, slice);
+      grpc_byte_stream_pull(stream->cache->underlying_stream, slice);
   if (error == GRPC_ERROR_NONE) {
     ++stream->cursor;
     grpc_slice_buffer_add(&stream->cache->cache_buffer,
@@ -153,17 +141,15 @@
   return error;
 }
 
-static void caching_byte_stream_shutdown(grpc_exec_ctx* exec_ctx,
-                                         grpc_byte_stream* byte_stream,
+static void caching_byte_stream_shutdown(grpc_byte_stream* byte_stream,
                                          grpc_error* error) {
   grpc_caching_byte_stream* stream = (grpc_caching_byte_stream*)byte_stream;
   GRPC_ERROR_UNREF(stream->shutdown_error);
   stream->shutdown_error = GRPC_ERROR_REF(error);
-  grpc_byte_stream_shutdown(exec_ctx, stream->cache->underlying_stream, error);
+  grpc_byte_stream_shutdown(stream->cache->underlying_stream, error);
 }
 
-static void caching_byte_stream_destroy(grpc_exec_ctx* exec_ctx,
-                                        grpc_byte_stream* byte_stream) {
+static void caching_byte_stream_destroy(grpc_byte_stream* byte_stream) {
   grpc_caching_byte_stream* stream = (grpc_caching_byte_stream*)byte_stream;
   GRPC_ERROR_UNREF(stream->shutdown_error);
 }
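
The byte-stream wrappers keep their semantics and only lose the exec_ctx argument: next() returns true when pull() can be called immediately, otherwise on_complete fires once data is ready. A hedged consumer sketch; drain_bytes is hypothetical, and the caller is assumed to re-enter with the remaining count when on_ready runs:

#include "src/core/lib/slice/slice_internal.h"
#include "src/core/lib/transport/byte_stream.h"

static grpc_error* drain_bytes(grpc_byte_stream* stream, size_t nbytes,
                               grpc_closure* on_ready) {
  size_t consumed = 0;
  while (consumed < nbytes &&
         grpc_byte_stream_next(stream, nbytes - consumed, on_ready)) {
    grpc_slice slice;
    grpc_error* error = grpc_byte_stream_pull(stream, &slice);
    if (error != GRPC_ERROR_NONE) return error;
    consumed += GRPC_SLICE_LENGTH(slice);  // caller owns the pulled slice
    grpc_slice_unref_internal(slice);
  }
  return GRPC_ERROR_NONE;  // done, or on_ready will run when more data arrives
}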
diff --git a/src/core/lib/transport/byte_stream.h b/src/core/lib/transport/byte_stream.h
index 54ad4b9..52c7a07 100644
--- a/src/core/lib/transport/byte_stream.h
+++ b/src/core/lib/transport/byte_stream.h
@@ -28,20 +28,14 @@
 /** Mask of all valid internal flags. */
 #define GRPC_WRITE_INTERNAL_USED_MASK (GRPC_WRITE_INTERNAL_COMPRESS)
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_byte_stream grpc_byte_stream;
 
 typedef struct {
-  bool (*next)(grpc_exec_ctx* exec_ctx, grpc_byte_stream* byte_stream,
-               size_t max_size_hint, grpc_closure* on_complete);
-  grpc_error* (*pull)(grpc_exec_ctx* exec_ctx, grpc_byte_stream* byte_stream,
-                      grpc_slice* slice);
-  void (*shutdown)(grpc_exec_ctx* exec_ctx, grpc_byte_stream* byte_stream,
-                   grpc_error* error);
-  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_byte_stream* byte_stream);
+  bool (*next)(grpc_byte_stream* byte_stream, size_t max_size_hint,
+               grpc_closure* on_complete);
+  grpc_error* (*pull)(grpc_byte_stream* byte_stream, grpc_slice* slice);
+  void (*shutdown)(grpc_byte_stream* byte_stream, grpc_error* error);
+  void (*destroy)(grpc_byte_stream* byte_stream);
 } grpc_byte_stream_vtable;
 
 struct grpc_byte_stream {
@@ -56,8 +50,7 @@
 //
 // max_size_hint can be set as a hint as to the maximum number
 // of bytes that would be acceptable to read.
-bool grpc_byte_stream_next(grpc_exec_ctx* exec_ctx,
-                           grpc_byte_stream* byte_stream, size_t max_size_hint,
+bool grpc_byte_stream_next(grpc_byte_stream* byte_stream, size_t max_size_hint,
                            grpc_closure* on_complete);
 
 // Returns the next slice in the byte stream when it is ready (indicated by
@@ -65,8 +58,7 @@
 // grpc_byte_stream_next is called).
 //
 // Once a slice is returned into *slice, it is owned by the caller.
-grpc_error* grpc_byte_stream_pull(grpc_exec_ctx* exec_ctx,
-                                  grpc_byte_stream* byte_stream,
+grpc_error* grpc_byte_stream_pull(grpc_byte_stream* byte_stream,
                                   grpc_slice* slice);
 
 // Shuts down the byte stream.
@@ -76,12 +68,10 @@
 //
 // The next call to grpc_byte_stream_pull() (if any) will return the error
 // passed to grpc_byte_stream_shutdown().
-void grpc_byte_stream_shutdown(grpc_exec_ctx* exec_ctx,
-                               grpc_byte_stream* byte_stream,
+void grpc_byte_stream_shutdown(grpc_byte_stream* byte_stream,
                                grpc_error* error);
 
-void grpc_byte_stream_destroy(grpc_exec_ctx* exec_ctx,
-                              grpc_byte_stream* byte_stream);
+void grpc_byte_stream_destroy(grpc_byte_stream* byte_stream);
 
 // grpc_slice_buffer_stream
 //
@@ -123,8 +113,7 @@
                                  grpc_byte_stream* underlying_stream);
 
 // Must not be called while still in use by a grpc_caching_byte_stream.
-void grpc_byte_stream_cache_destroy(grpc_exec_ctx* exec_ctx,
-                                    grpc_byte_stream_cache* cache);
+void grpc_byte_stream_cache_destroy(grpc_byte_stream_cache* cache);
 
 typedef struct {
   grpc_byte_stream base;
@@ -139,8 +128,4 @@
 // Resets the byte stream to the start of the underlying stream.
 void grpc_caching_byte_stream_reset(grpc_caching_byte_stream* stream);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_TRANSPORT_BYTE_STREAM_H */
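
Implementations plug into the same four-entry vtable, now without exec_ctx in any slot. A hedged sketch of a trivial (empty) implementation showing the new member signatures; the empty_* names are hypothetical, and shutdown is assumed to take ownership of the error, matching slice_buffer_stream above:

#include <grpc/slice.h>
#include "src/core/lib/transport/byte_stream.h"

static bool empty_next(grpc_byte_stream* bs, size_t max_size_hint,
                       grpc_closure* on_complete) {
  return true;  // the (empty) data is always immediately available
}
static grpc_error* empty_pull(grpc_byte_stream* bs, grpc_slice* slice) {
  *slice = grpc_empty_slice();
  return GRPC_ERROR_NONE;
}
static void empty_shutdown(grpc_byte_stream* bs, grpc_error* error) {
  GRPC_ERROR_UNREF(error);  // nothing to store, so drop the owned error
}
static void empty_destroy(grpc_byte_stream* bs) {}

static const grpc_byte_stream_vtable empty_vtable = {
    empty_next, empty_pull, empty_shutdown, empty_destroy};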
diff --git a/src/core/lib/transport/connectivity_state.cc b/src/core/lib/transport/connectivity_state.cc
index e7e5dbd..c42cc9c 100644
--- a/src/core/lib/transport/connectivity_state.cc
+++ b/src/core/lib/transport/connectivity_state.cc
@@ -51,8 +51,7 @@
   tracker->name = gpr_strdup(name);
 }
 
-void grpc_connectivity_state_destroy(grpc_exec_ctx* exec_ctx,
-                                     grpc_connectivity_state_tracker* tracker) {
+void grpc_connectivity_state_destroy(grpc_connectivity_state_tracker* tracker) {
   grpc_error* error;
   grpc_connectivity_state_watcher* w;
   while ((w = tracker->watchers)) {
@@ -65,7 +64,7 @@
       error =
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Shutdown connectivity owner");
     }
-    GRPC_CLOSURE_SCHED(exec_ctx, w->notify, error);
+    GRPC_CLOSURE_SCHED(w->notify, error);
     gpr_free(w);
   }
   GRPC_ERROR_UNREF(tracker->current_error);
@@ -105,8 +104,8 @@
 }
 
 bool grpc_connectivity_state_notify_on_state_change(
-    grpc_exec_ctx* exec_ctx, grpc_connectivity_state_tracker* tracker,
-    grpc_connectivity_state* current, grpc_closure* notify) {
+    grpc_connectivity_state_tracker* tracker, grpc_connectivity_state* current,
+    grpc_closure* notify) {
   grpc_connectivity_state cur =
       (grpc_connectivity_state)gpr_atm_no_barrier_load(
           &tracker->current_state_atm);
@@ -123,7 +122,7 @@
   if (current == nullptr) {
     grpc_connectivity_state_watcher* w = tracker->watchers;
     if (w != nullptr && w->notify == notify) {
-      GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
+      GRPC_CLOSURE_SCHED(notify, GRPC_ERROR_CANCELLED);
       tracker->watchers = w->next;
       gpr_free(w);
       return false;
@@ -131,7 +130,7 @@
     while (w != nullptr) {
       grpc_connectivity_state_watcher* rm_candidate = w->next;
       if (rm_candidate != nullptr && rm_candidate->notify == notify) {
-        GRPC_CLOSURE_SCHED(exec_ctx, notify, GRPC_ERROR_CANCELLED);
+        GRPC_CLOSURE_SCHED(notify, GRPC_ERROR_CANCELLED);
         w->next = w->next->next;
         gpr_free(rm_candidate);
         return false;
@@ -142,8 +141,7 @@
   } else {
     if (cur != *current) {
       *current = cur;
-      GRPC_CLOSURE_SCHED(exec_ctx, notify,
-                         GRPC_ERROR_REF(tracker->current_error));
+      GRPC_CLOSURE_SCHED(notify, GRPC_ERROR_REF(tracker->current_error));
     } else {
       grpc_connectivity_state_watcher* w =
           (grpc_connectivity_state_watcher*)gpr_malloc(sizeof(*w));
@@ -156,8 +154,7 @@
   }
 }
 
-void grpc_connectivity_state_set(grpc_exec_ctx* exec_ctx,
-                                 grpc_connectivity_state_tracker* tracker,
+void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker,
                                  grpc_connectivity_state state,
                                  grpc_error* error, const char* reason) {
   grpc_connectivity_state cur =
@@ -195,8 +192,7 @@
       gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
               w->notify);
     }
-    GRPC_CLOSURE_SCHED(exec_ctx, w->notify,
-                       GRPC_ERROR_REF(tracker->current_error));
+    GRPC_CLOSURE_SCHED(w->notify, GRPC_ERROR_REF(tracker->current_error));
     gpr_free(w);
   }
 }
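
The tracker API is unchanged apart from the dropped exec_ctx: state changes still must be serialized by the caller, and watcher closures queued by a state change run off the ambient ExecCtx. A hedged usage sketch; my_component and its functions are hypothetical:

#include "src/core/lib/transport/connectivity_state.h"

typedef struct {
  grpc_connectivity_state_tracker tracker;
} my_component;

static void my_component_init(my_component* c) {
  grpc_connectivity_state_init(&c->tracker, GRPC_CHANNEL_IDLE, "my_component");
}

static void my_component_become_ready(my_component* c) {
  // Caller holds the external lock required by the header below, and an
  // ExecCtx is on the stack so notified watchers can be scheduled.
  grpc_connectivity_state_set(&c->tracker, GRPC_CHANNEL_READY, GRPC_ERROR_NONE,
                              "became ready");
}

static bool my_component_watch(my_component* c,
                               grpc_connectivity_state* current,
                               grpc_closure* notify) {
  return grpc_connectivity_state_notify_on_state_change(&c->tracker, current,
                                                        notify);
}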
diff --git a/src/core/lib/transport/connectivity_state.h b/src/core/lib/transport/connectivity_state.h
index 60d20dd..c3a50f3 100644
--- a/src/core/lib/transport/connectivity_state.h
+++ b/src/core/lib/transport/connectivity_state.h
@@ -23,10 +23,6 @@
 #include "src/core/lib/debug/trace.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_connectivity_state_watcher {
   /** we keep watchers in a linked list */
   struct grpc_connectivity_state_watcher* next;
@@ -55,13 +51,11 @@
 void grpc_connectivity_state_init(grpc_connectivity_state_tracker* tracker,
                                   grpc_connectivity_state init_state,
                                   const char* name);
-void grpc_connectivity_state_destroy(grpc_exec_ctx* exec_ctx,
-                                     grpc_connectivity_state_tracker* tracker);
+void grpc_connectivity_state_destroy(grpc_connectivity_state_tracker* tracker);
 
 /** Set connectivity state; not thread safe; access must be serialized with an
  *  external lock */
-void grpc_connectivity_state_set(grpc_exec_ctx* exec_ctx,
-                                 grpc_connectivity_state_tracker* tracker,
+void grpc_connectivity_state_set(grpc_connectivity_state_tracker* tracker,
                                  grpc_connectivity_state state,
                                  grpc_error* associated_error,
                                  const char* reason);
@@ -85,11 +79,7 @@
     case).
     Access must be serialized with an external lock. */
 bool grpc_connectivity_state_notify_on_state_change(
-    grpc_exec_ctx* exec_ctx, grpc_connectivity_state_tracker* tracker,
-    grpc_connectivity_state* current, grpc_closure* notify);
-
-#ifdef __cplusplus
-}
-#endif
+    grpc_connectivity_state_tracker* tracker, grpc_connectivity_state* current,
+    grpc_closure* notify);
 
 #endif /* GRPC_CORE_LIB_TRANSPORT_CONNECTIVITY_STATE_H */
diff --git a/src/core/lib/transport/error_utils.cc b/src/core/lib/transport/error_utils.cc
index 69c8ae6..ffaf327 100644
--- a/src/core/lib/transport/error_utils.cc
+++ b/src/core/lib/transport/error_utils.cc
@@ -40,9 +40,9 @@
   return nullptr;
 }
 
-void grpc_error_get_status(grpc_exec_ctx* exec_ctx, grpc_error* error,
-                           grpc_millis deadline, grpc_status_code* code,
-                           grpc_slice* slice, grpc_http2_error_code* http_error,
+void grpc_error_get_status(grpc_error* error, grpc_millis deadline,
+                           grpc_status_code* code, grpc_slice* slice,
+                           grpc_http2_error_code* http_error,
                            const char** error_string) {
   // Start with the parent error and recurse through the tree of children
   // until we find the first one that has a status code.
@@ -65,8 +65,8 @@
     status = (grpc_status_code)integer;
   } else if (grpc_error_get_int(found_error, GRPC_ERROR_INT_HTTP2_ERROR,
                                 &integer)) {
-    status = grpc_http2_error_to_grpc_status(
-        exec_ctx, (grpc_http2_error_code)integer, deadline);
+    status = grpc_http2_error_to_grpc_status((grpc_http2_error_code)integer,
+                                             deadline);
   }
   if (code != nullptr) *code = status;
 
diff --git a/src/core/lib/transport/error_utils.h b/src/core/lib/transport/error_utils.h
index 6f21f48..4100f65 100644
--- a/src/core/lib/transport/error_utils.h
+++ b/src/core/lib/transport/error_utils.h
@@ -23,10 +23,6 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/transport/http2_errors.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /// A utility function to get the status code and message to be returned
 /// to the application.  If not set in the top-level message, looks
 /// through child errors until it finds the first one with these attributes.
@@ -34,9 +30,8 @@
 /// be populated with the entire error string. If any of the attributes (code,
 /// msg, http_status, error_string) are unneeded, they can be passed as
 /// NULL.
-void grpc_error_get_status(grpc_exec_ctx* exec_ctx, grpc_error* error,
-                           grpc_millis deadline, grpc_status_code* code,
-                           grpc_slice* slice,
+void grpc_error_get_status(grpc_error* error, grpc_millis deadline,
+                           grpc_status_code* code, grpc_slice* slice,
                            grpc_http2_error_code* http_status,
                            const char** error_string);
 
@@ -46,8 +41,4 @@
 /// GRPC_ERROR_CANCELLED
 bool grpc_error_has_clear_grpc_status(grpc_error* error);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_TRANSPORT_ERROR_UTILS_H */
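
grpc_error_get_status keeps its contract (any unneeded output may be passed as NULL) and only drops the exec_ctx parameter. A hedged sketch that extracts just the status code; status_of is a hypothetical helper:

#include <grpc/status.h>
#include "src/core/lib/transport/error_utils.h"

static grpc_status_code status_of(grpc_error* error, grpc_millis deadline) {
  grpc_status_code code = GRPC_STATUS_UNKNOWN;
  grpc_error_get_status(error, deadline, &code, /*slice=*/nullptr,
                        /*http_error=*/nullptr, /*error_string=*/nullptr);
  return code;
}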
diff --git a/src/core/lib/transport/metadata.cc b/src/core/lib/transport/metadata.cc
index 0f30c75..5f0673e 100644
--- a/src/core/lib/transport/metadata.cc
+++ b/src/core/lib/transport/metadata.cc
@@ -108,7 +108,7 @@
 
 static mdtab_shard g_shards[SHARD_COUNT];
 
-static void gc_mdtab(grpc_exec_ctx* exec_ctx, mdtab_shard* shard);
+static void gc_mdtab(mdtab_shard* shard);
 
 void grpc_mdctx_global_init(void) {
   /* initialize shards */
@@ -123,11 +123,11 @@
   }
 }
 
-void grpc_mdctx_global_shutdown(grpc_exec_ctx* exec_ctx) {
+void grpc_mdctx_global_shutdown() {
   for (size_t i = 0; i < SHARD_COUNT; i++) {
     mdtab_shard* shard = &g_shards[i];
     gpr_mu_destroy(&shard->mu);
-    gc_mdtab(exec_ctx, shard);
+    gc_mdtab(shard);
     /* TODO(ctiller): GPR_ASSERT(shard->count == 0); */
     if (shard->count != 0) {
       gpr_log(GPR_DEBUG, "WARNING: %" PRIuPTR " metadata elements were leaked",
@@ -165,7 +165,7 @@
   }
 }
 
-static void gc_mdtab(grpc_exec_ctx* exec_ctx, mdtab_shard* shard) {
+static void gc_mdtab(mdtab_shard* shard) {
   size_t i;
   interned_metadata** prev_next;
   interned_metadata *md, *next;
@@ -178,8 +178,8 @@
       void* user_data = (void*)gpr_atm_no_barrier_load(&md->user_data);
       next = md->bucket_next;
       if (gpr_atm_acq_load(&md->refcnt) == 0) {
-        grpc_slice_unref_internal(exec_ctx, md->key);
-        grpc_slice_unref_internal(exec_ctx, md->value);
+        grpc_slice_unref_internal(md->key);
+        grpc_slice_unref_internal(md->value);
         if (md->user_data) {
           ((destroy_user_data_func)gpr_atm_no_barrier_load(
               &md->destroy_user_data))(user_data);
@@ -228,17 +228,17 @@
   GPR_TIMER_END("grow_mdtab", 0);
 }
 
-static void rehash_mdtab(grpc_exec_ctx* exec_ctx, mdtab_shard* shard) {
+static void rehash_mdtab(mdtab_shard* shard) {
   if (gpr_atm_no_barrier_load(&shard->free_estimate) >
       (gpr_atm)(shard->capacity / 4)) {
-    gc_mdtab(exec_ctx, shard);
+    gc_mdtab(shard);
   } else {
     grow_mdtab(shard);
   }
 }
 
 grpc_mdelem grpc_mdelem_create(
-    grpc_exec_ctx* exec_ctx, grpc_slice key, grpc_slice value,
+    grpc_slice key, grpc_slice value,
     grpc_mdelem_data* compatible_external_backing_store) {
   if (!grpc_slice_is_interned(key) || !grpc_slice_is_interned(value)) {
     if (compatible_external_backing_store != nullptr) {
@@ -318,7 +318,7 @@
   shard->count++;
 
   if (shard->count > shard->capacity * 2) {
-    rehash_mdtab(exec_ctx, shard);
+    rehash_mdtab(shard);
   }
 
   gpr_mu_unlock(&shard->mu);
@@ -328,22 +328,20 @@
   return GRPC_MAKE_MDELEM(md, GRPC_MDELEM_STORAGE_INTERNED);
 }
 
-grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx* exec_ctx, grpc_slice key,
-                                    grpc_slice value) {
-  grpc_mdelem out = grpc_mdelem_create(exec_ctx, key, value, nullptr);
-  grpc_slice_unref_internal(exec_ctx, key);
-  grpc_slice_unref_internal(exec_ctx, value);
+grpc_mdelem grpc_mdelem_from_slices(grpc_slice key, grpc_slice value) {
+  grpc_mdelem out = grpc_mdelem_create(key, value, nullptr);
+  grpc_slice_unref_internal(key);
+  grpc_slice_unref_internal(value);
   return out;
 }
 
-grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx* exec_ctx,
-                                           grpc_metadata* metadata) {
+grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata) {
   bool changed = false;
   grpc_slice key_slice =
       grpc_slice_maybe_static_intern(metadata->key, &changed);
   grpc_slice value_slice =
       grpc_slice_maybe_static_intern(metadata->value, &changed);
-  return grpc_mdelem_create(exec_ctx, key_slice, value_slice,
+  return grpc_mdelem_create(key_slice, value_slice,
                             changed ? nullptr : (grpc_mdelem_data*)metadata);
 }
 
@@ -417,7 +415,7 @@
   return gmd;
 }
 
-void grpc_mdelem_unref(grpc_exec_ctx* exec_ctx, grpc_mdelem gmd DEBUG_ARGS) {
+void grpc_mdelem_unref(grpc_mdelem gmd DEBUG_ARGS) {
   switch (GRPC_MDELEM_STORAGE(gmd)) {
     case GRPC_MDELEM_STORAGE_EXTERNAL:
     case GRPC_MDELEM_STORAGE_STATIC:
@@ -465,8 +463,8 @@
       const gpr_atm prev_refcount = gpr_atm_full_fetch_add(&md->refcnt, -1);
       GPR_ASSERT(prev_refcount >= 1);
       if (1 == prev_refcount) {
-        grpc_slice_unref_internal(exec_ctx, md->key);
-        grpc_slice_unref_internal(exec_ctx, md->value);
+        grpc_slice_unref_internal(md->key);
+        grpc_slice_unref_internal(md->value);
         gpr_free(md);
       }
       break;
diff --git a/src/core/lib/transport/metadata.h b/src/core/lib/transport/metadata.h
index 931ba0b..78e6bef 100644
--- a/src/core/lib/transport/metadata.h
+++ b/src/core/lib/transport/metadata.h
@@ -27,10 +27,6 @@
 
 extern grpc_core::DebugOnlyTraceFlag grpc_trace_metadata;
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* This file provides a mechanism for tracking metadata through the grpc stack.
    It's not intended for consumption outside of the library.
 
@@ -111,20 +107,18 @@
                               (uintptr_t)GRPC_MDELEM_STORAGE_INTERNED_BIT))
 
 /* Unrefs the slices. */
-grpc_mdelem grpc_mdelem_from_slices(grpc_exec_ctx* exec_ctx, grpc_slice key,
-                                    grpc_slice value);
+grpc_mdelem grpc_mdelem_from_slices(grpc_slice key, grpc_slice value);
 
 /* Cheaply convert a grpc_metadata to a grpc_mdelem; may use the grpc_metadata
    object as backing storage (so lifetimes should align) */
-grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_exec_ctx* exec_ctx,
-                                           grpc_metadata* metadata);
+grpc_mdelem grpc_mdelem_from_grpc_metadata(grpc_metadata* metadata);
 
 /* Does not unref the slices; if a new non-interned mdelem is needed, allocates
    one if compatible_external_backing_store is NULL, or uses
    compatible_external_backing_store if it is non-NULL (in which case it's the
    user's responsibility to ensure that it outlives usage) */
 grpc_mdelem grpc_mdelem_create(
-    grpc_exec_ctx* exec_ctx, grpc_slice key, grpc_slice value,
+    grpc_slice key, grpc_slice value,
     grpc_mdelem_data* compatible_external_backing_store);
 
 bool grpc_mdelem_eq(grpc_mdelem a, grpc_mdelem b);
@@ -140,16 +134,14 @@
 
 #ifndef NDEBUG
 #define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s), __FILE__, __LINE__)
-#define GRPC_MDELEM_UNREF(exec_ctx, s) \
-  grpc_mdelem_unref((exec_ctx), (s), __FILE__, __LINE__)
+#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s), __FILE__, __LINE__)
 grpc_mdelem grpc_mdelem_ref(grpc_mdelem md, const char* file, int line);
-void grpc_mdelem_unref(grpc_exec_ctx* exec_ctx, grpc_mdelem md,
-                       const char* file, int line);
+void grpc_mdelem_unref(grpc_mdelem md, const char* file, int line);
 #else
 #define GRPC_MDELEM_REF(s) grpc_mdelem_ref((s))
-#define GRPC_MDELEM_UNREF(exec_ctx, s) grpc_mdelem_unref((exec_ctx), (s))
+#define GRPC_MDELEM_UNREF(s) grpc_mdelem_unref((s))
 grpc_mdelem grpc_mdelem_ref(grpc_mdelem md);
-void grpc_mdelem_unref(grpc_exec_ctx* exec_ctx, grpc_mdelem md);
+void grpc_mdelem_unref(grpc_mdelem md);
 #endif
 
 #define GRPC_MDKEY(md) (GRPC_MDELEM_DATA(md)->key)
@@ -166,10 +158,6 @@
 #define GRPC_MDSTR_KV_HASH(k_hash, v_hash) (GPR_ROTL((k_hash), 2) ^ (v_hash))
 
 void grpc_mdctx_global_init(void);
-void grpc_mdctx_global_shutdown(grpc_exec_ctx* exec_ctx);
-
-#ifdef __cplusplus
-}
-#endif
+void grpc_mdctx_global_shutdown();
 
 #endif /* GRPC_CORE_LIB_TRANSPORT_METADATA_H */
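The metadata API follows the same pattern: grpc_mdelem_from_slices and GRPC_MDELEM_UNREF no longer take an exec_ctx. A hypothetical sketch, assuming a grpc_core::ExecCtx is already live on the calling thread; the key/value strings and the function name are made up, and grpc_slice_from_static_string is the existing public slice constructor:

    #include <grpc/slice.h>
    #include "src/core/lib/transport/metadata.h"

    // Sketch only: build and release an interned metadata element.
    static void make_and_drop_mdelem() {
      grpc_mdelem md = grpc_mdelem_from_slices(
          grpc_slice_from_static_string("user-agent"),     // both slices are consumed
          grpc_slice_from_static_string("grpc-demo/1.0"));
      // ... use md, e.g. add it to a metadata batch ...
      GRPC_MDELEM_UNREF(md);  // was GRPC_MDELEM_UNREF(exec_ctx, md) before this change
    }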
diff --git a/src/core/lib/transport/metadata_batch.cc b/src/core/lib/transport/metadata_batch.cc
index 5817765..9c95339 100644
--- a/src/core/lib/transport/metadata_batch.cc
+++ b/src/core/lib/transport/metadata_batch.cc
@@ -51,8 +51,7 @@
 #endif /* NDEBUG */
 }
 
-static void assert_valid_callouts(grpc_exec_ctx* exec_ctx,
-                                  grpc_metadata_batch* batch) {
+static void assert_valid_callouts(grpc_metadata_batch* batch) {
 #ifndef NDEBUG
   for (grpc_linked_mdelem* l = batch->list.head; l != nullptr; l = l->next) {
     grpc_slice key_interned = grpc_slice_intern(GRPC_MDKEY(l->md));
@@ -61,7 +60,7 @@
     if (callout_idx != GRPC_BATCH_CALLOUTS_COUNT) {
       GPR_ASSERT(batch->idx.array[callout_idx] == l);
     }
-    grpc_slice_unref_internal(exec_ctx, key_interned);
+    grpc_slice_unref_internal(key_interned);
   }
 #endif
 }
@@ -77,11 +76,10 @@
   batch->deadline = GRPC_MILLIS_INF_FUTURE;
 }
 
-void grpc_metadata_batch_destroy(grpc_exec_ctx* exec_ctx,
-                                 grpc_metadata_batch* batch) {
+void grpc_metadata_batch_destroy(grpc_metadata_batch* batch) {
   grpc_linked_mdelem* l;
   for (l = batch->list.head; l; l = l->next) {
-    GRPC_MDELEM_UNREF(exec_ctx, l->md);
+    GRPC_MDELEM_UNREF(l->md);
   }
 }
 
@@ -126,13 +124,12 @@
   batch->idx.array[idx] = nullptr;
 }
 
-grpc_error* grpc_metadata_batch_add_head(grpc_exec_ctx* exec_ctx,
-                                         grpc_metadata_batch* batch,
+grpc_error* grpc_metadata_batch_add_head(grpc_metadata_batch* batch,
                                          grpc_linked_mdelem* storage,
                                          grpc_mdelem elem_to_add) {
   GPR_ASSERT(!GRPC_MDISNULL(elem_to_add));
   storage->md = elem_to_add;
-  return grpc_metadata_batch_link_head(exec_ctx, batch, storage);
+  return grpc_metadata_batch_link_head(batch, storage);
 }
 
 static void link_head(grpc_mdelem_list* list, grpc_linked_mdelem* storage) {
@@ -150,27 +147,25 @@
   assert_valid_list(list);
 }
 
-grpc_error* grpc_metadata_batch_link_head(grpc_exec_ctx* exec_ctx,
-                                          grpc_metadata_batch* batch,
+grpc_error* grpc_metadata_batch_link_head(grpc_metadata_batch* batch,
                                           grpc_linked_mdelem* storage) {
-  assert_valid_callouts(exec_ctx, batch);
+  assert_valid_callouts(batch);
   grpc_error* err = maybe_link_callout(batch, storage);
   if (err != GRPC_ERROR_NONE) {
-    assert_valid_callouts(exec_ctx, batch);
+    assert_valid_callouts(batch);
     return err;
   }
   link_head(&batch->list, storage);
-  assert_valid_callouts(exec_ctx, batch);
+  assert_valid_callouts(batch);
   return GRPC_ERROR_NONE;
 }
 
-grpc_error* grpc_metadata_batch_add_tail(grpc_exec_ctx* exec_ctx,
-                                         grpc_metadata_batch* batch,
+grpc_error* grpc_metadata_batch_add_tail(grpc_metadata_batch* batch,
                                          grpc_linked_mdelem* storage,
                                          grpc_mdelem elem_to_add) {
   GPR_ASSERT(!GRPC_MDISNULL(elem_to_add));
   storage->md = elem_to_add;
-  return grpc_metadata_batch_link_tail(exec_ctx, batch, storage);
+  return grpc_metadata_batch_link_tail(batch, storage);
 }
 
 static void link_tail(grpc_mdelem_list* list, grpc_linked_mdelem* storage) {
@@ -189,17 +184,16 @@
   assert_valid_list(list);
 }
 
-grpc_error* grpc_metadata_batch_link_tail(grpc_exec_ctx* exec_ctx,
-                                          grpc_metadata_batch* batch,
+grpc_error* grpc_metadata_batch_link_tail(grpc_metadata_batch* batch,
                                           grpc_linked_mdelem* storage) {
-  assert_valid_callouts(exec_ctx, batch);
+  assert_valid_callouts(batch);
   grpc_error* err = maybe_link_callout(batch, storage);
   if (err != GRPC_ERROR_NONE) {
-    assert_valid_callouts(exec_ctx, batch);
+    assert_valid_callouts(batch);
     return err;
   }
   link_tail(&batch->list, storage);
-  assert_valid_callouts(exec_ctx, batch);
+  assert_valid_callouts(batch);
   return GRPC_ERROR_NONE;
 }
 
@@ -220,31 +214,28 @@
   assert_valid_list(list);
 }
 
-void grpc_metadata_batch_remove(grpc_exec_ctx* exec_ctx,
-                                grpc_metadata_batch* batch,
+void grpc_metadata_batch_remove(grpc_metadata_batch* batch,
                                 grpc_linked_mdelem* storage) {
-  assert_valid_callouts(exec_ctx, batch);
+  assert_valid_callouts(batch);
   maybe_unlink_callout(batch, storage);
   unlink_storage(&batch->list, storage);
-  GRPC_MDELEM_UNREF(exec_ctx, storage->md);
-  assert_valid_callouts(exec_ctx, batch);
+  GRPC_MDELEM_UNREF(storage->md);
+  assert_valid_callouts(batch);
 }
 
-void grpc_metadata_batch_set_value(grpc_exec_ctx* exec_ctx,
-                                   grpc_linked_mdelem* storage,
+void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage,
                                    grpc_slice value) {
   grpc_mdelem old_mdelem = storage->md;
   grpc_mdelem new_mdelem = grpc_mdelem_from_slices(
-      exec_ctx, grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value);
+      grpc_slice_ref_internal(GRPC_MDKEY(old_mdelem)), value);
   storage->md = new_mdelem;
-  GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
+  GRPC_MDELEM_UNREF(old_mdelem);
 }
 
-grpc_error* grpc_metadata_batch_substitute(grpc_exec_ctx* exec_ctx,
-                                           grpc_metadata_batch* batch,
+grpc_error* grpc_metadata_batch_substitute(grpc_metadata_batch* batch,
                                            grpc_linked_mdelem* storage,
                                            grpc_mdelem new_mdelem) {
-  assert_valid_callouts(exec_ctx, batch);
+  assert_valid_callouts(batch);
   grpc_error* error = GRPC_ERROR_NONE;
   grpc_mdelem old_mdelem = storage->md;
   if (!grpc_slice_eq(GRPC_MDKEY(new_mdelem), GRPC_MDKEY(old_mdelem))) {
@@ -253,19 +244,18 @@
     error = maybe_link_callout(batch, storage);
     if (error != GRPC_ERROR_NONE) {
       unlink_storage(&batch->list, storage);
-      GRPC_MDELEM_UNREF(exec_ctx, storage->md);
+      GRPC_MDELEM_UNREF(storage->md);
     }
   } else {
     storage->md = new_mdelem;
   }
-  GRPC_MDELEM_UNREF(exec_ctx, old_mdelem);
-  assert_valid_callouts(exec_ctx, batch);
+  GRPC_MDELEM_UNREF(old_mdelem);
+  assert_valid_callouts(batch);
   return error;
 }
 
-void grpc_metadata_batch_clear(grpc_exec_ctx* exec_ctx,
-                               grpc_metadata_batch* batch) {
-  grpc_metadata_batch_destroy(exec_ctx, batch);
+void grpc_metadata_batch_clear(grpc_metadata_batch* batch) {
+  grpc_metadata_batch_destroy(batch);
   grpc_metadata_batch_init(batch);
 }
 
@@ -292,8 +282,7 @@
   *composite = grpc_error_add_child(*composite, error);
 }
 
-grpc_error* grpc_metadata_batch_filter(grpc_exec_ctx* exec_ctx,
-                                       grpc_metadata_batch* batch,
+grpc_error* grpc_metadata_batch_filter(grpc_metadata_batch* batch,
                                        grpc_metadata_batch_filter_func func,
                                        void* user_data,
                                        const char* composite_error_string) {
@@ -301,12 +290,12 @@
   grpc_error* error = GRPC_ERROR_NONE;
   while (l) {
     grpc_linked_mdelem* next = l->next;
-    grpc_filtered_mdelem new_mdelem = func(exec_ctx, user_data, l->md);
+    grpc_filtered_mdelem new_mdelem = func(user_data, l->md);
     add_error(&error, new_mdelem.error, composite_error_string);
     if (GRPC_MDISNULL(new_mdelem.md)) {
-      grpc_metadata_batch_remove(exec_ctx, batch, l);
+      grpc_metadata_batch_remove(batch, l);
     } else if (new_mdelem.md.payload != l->md.payload) {
-      grpc_metadata_batch_substitute(exec_ctx, batch, l, new_mdelem.md);
+      grpc_metadata_batch_substitute(batch, l, new_mdelem.md);
     }
     l = next;
   }
diff --git a/src/core/lib/transport/metadata_batch.h b/src/core/lib/transport/metadata_batch.h
index 7d17393..8353a42 100644
--- a/src/core/lib/transport/metadata_batch.h
+++ b/src/core/lib/transport/metadata_batch.h
@@ -28,10 +28,6 @@
 #include "src/core/lib/transport/metadata.h"
 #include "src/core/lib/transport/static_metadata.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_linked_mdelem {
   grpc_mdelem md;
   struct grpc_linked_mdelem* next;
@@ -57,28 +53,23 @@
 } grpc_metadata_batch;
 
 void grpc_metadata_batch_init(grpc_metadata_batch* batch);
-void grpc_metadata_batch_destroy(grpc_exec_ctx* exec_ctx,
-                                 grpc_metadata_batch* batch);
-void grpc_metadata_batch_clear(grpc_exec_ctx* exec_ctx,
-                               grpc_metadata_batch* batch);
+void grpc_metadata_batch_destroy(grpc_metadata_batch* batch);
+void grpc_metadata_batch_clear(grpc_metadata_batch* batch);
 bool grpc_metadata_batch_is_empty(grpc_metadata_batch* batch);
 
 /* Returns the transport size of the batch. */
 size_t grpc_metadata_batch_size(grpc_metadata_batch* batch);
 
 /** Remove \a storage from the batch, unreffing the mdelem contained */
-void grpc_metadata_batch_remove(grpc_exec_ctx* exec_ctx,
-                                grpc_metadata_batch* batch,
+void grpc_metadata_batch_remove(grpc_metadata_batch* batch,
                                 grpc_linked_mdelem* storage);
 
 /** Substitute a new mdelem for an old value */
-grpc_error* grpc_metadata_batch_substitute(grpc_exec_ctx* exec_ctx,
-                                           grpc_metadata_batch* batch,
+grpc_error* grpc_metadata_batch_substitute(grpc_metadata_batch* batch,
                                            grpc_linked_mdelem* storage,
                                            grpc_mdelem new_value);
 
-void grpc_metadata_batch_set_value(grpc_exec_ctx* exec_ctx,
-                                   grpc_linked_mdelem* storage,
+void grpc_metadata_batch_set_value(grpc_linked_mdelem* storage,
                                    grpc_slice value);
 
 /** Add \a storage to the beginning of \a batch. storage->md is
@@ -86,17 +77,17 @@
     \a storage is owned by the caller and must survive for the
     lifetime of batch. This usually means it should be around
     for the lifetime of the call. */
-grpc_error* grpc_metadata_batch_link_head(
-    grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
-    grpc_linked_mdelem* storage) GRPC_MUST_USE_RESULT;
+grpc_error* grpc_metadata_batch_link_head(grpc_metadata_batch* batch,
+                                          grpc_linked_mdelem* storage)
+    GRPC_MUST_USE_RESULT;
 /** Add \a storage to the end of \a batch. storage->md is
     assumed to be valid.
     \a storage is owned by the caller and must survive for the
     lifetime of batch. This usually means it should be around
     for the lifetime of the call. */
-grpc_error* grpc_metadata_batch_link_tail(
-    grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
-    grpc_linked_mdelem* storage) GRPC_MUST_USE_RESULT;
+grpc_error* grpc_metadata_batch_link_tail(grpc_metadata_batch* batch,
+                                          grpc_linked_mdelem* storage)
+    GRPC_MUST_USE_RESULT;
 
 /** Add \a elem_to_add as the first element in \a batch, using
     \a storage as backing storage for the linked list element.
@@ -105,8 +96,8 @@
     for the lifetime of the call.
     Takes ownership of \a elem_to_add */
 grpc_error* grpc_metadata_batch_add_head(
-    grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
-    grpc_linked_mdelem* storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
+    grpc_metadata_batch* batch, grpc_linked_mdelem* storage,
+    grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
 /** Add \a elem_to_add as the last element in \a batch, using
     \a storage as backing storage for the linked list element.
     \a storage is owned by the caller and must survive for the
@@ -114,8 +105,8 @@
     for the lifetime of the call.
     Takes ownership of \a elem_to_add */
 grpc_error* grpc_metadata_batch_add_tail(
-    grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
-    grpc_linked_mdelem* storage, grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
+    grpc_metadata_batch* batch, grpc_linked_mdelem* storage,
+    grpc_mdelem elem_to_add) GRPC_MUST_USE_RESULT;
 
 grpc_error* grpc_attach_md_to_error(grpc_error* src, grpc_mdelem md);
 
@@ -132,11 +123,10 @@
   { GRPC_ERROR_NONE, GRPC_MDNULL }
 
 typedef grpc_filtered_mdelem (*grpc_metadata_batch_filter_func)(
-    grpc_exec_ctx* exec_ctx, void* user_data, grpc_mdelem elem);
+    void* user_data, grpc_mdelem elem);
 grpc_error* grpc_metadata_batch_filter(
-    grpc_exec_ctx* exec_ctx, grpc_metadata_batch* batch,
-    grpc_metadata_batch_filter_func func, void* user_data,
-    const char* composite_error_string) GRPC_MUST_USE_RESULT;
+    grpc_metadata_batch* batch, grpc_metadata_batch_filter_func func,
+    void* user_data, const char* composite_error_string) GRPC_MUST_USE_RESULT;
 
 #ifndef NDEBUG
 void grpc_metadata_batch_assert_ok(grpc_metadata_batch* comd);
@@ -146,8 +136,4 @@
   } while (0)
 #endif
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_TRANSPORT_METADATA_BATCH_H */
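Metadata batches get the same treatment: init/add/destroy are now exec_ctx-free. A sketch under the assumption that md is an owned grpc_mdelem (for instance from the previous sketch); per the comments above, storage is caller-owned backing memory and must outlive the batch:

    #include <grpc/support/log.h>
    #include "src/core/lib/transport/metadata_batch.h"

    // Sketch only: build a one-element batch and tear it down again.
    static void fill_one_element_batch(grpc_mdelem md) {
      grpc_metadata_batch batch;
      grpc_linked_mdelem storage;  // caller-owned list node
      grpc_metadata_batch_init(&batch);
      GPR_ASSERT(grpc_metadata_batch_add_tail(&batch, &storage, md) ==
                 GRPC_ERROR_NONE);        // takes ownership of md
      grpc_metadata_batch_destroy(&batch);  // unrefs every linked mdelem
    }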
diff --git a/src/core/lib/transport/service_config.cc b/src/core/lib/transport/service_config.cc
index adcec8c..cbafc33 100644
--- a/src/core/lib/transport/service_config.cc
+++ b/src/core/lib/transport/service_config.cc
@@ -152,10 +152,8 @@
 // each name found, incrementing \a idx for each entry added.
 // Returns false on error.
 static bool parse_json_method_config(
-    grpc_exec_ctx* exec_ctx, grpc_json* json,
-    void* (*create_value)(const grpc_json* method_config_json),
-    void* (*ref_value)(void* value),
-    void (*unref_value)(grpc_exec_ctx* exec_ctx, void* value),
+    grpc_json* json, void* (*create_value)(const grpc_json* method_config_json),
+    void* (*ref_value)(void* value), void (*unref_value)(void* value),
     grpc_slice_hash_table_entry* entries, size_t* idx) {
   // Construct value.
   void* method_config = create_value(json);
@@ -184,16 +182,15 @@
   }
   success = true;
 done:
-  unref_value(exec_ctx, method_config);
+  unref_value(method_config);
   gpr_strvec_destroy(&paths);
   return success;
 }
 
 grpc_slice_hash_table* grpc_service_config_create_method_config_table(
-    grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config,
+    const grpc_service_config* service_config,
     void* (*create_value)(const grpc_json* method_config_json),
-    void* (*ref_value)(void* value),
-    void (*unref_value)(grpc_exec_ctx* exec_ctx, void* value)) {
+    void* (*ref_value)(void* value), void (*unref_value)(void* value)) {
   const grpc_json* json = service_config->json_tree;
   // Traverse parsed JSON tree.
   if (json->type != GRPC_JSON_OBJECT || json->key != nullptr) return nullptr;
@@ -217,11 +214,11 @@
       size_t idx = 0;
       for (grpc_json* method = field->child; method != nullptr;
            method = method->next) {
-        if (!parse_json_method_config(exec_ctx, method, create_value, ref_value,
+        if (!parse_json_method_config(method, create_value, ref_value,
                                       unref_value, entries, &idx)) {
           for (size_t i = 0; i < idx; ++i) {
-            grpc_slice_unref_internal(exec_ctx, entries[i].key);
-            unref_value(exec_ctx, entries[i].value);
+            grpc_slice_unref_internal(entries[i].key);
+            unref_value(entries[i].value);
           }
           gpr_free(entries);
           return nullptr;
@@ -240,8 +237,7 @@
   return method_config_table;
 }
 
-void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
-                                   const grpc_slice_hash_table* table,
+void* grpc_method_config_table_get(const grpc_slice_hash_table* table,
                                    grpc_slice path) {
   void* value = grpc_slice_hash_table_get(table, path);
   // If we didn't find a match for the path, try looking for a wildcard
@@ -257,7 +253,7 @@
     grpc_slice wildcard_path = grpc_slice_from_copied_string(buf);
     gpr_free(buf);
     value = grpc_slice_hash_table_get(table, wildcard_path);
-    grpc_slice_unref_internal(exec_ctx, wildcard_path);
+    grpc_slice_unref_internal(wildcard_path);
     gpr_free(path_str);
   }
   return value;
diff --git a/src/core/lib/transport/service_config.h b/src/core/lib/transport/service_config.h
index 405d0f5..98554b9 100644
--- a/src/core/lib/transport/service_config.h
+++ b/src/core/lib/transport/service_config.h
@@ -22,10 +22,6 @@
 #include "src/core/lib/json/json.h"
 #include "src/core/lib/slice/slice_hash_table.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_service_config grpc_service_config;
 
 grpc_service_config* grpc_service_config_create(const char* json_string);
@@ -49,10 +45,9 @@
 /// \a ref_value() and \a unref_value() are used to ref and unref values.
 /// Returns NULL on error.
 grpc_slice_hash_table* grpc_service_config_create_method_config_table(
-    grpc_exec_ctx* exec_ctx, const grpc_service_config* service_config,
+    const grpc_service_config* service_config,
     void* (*create_value)(const grpc_json* method_config_json),
-    void* (*ref_value)(void* value),
-    void (*unref_value)(grpc_exec_ctx* exec_ctx, void* value));
+    void* (*ref_value)(void* value), void (*unref_value)(void* value));
 
 /// A helper function for looking up values in the table returned by
 /// \a grpc_service_config_create_method_config_table().
@@ -60,12 +55,7 @@
 /// the form "/service/method".
 /// Returns NULL if the method has no config.
 /// Caller does NOT own a reference to the result.
-void* grpc_method_config_table_get(grpc_exec_ctx* exec_ctx,
-                                   const grpc_slice_hash_table* table,
+void* grpc_method_config_table_get(const grpc_slice_hash_table* table,
                                    grpc_slice path);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_TRANSPORT_SERVICE_CONFIG_H */
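grpc_method_config_table_get keeps the same lookup semantics, just without the exec_ctx argument. A hypothetical lookup sketch; the path and service names are illustrative, and table is assumed to come from grpc_service_config_create_method_config_table() above:

    #include <grpc/slice.h>
    #include "src/core/lib/transport/service_config.h"

    // Sketch only: look up the per-method config for one illustrative method.
    static void* lookup_echo_config(const grpc_slice_hash_table* table) {
      // Returns nullptr if neither "/pkg.EchoService/Echo" nor the
      // "/pkg.EchoService/*" wildcard entry exists; the caller does NOT own
      // a ref to the result.
      return grpc_method_config_table_get(
          table, grpc_slice_from_static_string("/pkg.EchoService/Echo"));
    }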
diff --git a/src/core/lib/transport/static_metadata.cc b/src/core/lib/transport/static_metadata.cc
index 844724c..2213b30 100644
--- a/src/core/lib/transport/static_metadata.cc
+++ b/src/core/lib/transport/static_metadata.cc
@@ -104,7 +104,7 @@
     101, 44,  103, 122, 105, 112};
 
 static void static_ref(void* unused) {}
-static void static_unref(grpc_exec_ctx* exec_ctx, void* unused) {}
+static void static_unref(void* unused) {}
 static const grpc_slice_refcount_vtable static_sub_vtable = {
     static_ref, static_unref, grpc_slice_default_eq_impl,
     grpc_slice_default_hash_impl};
diff --git a/src/core/lib/transport/static_metadata.h b/src/core/lib/transport/static_metadata.h
index 8e73d5f..ce3a11b 100644
--- a/src/core/lib/transport/static_metadata.h
+++ b/src/core/lib/transport/static_metadata.h
@@ -27,10 +27,6 @@
 #ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H
 #define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 #include "src/core/lib/transport/metadata.h"
 
 #define GRPC_STATIC_MDSTR_COUNT 100
@@ -588,7 +584,5 @@
   (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table                                  \
                         [grpc_static_accept_stream_encoding_metadata[(algs)]], \
                     GRPC_MDELEM_STORAGE_STATIC))
-#ifdef __cplusplus
-}
-#endif
+
 #endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */
diff --git a/src/core/lib/transport/status_conversion.cc b/src/core/lib/transport/status_conversion.cc
index a0a5f1b..46cba42 100644
--- a/src/core/lib/transport/status_conversion.cc
+++ b/src/core/lib/transport/status_conversion.cc
@@ -37,8 +37,7 @@
   }
 }
 
-grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx* exec_ctx,
-                                                 grpc_http2_error_code error,
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
                                                  grpc_millis deadline) {
   switch (error) {
     case GRPC_HTTP2_NO_ERROR:
@@ -47,7 +46,7 @@
     case GRPC_HTTP2_CANCEL:
       /* http2 cancel translates to STATUS_CANCELLED iff deadline hasn't been
        * exceeded */
-      return grpc_exec_ctx_now(exec_ctx) > deadline
+      return grpc_core::ExecCtx::Get()->Now() > deadline
                  ? GRPC_STATUS_DEADLINE_EXCEEDED
                  : GRPC_STATUS_CANCELLED;
     case GRPC_HTTP2_ENHANCE_YOUR_CALM:
diff --git a/src/core/lib/transport/status_conversion.h b/src/core/lib/transport/status_conversion.h
index b6fcebd..107eb92 100644
--- a/src/core/lib/transport/status_conversion.h
+++ b/src/core/lib/transport/status_conversion.h
@@ -23,22 +23,13 @@
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/transport/http2_errors.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Conversion of grpc status codes to http2 error codes (for RST_STREAM) */
 grpc_http2_error_code grpc_status_to_http2_error(grpc_status_code status);
-grpc_status_code grpc_http2_error_to_grpc_status(grpc_exec_ctx* exec_ctx,
-                                                 grpc_http2_error_code error,
+grpc_status_code grpc_http2_error_to_grpc_status(grpc_http2_error_code error,
                                                  grpc_millis deadline);
 
 /* Conversion of HTTP status codes (:status) to grpc status codes */
 grpc_status_code grpc_http2_status_to_grpc_status(int status);
 int grpc_status_to_http2_status(grpc_status_code status);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_TRANSPORT_STATUS_CONVERSION_H */
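status_conversion now reads the clock from the thread-local grpc_core::ExecCtx rather than an explicit exec_ctx, as the .cc hunk above shows. A sketch of the new call-site shape; the wrapper name is made up and a grpc_core::ExecCtx is assumed to be live on the current thread:

    #include "src/core/lib/transport/status_conversion.h"

    // Sketch only: an HTTP/2 CANCEL maps to CANCELLED while the deadline has
    // not yet passed, and to DEADLINE_EXCEEDED afterwards.
    static grpc_status_code cancel_status(grpc_millis deadline) {
      return grpc_http2_error_to_grpc_status(GRPC_HTTP2_CANCEL, deadline);
    }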
diff --git a/src/core/lib/transport/timeout_encoding.h b/src/core/lib/transport/timeout_encoding.h
index 9c3c459..8611f49 100644
--- a/src/core/lib/transport/timeout_encoding.h
+++ b/src/core/lib/transport/timeout_encoding.h
@@ -27,17 +27,9 @@
 
 #define GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE (GPR_LTOA_MIN_BUFSIZE + 1)
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Encode/decode timeouts to the GRPC over HTTP/2 format;
    encoding may round up arbitrarily */
 void grpc_http2_encode_timeout(grpc_millis timeout, char* buffer);
 int grpc_http2_decode_timeout(grpc_slice text, grpc_millis* timeout);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_TRANSPORT_TIMEOUT_ENCODING_H */
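The timeout encoding helpers are unchanged apart from losing the extern "C" wrapper. A small illustrative round trip; the 5000 ms value and the "5S" header text are made-up examples of the grpc-timeout wire format, and the function name is hypothetical:

    #include <grpc/slice.h>
    #include "src/core/lib/transport/timeout_encoding.h"

    // Sketch only: encode a deadline into a grpc-timeout token and parse one back.
    static void timeout_round_trip() {
      char buf[GRPC_HTTP2_TIMEOUT_ENCODE_MIN_BUFSIZE];
      grpc_http2_encode_timeout(5000, buf);  // encoding may round the value up

      grpc_millis decoded;
      if (!grpc_http2_decode_timeout(grpc_slice_from_static_string("5S"), &decoded)) {
        // malformed grpc-timeout value
      } else {
        // decoded now holds the parsed timeout in milliseconds
      }
    }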
diff --git a/src/core/lib/transport/transport.cc b/src/core/lib/transport/transport.cc
index 5bda154..08aee04 100644
--- a/src/core/lib/transport/transport.cc
+++ b/src/core/lib/transport/transport.cc
@@ -49,8 +49,7 @@
 }
 
 #ifndef NDEBUG
-void grpc_stream_unref(grpc_exec_ctx* exec_ctx, grpc_stream_refcount* refcount,
-                       const char* reason) {
+void grpc_stream_unref(grpc_stream_refcount* refcount, const char* reason) {
   if (grpc_trace_stream_refcount.enabled()) {
     gpr_atm val = gpr_atm_no_barrier_load(&refcount->refs.count);
     gpr_log(GPR_DEBUG, "%s %p:%p UNREF %" PRIdPTR "->%" PRIdPTR " %s",
@@ -58,11 +57,11 @@
             val - 1, reason);
   }
 #else
-void grpc_stream_unref(grpc_exec_ctx* exec_ctx,
-                       grpc_stream_refcount* refcount) {
+void grpc_stream_unref(grpc_stream_refcount* refcount) {
 #endif
   if (gpr_unref(&refcount->refs)) {
-    if (exec_ctx->flags & GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP) {
+    if (grpc_core::ExecCtx::Get()->flags() &
+        GRPC_EXEC_CTX_FLAG_THREAD_RESOURCE_LOOP) {
       /* Ick.
          The thread we're running on MAY be owned (indirectly) by a call-stack.
          If that's the case, destroying the call-stack MAY try to destroy the
@@ -73,7 +72,7 @@
       refcount->destroy.scheduler =
           grpc_executor_scheduler(GRPC_EXECUTOR_SHORT);
     }
-    GRPC_CLOSURE_SCHED(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&refcount->destroy, GRPC_ERROR_NONE);
   }
 }
 
@@ -89,11 +88,11 @@
 #endif
 }
 
-static void slice_stream_unref(grpc_exec_ctx* exec_ctx, void* p) {
+static void slice_stream_unref(void* p) {
 #ifndef NDEBUG
-  grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p), "slice");
+  grpc_stream_unref(STREAM_REF_FROM_SLICE_REF(p), "slice");
 #else
-  grpc_stream_unref(exec_ctx, STREAM_REF_FROM_SLICE_REF(p));
+  grpc_stream_unref(STREAM_REF_FROM_SLICE_REF(p));
 #endif
 }
 
@@ -151,59 +150,50 @@
   return transport->vtable->sizeof_stream;
 }
 
-void grpc_transport_destroy(grpc_exec_ctx* exec_ctx,
-                            grpc_transport* transport) {
-  transport->vtable->destroy(exec_ctx, transport);
+void grpc_transport_destroy(grpc_transport* transport) {
+  transport->vtable->destroy(transport);
 }
 
-int grpc_transport_init_stream(grpc_exec_ctx* exec_ctx,
-                               grpc_transport* transport, grpc_stream* stream,
+int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream,
                                grpc_stream_refcount* refcount,
                                const void* server_data, gpr_arena* arena) {
-  return transport->vtable->init_stream(exec_ctx, transport, stream, refcount,
+  return transport->vtable->init_stream(transport, stream, refcount,
                                         server_data, arena);
 }
 
-void grpc_transport_perform_stream_op(grpc_exec_ctx* exec_ctx,
-                                      grpc_transport* transport,
+void grpc_transport_perform_stream_op(grpc_transport* transport,
                                       grpc_stream* stream,
                                       grpc_transport_stream_op_batch* op) {
-  transport->vtable->perform_stream_op(exec_ctx, transport, stream, op);
+  transport->vtable->perform_stream_op(transport, stream, op);
 }
 
-void grpc_transport_perform_op(grpc_exec_ctx* exec_ctx,
-                               grpc_transport* transport,
+void grpc_transport_perform_op(grpc_transport* transport,
                                grpc_transport_op* op) {
-  transport->vtable->perform_op(exec_ctx, transport, op);
+  transport->vtable->perform_op(transport, op);
 }
 
-void grpc_transport_set_pops(grpc_exec_ctx* exec_ctx, grpc_transport* transport,
-                             grpc_stream* stream,
+void grpc_transport_set_pops(grpc_transport* transport, grpc_stream* stream,
                              grpc_polling_entity* pollent) {
   grpc_pollset* pollset;
   grpc_pollset_set* pollset_set;
   if ((pollset = grpc_polling_entity_pollset(pollent)) != nullptr) {
-    transport->vtable->set_pollset(exec_ctx, transport, stream, pollset);
+    transport->vtable->set_pollset(transport, stream, pollset);
   } else if ((pollset_set = grpc_polling_entity_pollset_set(pollent)) !=
              nullptr) {
-    transport->vtable->set_pollset_set(exec_ctx, transport, stream,
-                                       pollset_set);
+    transport->vtable->set_pollset_set(transport, stream, pollset_set);
   } else {
     abort();
   }
 }
 
-void grpc_transport_destroy_stream(grpc_exec_ctx* exec_ctx,
-                                   grpc_transport* transport,
+void grpc_transport_destroy_stream(grpc_transport* transport,
                                    grpc_stream* stream,
                                    grpc_closure* then_schedule_closure) {
-  transport->vtable->destroy_stream(exec_ctx, transport, stream,
-                                    then_schedule_closure);
+  transport->vtable->destroy_stream(transport, stream, then_schedule_closure);
 }
 
-grpc_endpoint* grpc_transport_get_endpoint(grpc_exec_ctx* exec_ctx,
-                                           grpc_transport* transport) {
-  return transport->vtable->get_endpoint(exec_ctx, transport);
+grpc_endpoint* grpc_transport_get_endpoint(grpc_transport* transport) {
+  return transport->vtable->get_endpoint(transport);
 }
 
 // This comment should be sung to the tune of
@@ -214,25 +204,23 @@
 // though it lives in lib, it handles transport stream ops sure
 // it's grpc_transport_stream_op_batch_finish_with_failure
 void grpc_transport_stream_op_batch_finish_with_failure(
-    grpc_exec_ctx* exec_ctx, grpc_transport_stream_op_batch* batch,
-    grpc_error* error, grpc_call_combiner* call_combiner) {
+    grpc_transport_stream_op_batch* batch, grpc_error* error,
+    grpc_call_combiner* call_combiner) {
   if (batch->send_message) {
-    grpc_byte_stream_destroy(exec_ctx,
-                             batch->payload->send_message.send_message);
+    grpc_byte_stream_destroy(batch->payload->send_message.send_message);
   }
   if (batch->recv_message) {
-    GRPC_CALL_COMBINER_START(exec_ctx, call_combiner,
-                             batch->payload->recv_message.recv_message_ready,
-                             GRPC_ERROR_REF(error),
-                             "failing recv_message_ready");
+    GRPC_CALL_COMBINER_START(
+        call_combiner, batch->payload->recv_message.recv_message_ready,
+        GRPC_ERROR_REF(error), "failing recv_message_ready");
   }
   if (batch->recv_initial_metadata) {
     GRPC_CALL_COMBINER_START(
-        exec_ctx, call_combiner,
+        call_combiner,
         batch->payload->recv_initial_metadata.recv_initial_metadata_ready,
         GRPC_ERROR_REF(error), "failing recv_initial_metadata_ready");
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, batch->on_complete, error);
+  GRPC_CLOSURE_SCHED(batch->on_complete, error);
   if (batch->cancel_stream) {
     GRPC_ERROR_UNREF(batch->payload->cancel_stream.cancel_error);
   }
@@ -244,10 +232,9 @@
   grpc_transport_op op;
 } made_transport_op;
 
-static void destroy_made_transport_op(grpc_exec_ctx* exec_ctx, void* arg,
-                                      grpc_error* error) {
+static void destroy_made_transport_op(void* arg, grpc_error* error) {
   made_transport_op* op = (made_transport_op*)arg;
-  GRPC_CLOSURE_SCHED(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_SCHED(op->inner_on_complete, GRPC_ERROR_REF(error));
   gpr_free(op);
 }
 
@@ -268,12 +255,11 @@
   grpc_transport_stream_op_batch_payload payload;
 } made_transport_stream_op;
 
-static void destroy_made_transport_stream_op(grpc_exec_ctx* exec_ctx, void* arg,
-                                             grpc_error* error) {
+static void destroy_made_transport_stream_op(void* arg, grpc_error* error) {
   made_transport_stream_op* op = (made_transport_stream_op*)arg;
   grpc_closure* c = op->inner_on_complete;
   gpr_free(op);
-  GRPC_CLOSURE_RUN(exec_ctx, c, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(c, GRPC_ERROR_REF(error));
 }
 
 grpc_transport_stream_op_batch* grpc_make_transport_stream_op(
diff --git a/src/core/lib/transport/transport.h b/src/core/lib/transport/transport.h
index 9774912..80a7ff9 100644
--- a/src/core/lib/transport/transport.h
+++ b/src/core/lib/transport/transport.h
@@ -31,11 +31,14 @@
 #include "src/core/lib/transport/byte_stream.h"
 #include "src/core/lib/transport/metadata_batch.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+/* Minimum and maximum protocol accepted versions. */
+#define GRPC_PROTOCOL_VERSION_MAX_MAJOR 2
+#define GRPC_PROTOCOL_VERSION_MAX_MINOR 1
+#define GRPC_PROTOCOL_VERSION_MIN_MAJOR 2
+#define GRPC_PROTOCOL_VERSION_MIN_MINOR 1
 
 /* forward declarations */
+
 typedef struct grpc_transport grpc_transport;
 
 /* grpc_stream doesn't actually exist. It's used as a typesafe
@@ -59,15 +62,14 @@
                           grpc_iomgr_cb_func cb, void* cb_arg,
                           const char* object_type);
 void grpc_stream_ref(grpc_stream_refcount* refcount, const char* reason);
-void grpc_stream_unref(grpc_exec_ctx* exec_ctx, grpc_stream_refcount* refcount,
-                       const char* reason);
+void grpc_stream_unref(grpc_stream_refcount* refcount, const char* reason);
 #define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
   grpc_stream_ref_init(rc, ir, cb, cb_arg, objtype)
 #else
 void grpc_stream_ref_init(grpc_stream_refcount* refcount, int initial_refs,
                           grpc_iomgr_cb_func cb, void* cb_arg);
 void grpc_stream_ref(grpc_stream_refcount* refcount);
-void grpc_stream_unref(grpc_exec_ctx* exec_ctx, grpc_stream_refcount* refcount);
+void grpc_stream_unref(grpc_stream_refcount* refcount);
 #define GRPC_STREAM_REF_INIT(rc, ir, cb, cb_arg, objtype) \
   grpc_stream_ref_init(rc, ir, cb, cb_arg)
 #endif
@@ -241,16 +243,21 @@
       If true, the callback is set to set_accept_stream_fn, with its
       user_data argument set to set_accept_stream_user_data */
   bool set_accept_stream;
-  void (*set_accept_stream_fn)(grpc_exec_ctx* exec_ctx, void* user_data,
-                               grpc_transport* transport,
+  void (*set_accept_stream_fn)(void* user_data, grpc_transport* transport,
                                const void* server_data);
   void* set_accept_stream_user_data;
   /** add this transport to a pollset */
   grpc_pollset* bind_pollset;
   /** add this transport to a pollset_set */
   grpc_pollset_set* bind_pollset_set;
-  /** send a ping, call this back if not NULL */
-  grpc_closure* send_ping;
+  /** Send a ping if either on_initiate or on_ack is not NULL */
+  struct {
+    /** The ping may be delayed by the transport; the on_initiate callback
+        will be called when the ping is actually sent. */
+    grpc_closure* on_initiate;
+    /** Called when the ping ack is received */
+    grpc_closure* on_ack;
+  } send_ping;
 
   /***************************************************************************
    * remaining fields are initialized and used at the discretion of the
@@ -273,13 +280,12 @@
      stream      - a pointer to uninitialized memory to initialize
      server_data - either NULL for a client initiated stream, or a pointer
                    supplied from the accept_stream callback function */
-int grpc_transport_init_stream(grpc_exec_ctx* exec_ctx,
-                               grpc_transport* transport, grpc_stream* stream,
+int grpc_transport_init_stream(grpc_transport* transport, grpc_stream* stream,
                                grpc_stream_refcount* refcount,
                                const void* server_data, gpr_arena* arena);
 
-void grpc_transport_set_pops(grpc_exec_ctx* exec_ctx, grpc_transport* transport,
-                             grpc_stream* stream, grpc_polling_entity* pollent);
+void grpc_transport_set_pops(grpc_transport* transport, grpc_stream* stream,
+                             grpc_polling_entity* pollent);
 
 /* Destroy transport data for a stream.
 
@@ -291,14 +297,13 @@
      transport - the transport on which to create this stream
      stream    - the grpc_stream to destroy (memory is still owned by the
                  caller, but any child memory must be cleaned up) */
-void grpc_transport_destroy_stream(grpc_exec_ctx* exec_ctx,
-                                   grpc_transport* transport,
+void grpc_transport_destroy_stream(grpc_transport* transport,
                                    grpc_stream* stream,
                                    grpc_closure* then_schedule_closure);
 
 void grpc_transport_stream_op_batch_finish_with_failure(
-    grpc_exec_ctx* exec_ctx, grpc_transport_stream_op_batch* op,
-    grpc_error* error, grpc_call_combiner* call_combiner);
+    grpc_transport_stream_op_batch* op, grpc_error* error,
+    grpc_call_combiner* call_combiner);
 
 char* grpc_transport_stream_op_batch_string(grpc_transport_stream_op_batch* op);
 char* grpc_transport_op_string(grpc_transport_op* op);
@@ -313,13 +318,11 @@
                  non-NULL and previously initialized by the same transport.
      op        - a grpc_transport_stream_op_batch specifying the op to perform
    */
-void grpc_transport_perform_stream_op(grpc_exec_ctx* exec_ctx,
-                                      grpc_transport* transport,
+void grpc_transport_perform_stream_op(grpc_transport* transport,
                                       grpc_stream* stream,
                                       grpc_transport_stream_op_batch* op);
 
-void grpc_transport_perform_op(grpc_exec_ctx* exec_ctx,
-                               grpc_transport* transport,
+void grpc_transport_perform_op(grpc_transport* transport,
                                grpc_transport_op* op);
 
 /* Send a ping on a transport
@@ -332,11 +335,10 @@
                            grpc_slice debug_data);
 
 /* Destroy the transport */
-void grpc_transport_destroy(grpc_exec_ctx* exec_ctx, grpc_transport* transport);
+void grpc_transport_destroy(grpc_transport* transport);
 
 /* Get the endpoint used by \a transport */
-grpc_endpoint* grpc_transport_get_endpoint(grpc_exec_ctx* exec_ctx,
-                                           grpc_transport* transport);
+grpc_endpoint* grpc_transport_get_endpoint(grpc_transport* transport);
 
 /* Allocate a grpc_transport_op, and preconfigure the on_consumed closure to
    \a on_consumed and then delete the returned transport op */
@@ -347,8 +349,4 @@
 grpc_transport_stream_op_batch* grpc_make_transport_stream_op(
     grpc_closure* on_consumed);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_TRANSPORT_TRANSPORT_H */
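The transport_op change above replaces the single send_ping closure with an on_initiate/on_ack pair, so a caller can observe both when a (possibly delayed) ping is actually written and when its ack arrives. A hypothetical sketch of requesting a ping with the new fields; request_ping is made up, and on_initiate/on_ack are caller-provided grpc_closure objects:

    #include "src/core/lib/transport/transport.h"

    // Sketch only: issue a ping on an established transport.
    static void request_ping(grpc_transport* transport, grpc_closure* on_initiate,
                             grpc_closure* on_ack) {
      grpc_transport_op* op = grpc_make_transport_op(/*on_consumed=*/nullptr);
      op->send_ping.on_initiate = on_initiate;  // runs when the ping is actually written
      op->send_ping.on_ack = on_ack;            // runs when the peer's ping ack arrives
      grpc_transport_perform_op(transport, op);
    }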
diff --git a/src/core/lib/transport/transport_impl.h b/src/core/lib/transport/transport_impl.h
index 22ad599..50b8a5f 100644
--- a/src/core/lib/transport/transport_impl.h
+++ b/src/core/lib/transport/transport_impl.h
@@ -21,10 +21,6 @@
 
 #include "src/core/lib/transport/transport.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_transport_vtable {
   /* Memory required for a single stream element - this is allocated by upper
      layers and initialized by the transport */
@@ -34,37 +30,34 @@
   const char* name;
 
   /* implementation of grpc_transport_init_stream */
-  int (*init_stream)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                     grpc_stream* stream, grpc_stream_refcount* refcount,
-                     const void* server_data, gpr_arena* arena);
+  int (*init_stream)(grpc_transport* self, grpc_stream* stream,
+                     grpc_stream_refcount* refcount, const void* server_data,
+                     gpr_arena* arena);
 
   /* implementation of grpc_transport_set_pollset */
-  void (*set_pollset)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                      grpc_stream* stream, grpc_pollset* pollset);
+  void (*set_pollset)(grpc_transport* self, grpc_stream* stream,
+                      grpc_pollset* pollset);
 
   /* implementation of grpc_transport_set_pollset */
-  void (*set_pollset_set)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                          grpc_stream* stream, grpc_pollset_set* pollset_set);
+  void (*set_pollset_set)(grpc_transport* self, grpc_stream* stream,
+                          grpc_pollset_set* pollset_set);
 
   /* implementation of grpc_transport_perform_stream_op */
-  void (*perform_stream_op)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                            grpc_stream* stream,
+  void (*perform_stream_op)(grpc_transport* self, grpc_stream* stream,
                             grpc_transport_stream_op_batch* op);
 
   /* implementation of grpc_transport_perform_op */
-  void (*perform_op)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                     grpc_transport_op* op);
+  void (*perform_op)(grpc_transport* self, grpc_transport_op* op);
 
   /* implementation of grpc_transport_destroy_stream */
-  void (*destroy_stream)(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                         grpc_stream* stream,
+  void (*destroy_stream)(grpc_transport* self, grpc_stream* stream,
                          grpc_closure* then_schedule_closure);
 
   /* implementation of grpc_transport_destroy */
-  void (*destroy)(grpc_exec_ctx* exec_ctx, grpc_transport* self);
+  void (*destroy)(grpc_transport* self);
 
   /* implementation of grpc_transport_get_endpoint */
-  grpc_endpoint* (*get_endpoint)(grpc_exec_ctx* exec_ctx, grpc_transport* self);
+  grpc_endpoint* (*get_endpoint)(grpc_transport* self);
 } grpc_transport_vtable;
 
 /* an instance of a grpc transport */
@@ -73,8 +66,4 @@
   const grpc_transport_vtable* vtable;
 };
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_LIB_TRANSPORT_TRANSPORT_IMPL_H */
diff --git a/src/core/lib/transport/transport_op_string.cc b/src/core/lib/transport/transport_op_string.cc
index e69ab02..c0f82fe 100644
--- a/src/core/lib/transport/transport_op_string.cc
+++ b/src/core/lib/transport/transport_op_string.cc
@@ -187,7 +187,7 @@
     gpr_strvec_add(&b, gpr_strdup("BIND_POLLSET_SET"));
   }
 
-  if (op->send_ping != nullptr) {
+  if (op->send_ping.on_initiate != nullptr || op->send_ping.on_ack != nullptr) {
     if (!first) gpr_strvec_add(&b, gpr_strdup(" "));
     // first = false;
     gpr_strvec_add(&b, gpr_strdup("SEND_PING"));
diff --git a/src/core/plugin_registry/grpc_cronet_plugin_registry.cc b/src/core/plugin_registry/grpc_cronet_plugin_registry.cc
index e0422f6..101e29c 100644
--- a/src/core/plugin_registry/grpc_cronet_plugin_registry.cc
+++ b/src/core/plugin_registry/grpc_cronet_plugin_registry.cc
@@ -18,18 +18,18 @@
 
 #include <grpc/grpc.h>
 
-extern "C" void grpc_http_filters_init(void);
-extern "C" void grpc_http_filters_shutdown(void);
-extern "C" void grpc_chttp2_plugin_init(void);
-extern "C" void grpc_chttp2_plugin_shutdown(void);
-extern "C" void grpc_deadline_filter_init(void);
-extern "C" void grpc_deadline_filter_shutdown(void);
-extern "C" void grpc_client_channel_init(void);
-extern "C" void grpc_client_channel_shutdown(void);
-extern "C" void grpc_tsi_gts_init(void);
-extern "C" void grpc_tsi_gts_shutdown(void);
-extern "C" void grpc_server_load_reporting_plugin_init(void);
-extern "C" void grpc_server_load_reporting_plugin_shutdown(void);
+void grpc_http_filters_init(void);
+void grpc_http_filters_shutdown(void);
+void grpc_chttp2_plugin_init(void);
+void grpc_chttp2_plugin_shutdown(void);
+void grpc_deadline_filter_init(void);
+void grpc_deadline_filter_shutdown(void);
+void grpc_client_channel_init(void);
+void grpc_client_channel_shutdown(void);
+void grpc_tsi_gts_init(void);
+void grpc_tsi_gts_shutdown(void);
+void grpc_server_load_reporting_plugin_init(void);
+void grpc_server_load_reporting_plugin_shutdown(void);
 
 void grpc_register_built_in_plugins(void) {
   grpc_register_plugin(grpc_http_filters_init,
diff --git a/src/core/plugin_registry/grpc_plugin_registry.cc b/src/core/plugin_registry/grpc_plugin_registry.cc
index 339c9bb..89be351 100644
--- a/src/core/plugin_registry/grpc_plugin_registry.cc
+++ b/src/core/plugin_registry/grpc_plugin_registry.cc
@@ -18,40 +18,40 @@
 
 #include <grpc/grpc.h>
 
-extern "C" void grpc_http_filters_init(void);
-extern "C" void grpc_http_filters_shutdown(void);
-extern "C" void grpc_chttp2_plugin_init(void);
-extern "C" void grpc_chttp2_plugin_shutdown(void);
-extern "C" void grpc_tsi_gts_init(void);
-extern "C" void grpc_tsi_gts_shutdown(void);
-extern "C" void grpc_deadline_filter_init(void);
-extern "C" void grpc_deadline_filter_shutdown(void);
-extern "C" void grpc_client_channel_init(void);
-extern "C" void grpc_client_channel_shutdown(void);
-extern "C" void grpc_inproc_plugin_init(void);
-extern "C" void grpc_inproc_plugin_shutdown(void);
-extern "C" void grpc_resolver_fake_init(void);
-extern "C" void grpc_resolver_fake_shutdown(void);
-extern "C" void grpc_lb_policy_grpclb_init(void);
-extern "C" void grpc_lb_policy_grpclb_shutdown(void);
-extern "C" void grpc_lb_policy_pick_first_init(void);
-extern "C" void grpc_lb_policy_pick_first_shutdown(void);
-extern "C" void grpc_lb_policy_round_robin_init(void);
-extern "C" void grpc_lb_policy_round_robin_shutdown(void);
-extern "C" void grpc_resolver_dns_ares_init(void);
-extern "C" void grpc_resolver_dns_ares_shutdown(void);
-extern "C" void grpc_resolver_dns_native_init(void);
-extern "C" void grpc_resolver_dns_native_shutdown(void);
-extern "C" void grpc_resolver_sockaddr_init(void);
-extern "C" void grpc_resolver_sockaddr_shutdown(void);
-extern "C" void grpc_server_load_reporting_plugin_init(void);
-extern "C" void grpc_server_load_reporting_plugin_shutdown(void);
-extern "C" void grpc_max_age_filter_init(void);
-extern "C" void grpc_max_age_filter_shutdown(void);
-extern "C" void grpc_message_size_filter_init(void);
-extern "C" void grpc_message_size_filter_shutdown(void);
-extern "C" void grpc_workaround_cronet_compression_filter_init(void);
-extern "C" void grpc_workaround_cronet_compression_filter_shutdown(void);
+void grpc_http_filters_init(void);
+void grpc_http_filters_shutdown(void);
+void grpc_chttp2_plugin_init(void);
+void grpc_chttp2_plugin_shutdown(void);
+void grpc_tsi_gts_init(void);
+void grpc_tsi_gts_shutdown(void);
+void grpc_deadline_filter_init(void);
+void grpc_deadline_filter_shutdown(void);
+void grpc_client_channel_init(void);
+void grpc_client_channel_shutdown(void);
+void grpc_inproc_plugin_init(void);
+void grpc_inproc_plugin_shutdown(void);
+void grpc_resolver_fake_init(void);
+void grpc_resolver_fake_shutdown(void);
+void grpc_lb_policy_grpclb_init(void);
+void grpc_lb_policy_grpclb_shutdown(void);
+void grpc_lb_policy_pick_first_init(void);
+void grpc_lb_policy_pick_first_shutdown(void);
+void grpc_lb_policy_round_robin_init(void);
+void grpc_lb_policy_round_robin_shutdown(void);
+void grpc_resolver_dns_ares_init(void);
+void grpc_resolver_dns_ares_shutdown(void);
+void grpc_resolver_dns_native_init(void);
+void grpc_resolver_dns_native_shutdown(void);
+void grpc_resolver_sockaddr_init(void);
+void grpc_resolver_sockaddr_shutdown(void);
+void grpc_server_load_reporting_plugin_init(void);
+void grpc_server_load_reporting_plugin_shutdown(void);
+void grpc_max_age_filter_init(void);
+void grpc_max_age_filter_shutdown(void);
+void grpc_message_size_filter_init(void);
+void grpc_message_size_filter_shutdown(void);
+void grpc_workaround_cronet_compression_filter_init(void);
+void grpc_workaround_cronet_compression_filter_shutdown(void);
 
 void grpc_register_built_in_plugins(void) {
   grpc_register_plugin(grpc_http_filters_init,
diff --git a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc
index c9fc17d..d73f946 100644
--- a/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc
+++ b/src/core/plugin_registry/grpc_unsecure_plugin_registry.cc
@@ -18,38 +18,38 @@
 
 #include <grpc/grpc.h>
 
-extern "C" void grpc_http_filters_init(void);
-extern "C" void grpc_http_filters_shutdown(void);
-extern "C" void grpc_chttp2_plugin_init(void);
-extern "C" void grpc_chttp2_plugin_shutdown(void);
-extern "C" void grpc_deadline_filter_init(void);
-extern "C" void grpc_deadline_filter_shutdown(void);
-extern "C" void grpc_client_channel_init(void);
-extern "C" void grpc_client_channel_shutdown(void);
-extern "C" void grpc_inproc_plugin_init(void);
-extern "C" void grpc_inproc_plugin_shutdown(void);
-extern "C" void grpc_resolver_dns_ares_init(void);
-extern "C" void grpc_resolver_dns_ares_shutdown(void);
-extern "C" void grpc_resolver_dns_native_init(void);
-extern "C" void grpc_resolver_dns_native_shutdown(void);
-extern "C" void grpc_resolver_sockaddr_init(void);
-extern "C" void grpc_resolver_sockaddr_shutdown(void);
-extern "C" void grpc_resolver_fake_init(void);
-extern "C" void grpc_resolver_fake_shutdown(void);
-extern "C" void grpc_server_load_reporting_plugin_init(void);
-extern "C" void grpc_server_load_reporting_plugin_shutdown(void);
-extern "C" void grpc_lb_policy_grpclb_init(void);
-extern "C" void grpc_lb_policy_grpclb_shutdown(void);
-extern "C" void grpc_lb_policy_pick_first_init(void);
-extern "C" void grpc_lb_policy_pick_first_shutdown(void);
-extern "C" void grpc_lb_policy_round_robin_init(void);
-extern "C" void grpc_lb_policy_round_robin_shutdown(void);
-extern "C" void grpc_max_age_filter_init(void);
-extern "C" void grpc_max_age_filter_shutdown(void);
-extern "C" void grpc_message_size_filter_init(void);
-extern "C" void grpc_message_size_filter_shutdown(void);
-extern "C" void grpc_workaround_cronet_compression_filter_init(void);
-extern "C" void grpc_workaround_cronet_compression_filter_shutdown(void);
+void grpc_http_filters_init(void);
+void grpc_http_filters_shutdown(void);
+void grpc_chttp2_plugin_init(void);
+void grpc_chttp2_plugin_shutdown(void);
+void grpc_deadline_filter_init(void);
+void grpc_deadline_filter_shutdown(void);
+void grpc_client_channel_init(void);
+void grpc_client_channel_shutdown(void);
+void grpc_inproc_plugin_init(void);
+void grpc_inproc_plugin_shutdown(void);
+void grpc_resolver_dns_ares_init(void);
+void grpc_resolver_dns_ares_shutdown(void);
+void grpc_resolver_dns_native_init(void);
+void grpc_resolver_dns_native_shutdown(void);
+void grpc_resolver_sockaddr_init(void);
+void grpc_resolver_sockaddr_shutdown(void);
+void grpc_resolver_fake_init(void);
+void grpc_resolver_fake_shutdown(void);
+void grpc_server_load_reporting_plugin_init(void);
+void grpc_server_load_reporting_plugin_shutdown(void);
+void grpc_lb_policy_grpclb_init(void);
+void grpc_lb_policy_grpclb_shutdown(void);
+void grpc_lb_policy_pick_first_init(void);
+void grpc_lb_policy_pick_first_shutdown(void);
+void grpc_lb_policy_round_robin_init(void);
+void grpc_lb_policy_round_robin_shutdown(void);
+void grpc_max_age_filter_init(void);
+void grpc_max_age_filter_shutdown(void);
+void grpc_message_size_filter_init(void);
+void grpc_message_size_filter_shutdown(void);
+void grpc_workaround_cronet_compression_filter_init(void);
+void grpc_workaround_cronet_compression_filter_shutdown(void);
 
 void grpc_register_built_in_plugins(void) {
   grpc_register_plugin(grpc_http_filters_init,
diff --git a/src/core/tsi/fake_transport_security.cc b/src/core/tsi/fake_transport_security.cc
index f2f365f..b907636 100644
--- a/src/core/tsi/fake_transport_security.cc
+++ b/src/core/tsi/fake_transport_security.cc
@@ -399,8 +399,7 @@
 /* --- tsi_zero_copy_grpc_protector methods implementation. ---*/
 
 static tsi_result fake_zero_copy_grpc_protector_protect(
-    grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
-    grpc_slice_buffer* unprotected_slices,
+    tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* unprotected_slices,
     grpc_slice_buffer* protected_slices) {
   if (self == nullptr || unprotected_slices == nullptr ||
       protected_slices == nullptr) {
@@ -424,8 +423,7 @@
 }
 
 static tsi_result fake_zero_copy_grpc_protector_unprotect(
-    grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
-    grpc_slice_buffer* protected_slices,
+    tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* protected_slices,
     grpc_slice_buffer* unprotected_slices) {
   if (self == nullptr || unprotected_slices == nullptr ||
       protected_slices == nullptr) {
@@ -454,18 +452,18 @@
         impl->parsed_frame_size - TSI_FAKE_FRAME_HEADER_SIZE,
         unprotected_slices);
     impl->parsed_frame_size = 0;
-    grpc_slice_buffer_reset_and_unref_internal(exec_ctx, &impl->header_sb);
+    grpc_slice_buffer_reset_and_unref_internal(&impl->header_sb);
   }
   return TSI_OK;
 }
 
 static void fake_zero_copy_grpc_protector_destroy(
-    grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self) {
+    tsi_zero_copy_grpc_protector* self) {
   if (self == nullptr) return;
   tsi_fake_zero_copy_grpc_protector* impl =
       (tsi_fake_zero_copy_grpc_protector*)self;
-  grpc_slice_buffer_destroy_internal(exec_ctx, &impl->header_sb);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &impl->protected_sb);
+  grpc_slice_buffer_destroy_internal(&impl->header_sb);
+  grpc_slice_buffer_destroy_internal(&impl->protected_sb);
   gpr_free(impl);
 }
 
@@ -497,8 +495,7 @@
 }
 
 static tsi_result fake_handshaker_result_create_zero_copy_grpc_protector(
-    void* exec_ctx, const tsi_handshaker_result* self,
-    size_t* max_output_protected_frame_size,
+    const tsi_handshaker_result* self, size_t* max_output_protected_frame_size,
     tsi_zero_copy_grpc_protector** protector) {
   *protector =
       tsi_create_fake_zero_copy_grpc_protector(max_output_protected_frame_size);
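
A minimal usage sketch of the exec_ctx-free zero-copy protector API, assuming the caller links the fake TSI implementation; a grpc_core::ExecCtx still has to exist on the calling thread, it is just no longer threaded through every call:

#include <grpc/slice.h>
#include <grpc/slice_buffer.h>
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/tsi/fake_transport_security.h"
#include "src/core/tsi/transport_security_grpc.h"

void protect_one_message() {
  grpc_core::ExecCtx exec_ctx;  // scoped exec ctx instead of an explicit parameter
  size_t max_frame_size = 16384;
  tsi_zero_copy_grpc_protector* protector =
      tsi_create_fake_zero_copy_grpc_protector(&max_frame_size);

  grpc_slice_buffer unprotected, protected_frames;
  grpc_slice_buffer_init(&unprotected);
  grpc_slice_buffer_init(&protected_frames);
  grpc_slice_buffer_add(&unprotected, grpc_slice_from_copied_string("hello"));

  // Note that no exec_ctx argument is passed anymore.
  tsi_result result = tsi_zero_copy_grpc_protector_protect(
      protector, &unprotected, &protected_frames);
  (void)result;

  grpc_slice_buffer_destroy(&unprotected);
  grpc_slice_buffer_destroy(&protected_frames);
  tsi_zero_copy_grpc_protector_destroy(protector);
}
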
diff --git a/src/core/tsi/fake_transport_security.h b/src/core/tsi/fake_transport_security.h
index b90b996..3848e7c 100644
--- a/src/core/tsi/fake_transport_security.h
+++ b/src/core/tsi/fake_transport_security.h
@@ -21,10 +21,6 @@
 
 #include "src/core/tsi/transport_security_interface.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Value for the TSI_CERTIFICATE_TYPE_PEER_PROPERTY property for FAKE certs. */
 #define TSI_FAKE_CERTIFICATE_TYPE "FAKE"
 
@@ -44,8 +40,4 @@
 tsi_zero_copy_grpc_protector* tsi_create_fake_zero_copy_grpc_protector(
     size_t* max_protected_frame_size);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_TSI_FAKE_TRANSPORT_SECURITY_H */
diff --git a/src/core/tsi/gts_transport_security.cc b/src/core/tsi/gts_transport_security.cc
index d5948c9..2b09977 100644
--- a/src/core/tsi/gts_transport_security.cc
+++ b/src/core/tsi/gts_transport_security.cc
@@ -24,12 +24,12 @@
 
 gts_shared_resource* gts_get_shared_resource(void) { return &g_gts_resource; }
 
-extern "C" void grpc_tsi_gts_init() {
+void grpc_tsi_gts_init() {
   memset(&g_gts_resource, 0, sizeof(gts_shared_resource));
   gpr_mu_init(&g_gts_resource.mu);
 }
 
-extern "C" void grpc_tsi_gts_shutdown() {
+void grpc_tsi_gts_shutdown() {
   gpr_mu_destroy(&g_gts_resource.mu);
   if (g_gts_resource.cq == nullptr) {
     return;
diff --git a/src/core/tsi/gts_transport_security.h b/src/core/tsi/gts_transport_security.h
index 8bc2107..23b2b66 100644
--- a/src/core/tsi/gts_transport_security.h
+++ b/src/core/tsi/gts_transport_security.h
@@ -23,10 +23,6 @@
 #include <grpc/support/sync.h>
 #include <grpc/support/thd.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct gts_shared_resource {
   gpr_thd_id thread_id;
   grpc_channel* channel;
@@ -38,8 +34,4 @@
  *    TSI handshakes. */
 gts_shared_resource* gts_get_shared_resource(void);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_TSI_GTS_TRANSPORT_SECURITY_H */
diff --git a/src/core/tsi/ssl_transport_security.h b/src/core/tsi/ssl_transport_security.h
index 595c4cc..bf211e1 100644
--- a/src/core/tsi/ssl_transport_security.h
+++ b/src/core/tsi/ssl_transport_security.h
@@ -21,10 +21,6 @@
 
 #include "src/core/tsi/transport_security_interface.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Value for the TSI_CERTIFICATE_TYPE_PEER_PROPERTY property for X509 certs. */
 #define TSI_X509_CERTIFICATE_TYPE "X509"
 
@@ -193,8 +189,4 @@
     tsi_ssl_handshaker_factory* factory,
     tsi_ssl_handshaker_factory_vtable* new_vtable);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_TSI_SSL_TRANSPORT_SECURITY_H */
diff --git a/src/core/tsi/ssl_types.h b/src/core/tsi/ssl_types.h
index e0e9670..3788643 100644
--- a/src/core/tsi/ssl_types.h
+++ b/src/core/tsi/ssl_types.h
@@ -19,10 +19,6 @@
 #ifndef GRPC_CORE_TSI_SSL_TYPES_H
 #define GRPC_CORE_TSI_SSL_TYPES_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* A collection of macros to cast between various integer types that are
  * used differently between BoringSSL and OpenSSL:
  * TSI_INT_AS_SIZE(x):  convert 'int x' to a length parameter for an OpenSSL
@@ -41,8 +37,4 @@
 #define TSI_SIZE_AS_SIZE(x) ((int)(x))
 #endif
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_TSI_SSL_TYPES_H */
diff --git a/src/core/tsi/transport_security.h b/src/core/tsi/transport_security.h
index 7d6dd44..ed662d4 100644
--- a/src/core/tsi/transport_security.h
+++ b/src/core/tsi/transport_security.h
@@ -24,10 +24,6 @@
 #include "src/core/lib/debug/trace.h"
 #include "src/core/tsi/transport_security_interface.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern grpc_core::TraceFlag tsi_tracing_enabled;
 
 /* Base for tsi_frame_protector implementations.
@@ -94,7 +90,7 @@
 typedef struct {
   tsi_result (*extract_peer)(const tsi_handshaker_result* self, tsi_peer* peer);
   tsi_result (*create_zero_copy_grpc_protector)(
-      void* exec_ctx, const tsi_handshaker_result* self,
+      const tsi_handshaker_result* self,
       size_t* max_output_protected_frame_size,
       tsi_zero_copy_grpc_protector** protector);
   tsi_result (*create_frame_protector)(const tsi_handshaker_result* self,
@@ -126,8 +122,4 @@
 /* Utils. */
 char* tsi_strdup(const char* src); /* Sadly, no strdup in C89. */
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_H */
diff --git a/src/core/tsi/transport_security_adapter.h b/src/core/tsi/transport_security_adapter.h
index 232705f..9818fce 100644
--- a/src/core/tsi/transport_security_adapter.h
+++ b/src/core/tsi/transport_security_adapter.h
@@ -21,10 +21,6 @@
 
 #include "src/core/tsi/transport_security_interface.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* Create a tsi handshaker that takes an implementation of old interface and
    converts into an implementation of new interface. In the old interface,
    there are get_bytes_to_send_to_peer, process_bytes_from_peer, get_result,
@@ -40,8 +36,4 @@
    the caller. */
 tsi_handshaker* tsi_adapter_handshaker_get_wrapped(tsi_handshaker* adapter);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_ADAPTER_H */
diff --git a/src/core/tsi/transport_security_grpc.cc b/src/core/tsi/transport_security_grpc.cc
index 875d367..76f7ae7 100644
--- a/src/core/tsi/transport_security_grpc.cc
+++ b/src/core/tsi/transport_security_grpc.cc
@@ -20,18 +20,16 @@
 
 /* This method creates a tsi_zero_copy_grpc_protector object.  */
 tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
-    grpc_exec_ctx* exec_ctx, const tsi_handshaker_result* self,
-    size_t* max_output_protected_frame_size,
+    const tsi_handshaker_result* self, size_t* max_output_protected_frame_size,
     tsi_zero_copy_grpc_protector** protector) {
-  if (exec_ctx == nullptr || self == nullptr || self->vtable == nullptr ||
-      protector == nullptr) {
+  if (self == nullptr || self->vtable == nullptr || protector == nullptr) {
     return TSI_INVALID_ARGUMENT;
   }
   if (self->vtable->create_zero_copy_grpc_protector == nullptr) {
     return TSI_UNIMPLEMENTED;
   }
   return self->vtable->create_zero_copy_grpc_protector(
-      exec_ctx, self, max_output_protected_frame_size, protector);
+      self, max_output_protected_frame_size, protector);
 }
 
 /* --- tsi_zero_copy_grpc_protector common implementation. ---
@@ -39,33 +37,28 @@
    Calls specific implementation after state/input validation. */
 
 tsi_result tsi_zero_copy_grpc_protector_protect(
-    grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
-    grpc_slice_buffer* unprotected_slices,
+    tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* unprotected_slices,
     grpc_slice_buffer* protected_slices) {
-  if (exec_ctx == nullptr || self == nullptr || self->vtable == nullptr ||
+  if (self == nullptr || self->vtable == nullptr ||
       unprotected_slices == nullptr || protected_slices == nullptr) {
     return TSI_INVALID_ARGUMENT;
   }
   if (self->vtable->protect == nullptr) return TSI_UNIMPLEMENTED;
-  return self->vtable->protect(exec_ctx, self, unprotected_slices,
-                               protected_slices);
+  return self->vtable->protect(self, unprotected_slices, protected_slices);
 }
 
 tsi_result tsi_zero_copy_grpc_protector_unprotect(
-    grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
-    grpc_slice_buffer* protected_slices,
+    tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* protected_slices,
     grpc_slice_buffer* unprotected_slices) {
-  if (exec_ctx == nullptr || self == nullptr || self->vtable == nullptr ||
+  if (self == nullptr || self->vtable == nullptr ||
       protected_slices == nullptr || unprotected_slices == nullptr) {
     return TSI_INVALID_ARGUMENT;
   }
   if (self->vtable->unprotect == nullptr) return TSI_UNIMPLEMENTED;
-  return self->vtable->unprotect(exec_ctx, self, protected_slices,
-                                 unprotected_slices);
+  return self->vtable->unprotect(self, protected_slices, unprotected_slices);
 }
 
-void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx* exec_ctx,
-                                          tsi_zero_copy_grpc_protector* self) {
+void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector* self) {
   if (self == nullptr) return;
-  self->vtable->destroy(exec_ctx, self);
+  self->vtable->destroy(self);
 }
diff --git a/src/core/tsi/transport_security_grpc.h b/src/core/tsi/transport_security_grpc.h
index 1c54693..0156ff1 100644
--- a/src/core/tsi/transport_security_grpc.h
+++ b/src/core/tsi/transport_security_grpc.h
@@ -22,16 +22,11 @@
 #include <grpc/slice_buffer.h>
 #include "src/core/tsi/transport_security.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* This method creates a tsi_zero_copy_grpc_protector object. It returns TSI_OK
    assuming there is no fatal error.
    The caller is responsible for destroying the protector.  */
 tsi_result tsi_handshaker_result_create_zero_copy_grpc_protector(
-    grpc_exec_ctx* exec_ctx, const tsi_handshaker_result* self,
-    size_t* max_output_protected_frame_size,
+    const tsi_handshaker_result* self, size_t* max_output_protected_frame_size,
     tsi_zero_copy_grpc_protector** protector);
 
 /* -- tsi_zero_copy_grpc_protector object --  */
@@ -43,8 +38,8 @@
    - This method returns TSI_OK in case of success or a specific error code in
      case of failure.  */
 tsi_result tsi_zero_copy_grpc_protector_protect(
-    grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
-    grpc_slice_buffer* unprotected_slices, grpc_slice_buffer* protected_slices);
+    tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* unprotected_slices,
+    grpc_slice_buffer* protected_slices);
 
 /* Outputs unprotected bytes.
    - protected_slices is the bytes of protected frames.
@@ -53,32 +48,25 @@
      there is not enough data to output in which case unprotected_slices has 0
      bytes.  */
 tsi_result tsi_zero_copy_grpc_protector_unprotect(
-    grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self,
-    grpc_slice_buffer* protected_slices, grpc_slice_buffer* unprotected_slices);
+    tsi_zero_copy_grpc_protector* self, grpc_slice_buffer* protected_slices,
+    grpc_slice_buffer* unprotected_slices);
 
 /* Destroys the tsi_zero_copy_grpc_protector object.  */
-void tsi_zero_copy_grpc_protector_destroy(grpc_exec_ctx* exec_ctx,
-                                          tsi_zero_copy_grpc_protector* self);
+void tsi_zero_copy_grpc_protector_destroy(tsi_zero_copy_grpc_protector* self);
 
 /* Base for tsi_zero_copy_grpc_protector implementations.  */
 typedef struct {
-  tsi_result (*protect)(grpc_exec_ctx* exec_ctx,
-                        tsi_zero_copy_grpc_protector* self,
+  tsi_result (*protect)(tsi_zero_copy_grpc_protector* self,
                         grpc_slice_buffer* unprotected_slices,
                         grpc_slice_buffer* protected_slices);
-  tsi_result (*unprotect)(grpc_exec_ctx* exec_ctx,
-                          tsi_zero_copy_grpc_protector* self,
+  tsi_result (*unprotect)(tsi_zero_copy_grpc_protector* self,
                           grpc_slice_buffer* protected_slices,
                           grpc_slice_buffer* unprotected_slices);
-  void (*destroy)(grpc_exec_ctx* exec_ctx, tsi_zero_copy_grpc_protector* self);
+  void (*destroy)(tsi_zero_copy_grpc_protector* self);
 } tsi_zero_copy_grpc_protector_vtable;
 
 struct tsi_zero_copy_grpc_protector {
   const tsi_zero_copy_grpc_protector_vtable* vtable;
 };
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_GRPC_H */
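
For implementers, a minimal sketch of a protector against the new vtable shape; this pass-through version is illustrative only (a real implementation would frame and encrypt), but it shows the exec_ctx-free signatures:

#include <grpc/slice_buffer.h>
#include <grpc/support/alloc.h>
#include "src/core/tsi/transport_security_grpc.h"

// Illustrative only: a pass-through protector that moves slices unchanged.
static tsi_result passthrough_protect(tsi_zero_copy_grpc_protector* /*self*/,
                                      grpc_slice_buffer* unprotected_slices,
                                      grpc_slice_buffer* protected_slices) {
  grpc_slice_buffer_move_into(unprotected_slices, protected_slices);
  return TSI_OK;
}

static tsi_result passthrough_unprotect(tsi_zero_copy_grpc_protector* /*self*/,
                                        grpc_slice_buffer* protected_slices,
                                        grpc_slice_buffer* unprotected_slices) {
  grpc_slice_buffer_move_into(protected_slices, unprotected_slices);
  return TSI_OK;
}

static void passthrough_destroy(tsi_zero_copy_grpc_protector* self) {
  gpr_free(self);
}

static const tsi_zero_copy_grpc_protector_vtable passthrough_vtable = {
    passthrough_protect, passthrough_unprotect, passthrough_destroy};

tsi_zero_copy_grpc_protector* passthrough_protector_create() {
  tsi_zero_copy_grpc_protector* impl =
      (tsi_zero_copy_grpc_protector*)gpr_malloc(sizeof(*impl));
  impl->vtable = &passthrough_vtable;
  return impl;
}
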
diff --git a/src/core/tsi/transport_security_interface.h b/src/core/tsi/transport_security_interface.h
index 0f3d468..e925598 100644
--- a/src/core/tsi/transport_security_interface.h
+++ b/src/core/tsi/transport_security_interface.h
@@ -24,10 +24,6 @@
 
 #include "src/core/lib/debug/trace.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* --- tsi result ---  */
 
 typedef enum {
@@ -453,8 +449,4 @@
 /* This method destroys the shared objects created by tsi_init.  */
 void tsi_destroy();
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_CORE_TSI_TRANSPORT_SECURITY_INTERFACE_H */
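
With the extern "C" guards gone, C++ translation units can include the TSI headers directly instead of wrapping the include, the same pattern the channel_arguments.cc and channel_filter.cc hunks below apply to other core headers; a before/after sketch:

// Before: C++ callers had to wrap core headers to force C linkage.
// extern "C" {
// #include "src/core/tsi/transport_security_interface.h"
// }
//
// After: the headers are C++-aware, so a plain include is enough.
#include "src/core/tsi/transport_security_interface.h"
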
diff --git a/src/cpp/common/channel_arguments.cc b/src/cpp/common/channel_arguments.cc
index f89f5f1..b696774 100644
--- a/src/cpp/common/channel_arguments.cc
+++ b/src/cpp/common/channel_arguments.cc
@@ -23,11 +23,10 @@
 #include <grpc++/resource_quota.h>
 #include <grpc/impl/codegen/grpc_types.h>
 #include <grpc/support/log.h>
-extern "C" {
 #include "src/core/lib/channel/channel_args.h"
 #include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/iomgr/socket_mutator.h"
-}
+
 namespace grpc {
 
 ChannelArguments::ChannelArguments() {
@@ -67,13 +66,12 @@
 }
 
 ChannelArguments::~ChannelArguments() {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   for (auto it = args_.begin(); it != args_.end(); ++it) {
     if (it->type == GRPC_ARG_POINTER) {
-      it->value.pointer.vtable->destroy(&exec_ctx, it->value.pointer.p);
+      it->value.pointer.vtable->destroy(it->value.pointer.p);
     }
   }
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 void ChannelArguments::Swap(ChannelArguments& other) {
@@ -96,17 +94,17 @@
   }
   grpc_arg mutator_arg = grpc_socket_mutator_to_arg(mutator);
   bool replaced = false;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   for (auto it = args_.begin(); it != args_.end(); ++it) {
     if (it->type == mutator_arg.type &&
         grpc::string(it->key) == grpc::string(mutator_arg.key)) {
       GPR_ASSERT(!replaced);
-      it->value.pointer.vtable->destroy(&exec_ctx, it->value.pointer.p);
+      it->value.pointer.vtable->destroy(it->value.pointer.p);
       it->value.pointer = mutator_arg.value.pointer;
       replaced = true;
     }
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   if (!replaced) {
     args_.push_back(mutator_arg);
   }
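
The exec_ctx change visible in this file follows one mechanical pattern across the tree: the explicitly created and finished grpc_exec_ctx is replaced by a scoped grpc_core::ExecCtx that flushes in its destructor, and callees fetch it via grpc_core::ExecCtx::Get() instead of taking it as a parameter. A rough sketch (do_something is a hypothetical callee shown only for contrast):

#include "src/core/lib/iomgr/exec_ctx.h"

void do_something(void* arg);  // hypothetical callee, no exec_ctx parameter

// Before:
//   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
//   do_something(&exec_ctx, arg);
//   grpc_exec_ctx_finish(&exec_ctx);
void do_work(void* arg) {
  grpc_core::ExecCtx exec_ctx;  // flushed automatically when it goes out of scope
  do_something(arg);
}
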
diff --git a/src/cpp/common/channel_filter.cc b/src/cpp/common/channel_filter.cc
index d1cfd2b..cbe2a20 100644
--- a/src/cpp/common/channel_filter.cc
+++ b/src/cpp/common/channel_filter.cc
@@ -18,9 +18,7 @@
 
 #include <string.h>
 
-extern "C" {
 #include "src/core/lib/channel/channel_stack.h"
-}
 #include "src/cpp/common/channel_filter.h"
 
 #include <grpc++/impl/codegen/slice.h>
@@ -29,43 +27,39 @@
 
 // MetadataBatch
 
-grpc_linked_mdelem* MetadataBatch::AddMetadata(grpc_exec_ctx* exec_ctx,
-                                               const string& key,
+grpc_linked_mdelem* MetadataBatch::AddMetadata(const string& key,
                                                const string& value) {
   grpc_linked_mdelem* storage = new grpc_linked_mdelem;
   memset(storage, 0, sizeof(grpc_linked_mdelem));
-  storage->md = grpc_mdelem_from_slices(exec_ctx, SliceFromCopiedString(key),
+  storage->md = grpc_mdelem_from_slices(SliceFromCopiedString(key),
                                         SliceFromCopiedString(value));
   GRPC_LOG_IF_ERROR("MetadataBatch::AddMetadata",
-                    grpc_metadata_batch_link_head(exec_ctx, batch_, storage));
+                    grpc_metadata_batch_link_head(batch_, storage));
   return storage;
 }
 
 // ChannelData
 
-void ChannelData::StartTransportOp(grpc_exec_ctx* exec_ctx,
-                                   grpc_channel_element* elem,
+void ChannelData::StartTransportOp(grpc_channel_element* elem,
                                    TransportOp* op) {
-  grpc_channel_next_op(exec_ctx, elem, op->op());
+  grpc_channel_next_op(elem, op->op());
 }
 
-void ChannelData::GetInfo(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+void ChannelData::GetInfo(grpc_channel_element* elem,
                           const grpc_channel_info* channel_info) {
-  grpc_channel_next_get_info(exec_ctx, elem, channel_info);
+  grpc_channel_next_get_info(elem, channel_info);
 }
 
 // CallData
 
-void CallData::StartTransportStreamOpBatch(grpc_exec_ctx* exec_ctx,
-                                           grpc_call_element* elem,
+void CallData::StartTransportStreamOpBatch(grpc_call_element* elem,
                                            TransportStreamOpBatch* op) {
-  grpc_call_next_op(exec_ctx, elem, op->op());
+  grpc_call_next_op(elem, op->op());
 }
 
-void CallData::SetPollsetOrPollsetSet(grpc_exec_ctx* exec_ctx,
-                                      grpc_call_element* elem,
+void CallData::SetPollsetOrPollsetSet(grpc_call_element* elem,
                                       grpc_polling_entity* pollent) {
-  grpc_call_stack_ignore_set_pollset_or_pollset_set(exec_ctx, elem, pollent);
+  grpc_call_stack_ignore_set_pollset_or_pollset_set(elem, pollent);
 }
 
 // internal code used by RegisterChannelFilter()
@@ -77,8 +71,7 @@
 
 namespace {
 
-bool MaybeAddFilter(grpc_exec_ctx* exec_ctx,
-                    grpc_channel_stack_builder* builder, void* arg) {
+bool MaybeAddFilter(grpc_channel_stack_builder* builder, void* arg) {
   const FilterRecord& filter = *(FilterRecord*)arg;
   if (filter.include_filter) {
     const grpc_channel_args* args =
diff --git a/src/cpp/common/channel_filter.h b/src/cpp/common/channel_filter.h
index 4fb81ec..a1f42c0 100644
--- a/src/cpp/common/channel_filter.h
+++ b/src/cpp/common/channel_filter.h
@@ -26,11 +26,9 @@
 #include <functional>
 #include <vector>
 
-extern "C" {
 #include "src/core/lib/channel/channel_stack.h"
 #include "src/core/lib/surface/channel_init.h"
 #include "src/core/lib/transport/metadata_batch.h"
-}
 
 /// An interface to define filters.
 ///
@@ -56,8 +54,7 @@
   /// Adds metadata and returns the newly allocated storage.
   /// The caller takes ownership of the result, which must exist for the
   /// lifetime of the gRPC call.
-  grpc_linked_mdelem* AddMetadata(grpc_exec_ctx* exec_ctx, const string& key,
-                                  const string& value);
+  grpc_linked_mdelem* AddMetadata(const string& key, const string& value);
 
   class const_iterator : public std::iterator<std::bidirectional_iterator_tag,
                                               const grpc_mdelem> {
@@ -224,18 +221,17 @@
   // TODO(roth): Come up with a more C++-like API for the channel element.
 
   /// Initializes the channel data.
-  virtual grpc_error* Init(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+  virtual grpc_error* Init(grpc_channel_element* elem,
                            grpc_channel_element_args* args) {
     return GRPC_ERROR_NONE;
   }
 
   // Called before destruction.
-  virtual void Destroy(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem) {}
+  virtual void Destroy(grpc_channel_element* elem) {}
 
-  virtual void StartTransportOp(grpc_exec_ctx* exec_ctx,
-                                grpc_channel_element* elem, TransportOp* op);
+  virtual void StartTransportOp(grpc_channel_element* elem, TransportOp* op);
 
-  virtual void GetInfo(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+  virtual void GetInfo(grpc_channel_element* elem,
                        const grpc_channel_info* channel_info);
 };
 
@@ -248,24 +244,22 @@
   // TODO(roth): Come up with a more C++-like API for the call element.
 
   /// Initializes the call data.
-  virtual grpc_error* Init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+  virtual grpc_error* Init(grpc_call_element* elem,
                            const grpc_call_element_args* args) {
     return GRPC_ERROR_NONE;
   }
 
   // Called before destruction.
-  virtual void Destroy(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+  virtual void Destroy(grpc_call_element* elem,
                        const grpc_call_final_info* final_info,
                        grpc_closure* then_call_closure) {}
 
   /// Starts a new stream operation.
-  virtual void StartTransportStreamOpBatch(grpc_exec_ctx* exec_ctx,
-                                           grpc_call_element* elem,
+  virtual void StartTransportStreamOpBatch(grpc_call_element* elem,
                                            TransportStreamOpBatch* op);
 
   /// Sets a pollset or pollset set.
-  virtual void SetPollsetOrPollsetSet(grpc_exec_ctx* exec_ctx,
-                                      grpc_call_element* elem,
+  virtual void SetPollsetOrPollsetSet(grpc_call_element* elem,
                                       grpc_polling_entity* pollent);
 };
 
@@ -279,71 +273,63 @@
  public:
   static const size_t channel_data_size = sizeof(ChannelDataType);
 
-  static grpc_error* InitChannelElement(grpc_exec_ctx* exec_ctx,
-                                        grpc_channel_element* elem,
+  static grpc_error* InitChannelElement(grpc_channel_element* elem,
                                         grpc_channel_element_args* args) {
     // Construct the object in the already-allocated memory.
     ChannelDataType* channel_data = new (elem->channel_data) ChannelDataType();
-    return channel_data->Init(exec_ctx, elem, args);
+    return channel_data->Init(elem, args);
   }
 
-  static void DestroyChannelElement(grpc_exec_ctx* exec_ctx,
-                                    grpc_channel_element* elem) {
+  static void DestroyChannelElement(grpc_channel_element* elem) {
     ChannelDataType* channel_data =
         reinterpret_cast<ChannelDataType*>(elem->channel_data);
-    channel_data->Destroy(exec_ctx, elem);
+    channel_data->Destroy(elem);
     channel_data->~ChannelDataType();
   }
 
-  static void StartTransportOp(grpc_exec_ctx* exec_ctx,
-                               grpc_channel_element* elem,
+  static void StartTransportOp(grpc_channel_element* elem,
                                grpc_transport_op* op) {
     ChannelDataType* channel_data =
         reinterpret_cast<ChannelDataType*>(elem->channel_data);
     TransportOp op_wrapper(op);
-    channel_data->StartTransportOp(exec_ctx, elem, &op_wrapper);
+    channel_data->StartTransportOp(elem, &op_wrapper);
   }
 
-  static void GetChannelInfo(grpc_exec_ctx* exec_ctx,
-                             grpc_channel_element* elem,
+  static void GetChannelInfo(grpc_channel_element* elem,
                              const grpc_channel_info* channel_info) {
     ChannelDataType* channel_data =
         reinterpret_cast<ChannelDataType*>(elem->channel_data);
-    channel_data->GetInfo(exec_ctx, elem, channel_info);
+    channel_data->GetInfo(elem, channel_info);
   }
 
   static const size_t call_data_size = sizeof(CallDataType);
 
-  static grpc_error* InitCallElement(grpc_exec_ctx* exec_ctx,
-                                     grpc_call_element* elem,
+  static grpc_error* InitCallElement(grpc_call_element* elem,
                                      const grpc_call_element_args* args) {
     // Construct the object in the already-allocated memory.
     CallDataType* call_data = new (elem->call_data) CallDataType();
-    return call_data->Init(exec_ctx, elem, args);
+    return call_data->Init(elem, args);
   }
 
-  static void DestroyCallElement(grpc_exec_ctx* exec_ctx,
-                                 grpc_call_element* elem,
+  static void DestroyCallElement(grpc_call_element* elem,
                                  const grpc_call_final_info* final_info,
                                  grpc_closure* then_call_closure) {
     CallDataType* call_data = reinterpret_cast<CallDataType*>(elem->call_data);
-    call_data->Destroy(exec_ctx, elem, final_info, then_call_closure);
+    call_data->Destroy(elem, final_info, then_call_closure);
     call_data->~CallDataType();
   }
 
-  static void StartTransportStreamOpBatch(grpc_exec_ctx* exec_ctx,
-                                          grpc_call_element* elem,
+  static void StartTransportStreamOpBatch(grpc_call_element* elem,
                                           grpc_transport_stream_op_batch* op) {
     CallDataType* call_data = reinterpret_cast<CallDataType*>(elem->call_data);
     TransportStreamOpBatch op_wrapper(op);
-    call_data->StartTransportStreamOpBatch(exec_ctx, elem, &op_wrapper);
+    call_data->StartTransportStreamOpBatch(elem, &op_wrapper);
   }
 
-  static void SetPollsetOrPollsetSet(grpc_exec_ctx* exec_ctx,
-                                     grpc_call_element* elem,
+  static void SetPollsetOrPollsetSet(grpc_call_element* elem,
                                      grpc_polling_entity* pollent) {
     CallDataType* call_data = reinterpret_cast<CallDataType*>(elem->call_data);
-    call_data->SetPollsetOrPollsetSet(exec_ctx, elem, pollent);
+    call_data->SetPollsetOrPollsetSet(elem, pollent);
   }
 };
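
A sketch of what a filter author's code looks like against the updated C++ wrapper; the method signatures are taken from the header above, while MyChannelData/MyCallData are hypothetical:

#include "src/cpp/common/channel_filter.h"

class MyChannelData : public grpc::ChannelData {
 public:
  grpc_error* Init(grpc_channel_element* elem,
                   grpc_channel_element_args* args) override {
    return GRPC_ERROR_NONE;  // no grpc_exec_ctx* parameter anymore
  }
};

class MyCallData : public grpc::CallData {
 public:
  void StartTransportStreamOpBatch(grpc_call_element* elem,
                                   grpc::TransportStreamOpBatch* op) override {
    // Inspect or modify the batch, then pass it down the stack via the
    // default implementation (which calls grpc_call_next_op).
    grpc::CallData::StartTransportStreamOpBatch(elem, op);
  }
};
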
 
diff --git a/src/cpp/common/core_codegen.cc b/src/cpp/common/core_codegen.cc
index 3cbf08a..936d699 100644
--- a/src/cpp/common/core_codegen.cc
+++ b/src/cpp/common/core_codegen.cc
@@ -33,9 +33,7 @@
 
 #include "src/core/lib/profiling/timers.h"
 
-extern "C" {
 struct grpc_byte_buffer;
-}
 
 namespace grpc {
 
diff --git a/src/cpp/common/version_cc.cc b/src/cpp/common/version_cc.cc
index 8eb5a2e..7f01a66 100644
--- a/src/cpp/common/version_cc.cc
+++ b/src/cpp/common/version_cc.cc
@@ -22,5 +22,5 @@
 #include <grpc++/grpc++.h>
 
 namespace grpc {
-grpc::string Version() { return "1.8.3"; }
+grpc::string Version() { return "1.9.0-dev"; }
 }  // namespace grpc
diff --git a/src/cpp/util/core_stats.h b/src/cpp/util/core_stats.h
index 00e38bf..6366d7d 100644
--- a/src/cpp/util/core_stats.h
+++ b/src/cpp/util/core_stats.h
@@ -21,9 +21,7 @@
 
 #include "src/proto/grpc/core/stats.pb.h"
 
-extern "C" {
 #include "src/core/lib/debug/stats.h"
-}
 
 namespace grpc {
 
diff --git a/src/csharp/Grpc.Core.Tests/ChannelConnectivityTest.cs b/src/csharp/Grpc.Core.Tests/ChannelConnectivityTest.cs
new file mode 100644
index 0000000..a43040f
--- /dev/null
+++ b/src/csharp/Grpc.Core.Tests/ChannelConnectivityTest.cs
@@ -0,0 +1,86 @@
+#region Copyright notice and license
+
+// Copyright 2017 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#endregion
+
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
+using Grpc.Core;
+using Grpc.Core.Internal;
+using Grpc.Core.Profiling;
+using Grpc.Core.Utils;
+using NUnit.Framework;
+
+namespace Grpc.Core.Tests
+{
+    public class ChannelConnectivityTest
+    {
+        const string Host = "127.0.0.1";
+
+        MockServiceHelper helper;
+        Server server;
+        Channel channel;
+
+        [SetUp]
+        public void Init()
+        {
+            helper = new MockServiceHelper(Host);
+            server = helper.GetServer();
+            server.Start();
+            channel = helper.GetChannel();
+        }
+
+        [TearDown]
+        public void Cleanup()
+        {
+            channel.ShutdownAsync().Wait();
+            server.ShutdownAsync().Wait();
+        }
+
+        [Test]
+        public async Task Channel_WaitForStateChangedAsync()
+        {
+            helper.UnaryHandler = new UnaryServerMethod<string, string>((request, context) =>
+            {
+                return Task.FromResult(request);
+            });
+
+            Assert.ThrowsAsync(typeof(TaskCanceledException), 
+                async () => await channel.WaitForStateChangedAsync(channel.State, DateTime.UtcNow.AddMilliseconds(10)));
+
+            var stateChangedTask = channel.WaitForStateChangedAsync(channel.State);
+
+            await Calls.AsyncUnaryCall(helper.CreateUnaryCall(), "abc");
+
+            await stateChangedTask;
+            Assert.AreEqual(ChannelState.Ready, channel.State);
+        }
+
+        [Test]
+        public async Task Channel_ConnectAsync()
+        {
+            await channel.ConnectAsync();
+            Assert.AreEqual(ChannelState.Ready, channel.State);
+
+            await channel.ConnectAsync(DateTime.UtcNow.AddMilliseconds(1000));
+            Assert.AreEqual(ChannelState.Ready, channel.State);
+        }
+    }
+}
diff --git a/src/csharp/Grpc.Core.Tests/ClientServerTest.cs b/src/csharp/Grpc.Core.Tests/ClientServerTest.cs
index 90dd365..331c332 100644
--- a/src/csharp/Grpc.Core.Tests/ClientServerTest.cs
+++ b/src/csharp/Grpc.Core.Tests/ClientServerTest.cs
@@ -373,34 +373,5 @@
             });
             Assert.AreEqual("PASS", Calls.BlockingUnaryCall(helper.CreateUnaryCall(), "abc"));
         }
-
-        [Test]
-        public async Task Channel_WaitForStateChangedAsync()
-        {
-            helper.UnaryHandler = new UnaryServerMethod<string, string>((request, context) =>
-            {
-                return Task.FromResult(request);
-            });
-
-            Assert.ThrowsAsync(typeof(TaskCanceledException), 
-                async () => await channel.WaitForStateChangedAsync(channel.State, DateTime.UtcNow.AddMilliseconds(10)));
-
-            var stateChangedTask = channel.WaitForStateChangedAsync(channel.State);
-
-            await Calls.AsyncUnaryCall(helper.CreateUnaryCall(), "abc");
-
-            await stateChangedTask;
-            Assert.AreEqual(ChannelState.Ready, channel.State);
-        }
-
-        [Test]
-        public async Task Channel_ConnectAsync()
-        {
-            await channel.ConnectAsync();
-            Assert.AreEqual(ChannelState.Ready, channel.State);
-
-            await channel.ConnectAsync(DateTime.UtcNow.AddMilliseconds(1000));
-            Assert.AreEqual(ChannelState.Ready, channel.State);
-        }
     }
 }
diff --git a/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs b/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
index 775c950..7e4e297 100644
--- a/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
+++ b/src/csharp/Grpc.Core.Tests/Internal/CompletionQueueSafeHandleTest.cs
@@ -40,7 +40,7 @@
         public void CreateAsyncAndShutdown()
         {
             var env = GrpcEnvironment.AddRef();
-            var cq = CompletionQueueSafeHandle.CreateAsync(new CompletionRegistry(env, () => BatchContextSafeHandle.Create()));
+            var cq = CompletionQueueSafeHandle.CreateAsync(new CompletionRegistry(env, () => BatchContextSafeHandle.Create(), () => RequestCallContextSafeHandle.Create()));
             cq.Shutdown();
             var ev = cq.Next();
             cq.Dispose();
diff --git a/src/csharp/Grpc.Core/Channel.cs b/src/csharp/Grpc.Core/Channel.cs
index f9925a8..e39da9c 100644
--- a/src/csharp/Grpc.Core/Channel.cs
+++ b/src/csharp/Grpc.Core/Channel.cs
@@ -152,8 +152,11 @@
                 "Shutdown is a terminal state. No further state changes can occur.");
             var tcs = new TaskCompletionSource<object>();
             var deadlineTimespec = deadline.HasValue ? Timespec.FromDateTime(deadline.Value) : Timespec.InfFuture;
-            // pass "tcs" as "state" for WatchConnectivityStateHandler.
-            handle.WatchConnectivityState(lastObservedState, deadlineTimespec, completionQueue, WatchConnectivityStateHandler, tcs);
+            lock (myLock)
+            {
+                // pass "tcs" as "state" for WatchConnectivityStateHandler.
+                handle.WatchConnectivityState(lastObservedState, deadlineTimespec, completionQueue, WatchConnectivityStateHandler, tcs);
+            }
             return tcs.Task;
         }
 
@@ -236,7 +239,10 @@
                 Logger.Warning("Channel shutdown was called but there are still {0} active calls for that channel.", activeCallCount);
             }
 
-            handle.Dispose();
+            lock (myLock)
+            {
+                handle.Dispose();
+            }
 
             await Task.WhenAll(GrpcEnvironment.ReleaseAsync(), connectivityWatcherTask).ConfigureAwait(false);
         }
@@ -285,7 +291,10 @@
         {
             try
             {
-                return handle.CheckConnectivityState(tryToConnect);
+                lock (myLock)
+                {
+                    return handle.CheckConnectivityState(tryToConnect);
+                }
             }
             catch (ObjectDisposedException)
             {
diff --git a/src/csharp/Grpc.Core/GrpcEnvironment.cs b/src/csharp/Grpc.Core/GrpcEnvironment.cs
index 2b1b5e3..7b4342b 100644
--- a/src/csharp/Grpc.Core/GrpcEnvironment.cs
+++ b/src/csharp/Grpc.Core/GrpcEnvironment.cs
@@ -35,6 +35,8 @@
         const int MinDefaultThreadPoolSize = 4;
         const int DefaultBatchContextPoolSharedCapacity = 10000;
         const int DefaultBatchContextPoolThreadLocalCapacity = 64;
+        const int DefaultRequestCallContextPoolSharedCapacity = 10000;
+        const int DefaultRequestCallContextPoolThreadLocalCapacity = 64;
 
         static object staticLock = new object();
         static GrpcEnvironment instance;
@@ -44,12 +46,15 @@
         static bool inlineHandlers;
         static int batchContextPoolSharedCapacity = DefaultBatchContextPoolSharedCapacity;
         static int batchContextPoolThreadLocalCapacity = DefaultBatchContextPoolThreadLocalCapacity;
+        static int requestCallContextPoolSharedCapacity = DefaultRequestCallContextPoolSharedCapacity;
+        static int requestCallContextPoolThreadLocalCapacity = DefaultRequestCallContextPoolThreadLocalCapacity;
         static readonly HashSet<Channel> registeredChannels = new HashSet<Channel>();
         static readonly HashSet<Server> registeredServers = new HashSet<Server>();
 
         static ILogger logger = new LogLevelFilterLogger(new ConsoleLogger(), LogLevel.Off, true);
 
         readonly IObjectPool<BatchContextSafeHandle> batchContextPool;
+        readonly IObjectPool<RequestCallContextSafeHandle> requestCallContextPool;
         readonly GrpcThreadPool threadPool;
         readonly DebugStats debugStats = new DebugStats();
         readonly AtomicCounter cqPickerCounter = new AtomicCounter();
@@ -263,6 +268,26 @@
         }
 
         /// <summary>
+        /// Sets the parameters for a pool that caches request call context instances. Reusing request call context instances
+        /// instead of creating a new one for every requested call in C core helps reduce GC pressure.
+        /// Can only be invoked before the <c>GrpcEnvironment</c> is started and cannot be changed afterwards.
+        /// This is an advanced setting and you should only use it if you know what you are doing.
+        /// Most users should rely on the default value provided by gRPC library.
+        /// Note: this method is part of an experimental API that can change or be removed without any prior notice.
+        /// </summary>
+        public static void SetRequestCallContextPoolParams(int sharedCapacity, int threadLocalCapacity)
+        {
+            lock (staticLock)
+            {
+                GrpcPreconditions.CheckState(instance == null, "Can only be set before GrpcEnvironment is initialized");
+                GrpcPreconditions.CheckArgument(sharedCapacity >= 0, "Shared capacity needs to be a non-negative number");
+                GrpcPreconditions.CheckArgument(threadLocalCapacity >= 0, "Thread local capacity needs to be a non-negative number");
+                requestCallContextPoolSharedCapacity = sharedCapacity;
+                requestCallContextPoolThreadLocalCapacity = threadLocalCapacity;
+            }
+        }
+
+        /// <summary>
         /// Occurs when <c>GrpcEnvironment</c> is about to start the shutdown logic.
         /// If <c>GrpcEnvironment</c> is later initialized and shutdown, the event will be fired again (unless unregistered first).
         /// </summary>
@@ -275,6 +300,7 @@
         {
             GrpcNativeInit();
             batchContextPool = new DefaultObjectPool<BatchContextSafeHandle>(() => BatchContextSafeHandle.Create(this.batchContextPool), batchContextPoolSharedCapacity, batchContextPoolThreadLocalCapacity);
+            requestCallContextPool = new DefaultObjectPool<RequestCallContextSafeHandle>(() => RequestCallContextSafeHandle.Create(this.requestCallContextPool), requestCallContextPoolSharedCapacity, requestCallContextPoolThreadLocalCapacity);
             threadPool = new GrpcThreadPool(this, GetThreadPoolSizeOrDefault(), GetCompletionQueueCountOrDefault(), inlineHandlers);
             threadPool.Start();
         }
@@ -292,6 +318,8 @@
 
         internal IObjectPool<BatchContextSafeHandle> BatchContextPool => batchContextPool;
 
+        internal IObjectPool<RequestCallContextSafeHandle> RequestCallContextPool => requestCallContextPool;
+
         internal bool IsAlive
         {
             get
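
The C# changes in this file reuse pooled request-call contexts to cut GC pressure. The idea is language-agnostic; a stripped-down sketch of such a pool (in C++, purely illustrative, not the Grpc.Core implementation, which additionally keeps a thread-local free list) looks like:

#include <cstddef>
#include <mutex>
#include <vector>

// Illustrative only: objects are leased on demand, reset by the caller,
// and returned to a bounded shared free list instead of being reallocated.
template <typename T>
class ObjectPool {
 public:
  explicit ObjectPool(size_t shared_capacity) : capacity_(shared_capacity) {}

  T* Lease() {
    std::lock_guard<std::mutex> lock(mu_);
    if (free_list_.empty()) return new T();
    T* obj = free_list_.back();
    free_list_.pop_back();
    return obj;
  }

  void Return(T* obj) {
    std::lock_guard<std::mutex> lock(mu_);
    if (free_list_.size() < capacity_) {
      free_list_.push_back(obj);  // recycle
    } else {
      delete obj;  // pool full: fall back to normal destruction
    }
  }

 private:
  std::mutex mu_;
  size_t capacity_;
  std::vector<T*> free_list_;
};
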
diff --git a/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs b/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs
index cf3f3c0..79d0c91 100644
--- a/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs
+++ b/src/csharp/Grpc.Core/Internal/CompletionRegistry.cs
@@ -37,14 +37,16 @@
 
         readonly GrpcEnvironment environment;
         readonly Func<BatchContextSafeHandle> batchContextFactory;
+        readonly Func<RequestCallContextSafeHandle> requestCallContextFactory;
         readonly Dictionary<IntPtr, IOpCompletionCallback> dict = new Dictionary<IntPtr, IOpCompletionCallback>(new IntPtrComparer());
         SpinLock spinLock = new SpinLock(Debugger.IsAttached);
         IntPtr lastRegisteredKey;  // only for testing
 
-        public CompletionRegistry(GrpcEnvironment environment, Func<BatchContextSafeHandle> batchContextFactory)
+        public CompletionRegistry(GrpcEnvironment environment, Func<BatchContextSafeHandle> batchContextFactory, Func<RequestCallContextSafeHandle> requestCallContextFactory)
         {
             this.environment = GrpcPreconditions.CheckNotNull(environment);
             this.batchContextFactory = GrpcPreconditions.CheckNotNull(batchContextFactory);
+            this.requestCallContextFactory = GrpcPreconditions.CheckNotNull(requestCallContextFactory);
         }
 
         public void Register(IntPtr key, IOpCompletionCallback callback)
@@ -73,10 +75,12 @@
             return ctx;
         }
 
-        public void RegisterRequestCallCompletion(RequestCallContextSafeHandle ctx, RequestCallCompletionDelegate callback)
+        public RequestCallContextSafeHandle RegisterRequestCallCompletion(RequestCallCompletionDelegate callback)
         {
+            var ctx = requestCallContextFactory();
             ctx.CompletionCallback = callback;
             Register(ctx.Handle, ctx);
+            return ctx;
         }
 
         public IOpCompletionCallback Extract(IntPtr key)
diff --git a/src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs b/src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs
index f1b5a4f..8ddda9b 100644
--- a/src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs
+++ b/src/csharp/Grpc.Core/Internal/GrpcThreadPool.cs
@@ -219,7 +219,7 @@
             var list = new List<CompletionQueueSafeHandle>();
             for (int i = 0; i < completionQueueCount; i++)
             {
-                var completionRegistry = new CompletionRegistry(environment, () => environment.BatchContextPool.Lease());
+                var completionRegistry = new CompletionRegistry(environment, () => environment.BatchContextPool.Lease(), () => environment.RequestCallContextPool.Lease());
                 list.Add(CompletionQueueSafeHandle.CreateAsync(completionRegistry));
             }
             return list.AsReadOnly();
diff --git a/src/csharp/Grpc.Core/Internal/NativeMethods.cs b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
index 43acb8f..8b15c26 100644
--- a/src/csharp/Grpc.Core/Internal/NativeMethods.cs
+++ b/src/csharp/Grpc.Core/Internal/NativeMethods.cs
@@ -61,6 +61,7 @@
         public readonly Delegates.grpcsharp_request_call_context_host_delegate grpcsharp_request_call_context_host;
         public readonly Delegates.grpcsharp_request_call_context_deadline_delegate grpcsharp_request_call_context_deadline;
         public readonly Delegates.grpcsharp_request_call_context_request_metadata_delegate grpcsharp_request_call_context_request_metadata;
+        public readonly Delegates.grpcsharp_request_call_context_reset_delegate grpcsharp_request_call_context_reset;
         public readonly Delegates.grpcsharp_request_call_context_destroy_delegate grpcsharp_request_call_context_destroy;
 
         public readonly Delegates.grpcsharp_composite_call_credentials_create_delegate grpcsharp_composite_call_credentials_create;
@@ -179,6 +180,7 @@
             this.grpcsharp_request_call_context_host = GetMethodDelegate<Delegates.grpcsharp_request_call_context_host_delegate>(library);
             this.grpcsharp_request_call_context_deadline = GetMethodDelegate<Delegates.grpcsharp_request_call_context_deadline_delegate>(library);
             this.grpcsharp_request_call_context_request_metadata = GetMethodDelegate<Delegates.grpcsharp_request_call_context_request_metadata_delegate>(library);
+            this.grpcsharp_request_call_context_reset = GetMethodDelegate<Delegates.grpcsharp_request_call_context_reset_delegate>(library);
             this.grpcsharp_request_call_context_destroy = GetMethodDelegate<Delegates.grpcsharp_request_call_context_destroy_delegate>(library);
 
             this.grpcsharp_composite_call_credentials_create = GetMethodDelegate<Delegates.grpcsharp_composite_call_credentials_create_delegate>(library);
@@ -322,6 +324,7 @@
             public delegate IntPtr grpcsharp_request_call_context_host_delegate(RequestCallContextSafeHandle ctx, out UIntPtr hostLength);
             public delegate Timespec grpcsharp_request_call_context_deadline_delegate(RequestCallContextSafeHandle ctx);
             public delegate IntPtr grpcsharp_request_call_context_request_metadata_delegate(RequestCallContextSafeHandle ctx);
+            public delegate void grpcsharp_request_call_context_reset_delegate(RequestCallContextSafeHandle ctx);
             public delegate void grpcsharp_request_call_context_destroy_delegate(IntPtr ctx);
 
             public delegate CallCredentialsSafeHandle grpcsharp_composite_call_credentials_create_delegate(CallCredentialsSafeHandle creds1, CallCredentialsSafeHandle creds2);
diff --git a/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs b/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs
index 09f5c3e..59e9d9b 100644
--- a/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/RequestCallContextSafeHandle.cs
@@ -30,14 +30,17 @@
     {
         static readonly NativeMethods Native = NativeMethods.Get();
         static readonly ILogger Logger = GrpcEnvironment.Logger.ForType<RequestCallContextSafeHandle>();
+        IObjectPool<RequestCallContextSafeHandle> ownedByPool;
 
         private RequestCallContextSafeHandle()
         {
         }
 
-        public static RequestCallContextSafeHandle Create()
+        public static RequestCallContextSafeHandle Create(IObjectPool<RequestCallContextSafeHandle> ownedByPool = null)
         {
-            return Native.grpcsharp_request_call_context_create();
+            var ctx = Native.grpcsharp_request_call_context_create();
+            ctx.ownedByPool = ownedByPool;
+            return ctx;
         }
 
         public IntPtr Handle
@@ -71,6 +74,19 @@
             return new ServerRpcNew(server, call, method, host, deadline, metadata);
         }
 
+        public void Recycle()
+        {
+            if (ownedByPool != null)
+            {
+                Native.grpcsharp_request_call_context_reset(this);
+                ownedByPool.Return(this);
+            }
+            else
+            {
+                Dispose();
+            }
+        }
+
         protected override bool ReleaseHandle()
         {
             Native.grpcsharp_request_call_context_destroy(handle);
@@ -90,7 +106,7 @@
             finally
             {
                 CompletionCallback = null;
-                Dispose();
+                Recycle();
             }
         }
     }
diff --git a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
index 9b7ea88..56dda9c 100644
--- a/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
+++ b/src/csharp/Grpc.Core/Internal/ServerSafeHandle.cs
@@ -75,8 +75,7 @@
         {
             using (completionQueue.NewScope())
             {
-                var ctx = RequestCallContextSafeHandle.Create();
-                completionQueue.CompletionRegistry.RegisterRequestCallCompletion(ctx, callback);
+                var ctx = completionQueue.CompletionRegistry.RegisterRequestCallCompletion(callback);
                 Native.grpcsharp_server_request_call(this, completionQueue, ctx).CheckOk();
             }
         }
diff --git a/src/csharp/Grpc.Core/Server.cs b/src/csharp/Grpc.Core/Server.cs
index 71c7f10..60dacbf 100644
--- a/src/csharp/Grpc.Core/Server.cs
+++ b/src/csharp/Grpc.Core/Server.cs
@@ -300,6 +300,7 @@
         {
             if (!shutdownRequested)
             {
+                // TODO(jtattermusch): avoid unnecessary delegate allocation
                 handle.RequestCall((success, ctx) => HandleNewServerRpc(success, ctx, cq), cq);
             }
         }
diff --git a/src/csharp/Grpc.Core/Version.csproj.include b/src/csharp/Grpc.Core/Version.csproj.include
index 3eeda74..2d9e4ba 100755
--- a/src/csharp/Grpc.Core/Version.csproj.include
+++ b/src/csharp/Grpc.Core/Version.csproj.include
@@ -1,7 +1,7 @@
 <!-- This file is generated -->
 <Project>
   <PropertyGroup>
-    <GrpcCsharpVersion>1.8.3</GrpcCsharpVersion>
+    <GrpcCsharpVersion>1.9.0-dev</GrpcCsharpVersion>
     <GoogleProtobufVersion>3.3.0</GoogleProtobufVersion>
   </PropertyGroup>
 </Project>
diff --git a/src/csharp/Grpc.Core/VersionInfo.cs b/src/csharp/Grpc.Core/VersionInfo.cs
index 7cf012c..9b5da1c 100644
--- a/src/csharp/Grpc.Core/VersionInfo.cs
+++ b/src/csharp/Grpc.Core/VersionInfo.cs
@@ -33,11 +33,11 @@
         /// <summary>
         /// Current <c>AssemblyFileVersion</c> of gRPC C# assemblies
         /// </summary>
-        public const string CurrentAssemblyFileVersion = "1.8.3.0";
+        public const string CurrentAssemblyFileVersion = "1.9.0.0";
 
         /// <summary>
         /// Current version of gRPC C#
         /// </summary>
-        public const string CurrentVersion = "1.8.3";
+        public const string CurrentVersion = "1.9.0-dev";
     }
 }
diff --git a/src/csharp/Grpc.Examples/MathGrpc.cs b/src/csharp/Grpc.Examples/MathGrpc.cs
index 4ed4144..e29b108 100644
--- a/src/csharp/Grpc.Examples/MathGrpc.cs
+++ b/src/csharp/Grpc.Examples/MathGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: math/math.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: math/math.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015 gRPC authors.
 //
diff --git a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
index 3e8eb34..24a7259 100644
--- a/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
+++ b/src/csharp/Grpc.HealthCheck/HealthGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: grpc/health/v1/health.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: grpc/health/v1/health.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015 gRPC authors.
 //
diff --git a/src/csharp/Grpc.IntegrationTesting/ClientRunners.cs b/src/csharp/Grpc.IntegrationTesting/ClientRunners.cs
index 48905a2..9d41d34 100644
--- a/src/csharp/Grpc.IntegrationTesting/ClientRunners.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ClientRunners.cs
@@ -131,7 +131,7 @@
 
         readonly List<Task> runnerTasks;
         readonly CancellationTokenSource stoppedCts = new CancellationTokenSource();
-        readonly WallClockStopwatch wallClockStopwatch = new WallClockStopwatch();
+        readonly TimeStats timeStats = new TimeStats();
         readonly AtomicCounter statsResetCount = new AtomicCounter();
         
         public ClientRunnerImpl(List<Channel> channels, ClientType clientType, RpcType rpcType, int outstandingRpcsPerChannel, LoadParams loadParams, PayloadConfig payloadConfig, HistogramParams histogramParams, Func<BasicProfiler> profilerFactory)
@@ -165,7 +165,7 @@
                 hist.GetSnapshot(histogramData, reset);
             }
 
-            var secondsElapsed = wallClockStopwatch.GetElapsedSnapshot(reset).TotalSeconds;
+            var timeSnapshot = timeStats.GetSnapshot(reset);
 
             if (reset)
             {
@@ -173,15 +173,14 @@
             }
 
             GrpcEnvironment.Logger.Info("[ClientRunnerImpl.GetStats] GC collection counts: gen0 {0}, gen1 {1}, gen2 {2}, (histogram reset count:{3}, seconds since reset: {4})",
-                GC.CollectionCount(0), GC.CollectionCount(1), GC.CollectionCount(2), statsResetCount.Count, secondsElapsed);
+                GC.CollectionCount(0), GC.CollectionCount(1), GC.CollectionCount(2), statsResetCount.Count, timeSnapshot.WallClockTime.TotalSeconds);
 
-            // TODO: populate user time and system time
             return new ClientStats
             {
                 Latencies = histogramData,
-                TimeElapsed = secondsElapsed,
-                TimeUser = 0,
-                TimeSystem = 0
+                TimeElapsed = timeSnapshot.WallClockTime.TotalSeconds,
+                TimeUser = timeSnapshot.UserProcessorTime.TotalSeconds,
+                TimeSystem = timeSnapshot.PrivilegedProcessorTime.TotalSeconds
             };
         }
 
diff --git a/src/csharp/Grpc.IntegrationTesting/Control.cs b/src/csharp/Grpc.IntegrationTesting/Control.cs
index b15da8b..8e5da7b 100644
--- a/src/csharp/Grpc.IntegrationTesting/Control.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Control.cs
@@ -522,10 +522,16 @@
       }
       switch (other.LoadCase) {
         case LoadOneofCase.ClosedLoop:
-          ClosedLoop = other.ClosedLoop;
+          if (ClosedLoop == null) {
+            ClosedLoop = new global::Grpc.Testing.ClosedLoopParams();
+          }
+          ClosedLoop.MergeFrom(other.ClosedLoop);
           break;
         case LoadOneofCase.Poisson:
-          Poisson = other.Poisson;
+          if (Poisson == null) {
+            Poisson = new global::Grpc.Testing.PoissonParams();
+          }
+          Poisson.MergeFrom(other.Poisson);
           break;
       }
 
@@ -1901,10 +1907,16 @@
       }
       switch (other.ArgtypeCase) {
         case ArgtypeOneofCase.Setup:
-          Setup = other.Setup;
+          if (Setup == null) {
+            Setup = new global::Grpc.Testing.ClientConfig();
+          }
+          Setup.MergeFrom(other.Setup);
           break;
         case ArgtypeOneofCase.Mark:
-          Mark = other.Mark;
+          if (Mark == null) {
+            Mark = new global::Grpc.Testing.Mark();
+          }
+          Mark.MergeFrom(other.Mark);
           break;
       }
 
@@ -2508,10 +2520,16 @@
       }
       switch (other.ArgtypeCase) {
         case ArgtypeOneofCase.Setup:
-          Setup = other.Setup;
+          if (Setup == null) {
+            Setup = new global::Grpc.Testing.ServerConfig();
+          }
+          Setup.MergeFrom(other.Setup);
           break;
         case ArgtypeOneofCase.Mark:
-          Mark = other.Mark;
+          if (Mark == null) {
+            Mark = new global::Grpc.Testing.Mark();
+          }
+          Mark.MergeFrom(other.Mark);
           break;
       }
 
diff --git a/src/csharp/Grpc.IntegrationTesting/CoreStats/Stats.cs b/src/csharp/Grpc.IntegrationTesting/CoreStats/Stats.cs
new file mode 100644
index 0000000..380294e
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting/CoreStats/Stats.cs
@@ -0,0 +1,623 @@
+// Generated by the protocol buffer compiler.  DO NOT EDIT!
+// source: grpc/core/stats.proto
+#pragma warning disable 1591, 0612, 3021
+#region Designer generated code
+
+using pb = global::Google.Protobuf;
+using pbc = global::Google.Protobuf.Collections;
+using pbr = global::Google.Protobuf.Reflection;
+using scg = global::System.Collections.Generic;
+namespace Grpc.Core {
+
+  /// <summary>Holder for reflection information generated from grpc/core/stats.proto</summary>
+  public static partial class StatsReflection {
+
+    #region Descriptor
+    /// <summary>File descriptor for grpc/core/stats.proto</summary>
+    public static pbr::FileDescriptor Descriptor {
+      get { return descriptor; }
+    }
+    private static pbr::FileDescriptor descriptor;
+
+    static StatsReflection() {
+      byte[] descriptorData = global::System.Convert.FromBase64String(
+          string.Concat(
+            "ChVncnBjL2NvcmUvc3RhdHMucHJvdG8SCWdycGMuY29yZSImCgZCdWNrZXQS",
+            "DQoFc3RhcnQYASABKAESDQoFY291bnQYAiABKAQiLwoJSGlzdG9ncmFtEiIK",
+            "B2J1Y2tldHMYASADKAsyES5ncnBjLmNvcmUuQnVja2V0IlsKBk1ldHJpYxIM",
+            "CgRuYW1lGAEgASgJEg8KBWNvdW50GAogASgESAASKQoJaGlzdG9ncmFtGAsg",
+            "ASgLMhQuZ3JwYy5jb3JlLkhpc3RvZ3JhbUgAQgcKBXZhbHVlIisKBVN0YXRz",
+            "EiIKB21ldHJpY3MYASADKAsyES5ncnBjLmNvcmUuTWV0cmljYgZwcm90bzM="));
+      descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
+          new pbr::FileDescriptor[] { },
+          new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Core.Bucket), global::Grpc.Core.Bucket.Parser, new[]{ "Start", "Count" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Core.Histogram), global::Grpc.Core.Histogram.Parser, new[]{ "Buckets" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Core.Metric), global::Grpc.Core.Metric.Parser, new[]{ "Name", "Count", "Histogram" }, new[]{ "Value" }, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Core.Stats), global::Grpc.Core.Stats.Parser, new[]{ "Metrics" }, null, null, null)
+          }));
+    }
+    #endregion
+
+  }
+  #region Messages
+  public sealed partial class Bucket : pb::IMessage<Bucket> {
+    private static readonly pb::MessageParser<Bucket> _parser = new pb::MessageParser<Bucket>(() => new Bucket());
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<Bucket> Parser { get { return _parser; } }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pbr::MessageDescriptor Descriptor {
+      get { return global::Grpc.Core.StatsReflection.Descriptor.MessageTypes[0]; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    pbr::MessageDescriptor pb::IMessage.Descriptor {
+      get { return Descriptor; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Bucket() {
+      OnConstruction();
+    }
+
+    partial void OnConstruction();
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Bucket(Bucket other) : this() {
+      start_ = other.start_;
+      count_ = other.count_;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Bucket Clone() {
+      return new Bucket(this);
+    }
+
+    /// <summary>Field number for the "start" field.</summary>
+    public const int StartFieldNumber = 1;
+    private double start_;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public double Start {
+      get { return start_; }
+      set {
+        start_ = value;
+      }
+    }
+
+    /// <summary>Field number for the "count" field.</summary>
+    public const int CountFieldNumber = 2;
+    private ulong count_;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public ulong Count {
+      get { return count_; }
+      set {
+        count_ = value;
+      }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override bool Equals(object other) {
+      return Equals(other as Bucket);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public bool Equals(Bucket other) {
+      if (ReferenceEquals(other, null)) {
+        return false;
+      }
+      if (ReferenceEquals(other, this)) {
+        return true;
+      }
+      if (Start != other.Start) return false;
+      if (Count != other.Count) return false;
+      return true;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override int GetHashCode() {
+      int hash = 1;
+      if (Start != 0D) hash ^= Start.GetHashCode();
+      if (Count != 0UL) hash ^= Count.GetHashCode();
+      return hash;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override string ToString() {
+      return pb::JsonFormatter.ToDiagnosticString(this);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void WriteTo(pb::CodedOutputStream output) {
+      if (Start != 0D) {
+        output.WriteRawTag(9);
+        output.WriteDouble(Start);
+      }
+      if (Count != 0UL) {
+        output.WriteRawTag(16);
+        output.WriteUInt64(Count);
+      }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int CalculateSize() {
+      int size = 0;
+      if (Start != 0D) {
+        size += 1 + 8;
+      }
+      if (Count != 0UL) {
+        size += 1 + pb::CodedOutputStream.ComputeUInt64Size(Count);
+      }
+      return size;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(Bucket other) {
+      if (other == null) {
+        return;
+      }
+      if (other.Start != 0D) {
+        Start = other.Start;
+      }
+      if (other.Count != 0UL) {
+        Count = other.Count;
+      }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(pb::CodedInputStream input) {
+      uint tag;
+      while ((tag = input.ReadTag()) != 0) {
+        switch(tag) {
+          default:
+            input.SkipLastField();
+            break;
+          case 9: {
+            Start = input.ReadDouble();
+            break;
+          }
+          case 16: {
+            Count = input.ReadUInt64();
+            break;
+          }
+        }
+      }
+    }
+
+  }
+
+  public sealed partial class Histogram : pb::IMessage<Histogram> {
+    private static readonly pb::MessageParser<Histogram> _parser = new pb::MessageParser<Histogram>(() => new Histogram());
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<Histogram> Parser { get { return _parser; } }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pbr::MessageDescriptor Descriptor {
+      get { return global::Grpc.Core.StatsReflection.Descriptor.MessageTypes[1]; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    pbr::MessageDescriptor pb::IMessage.Descriptor {
+      get { return Descriptor; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Histogram() {
+      OnConstruction();
+    }
+
+    partial void OnConstruction();
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Histogram(Histogram other) : this() {
+      buckets_ = other.buckets_.Clone();
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Histogram Clone() {
+      return new Histogram(this);
+    }
+
+    /// <summary>Field number for the "buckets" field.</summary>
+    public const int BucketsFieldNumber = 1;
+    private static readonly pb::FieldCodec<global::Grpc.Core.Bucket> _repeated_buckets_codec
+        = pb::FieldCodec.ForMessage(10, global::Grpc.Core.Bucket.Parser);
+    private readonly pbc::RepeatedField<global::Grpc.Core.Bucket> buckets_ = new pbc::RepeatedField<global::Grpc.Core.Bucket>();
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<global::Grpc.Core.Bucket> Buckets {
+      get { return buckets_; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override bool Equals(object other) {
+      return Equals(other as Histogram);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public bool Equals(Histogram other) {
+      if (ReferenceEquals(other, null)) {
+        return false;
+      }
+      if (ReferenceEquals(other, this)) {
+        return true;
+      }
+      if(!buckets_.Equals(other.buckets_)) return false;
+      return true;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override int GetHashCode() {
+      int hash = 1;
+      hash ^= buckets_.GetHashCode();
+      return hash;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override string ToString() {
+      return pb::JsonFormatter.ToDiagnosticString(this);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void WriteTo(pb::CodedOutputStream output) {
+      buckets_.WriteTo(output, _repeated_buckets_codec);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int CalculateSize() {
+      int size = 0;
+      size += buckets_.CalculateSize(_repeated_buckets_codec);
+      return size;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(Histogram other) {
+      if (other == null) {
+        return;
+      }
+      buckets_.Add(other.buckets_);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(pb::CodedInputStream input) {
+      uint tag;
+      while ((tag = input.ReadTag()) != 0) {
+        switch(tag) {
+          default:
+            input.SkipLastField();
+            break;
+          case 10: {
+            buckets_.AddEntriesFrom(input, _repeated_buckets_codec);
+            break;
+          }
+        }
+      }
+    }
+
+  }
+
+  public sealed partial class Metric : pb::IMessage<Metric> {
+    private static readonly pb::MessageParser<Metric> _parser = new pb::MessageParser<Metric>(() => new Metric());
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<Metric> Parser { get { return _parser; } }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pbr::MessageDescriptor Descriptor {
+      get { return global::Grpc.Core.StatsReflection.Descriptor.MessageTypes[2]; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    pbr::MessageDescriptor pb::IMessage.Descriptor {
+      get { return Descriptor; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Metric() {
+      OnConstruction();
+    }
+
+    partial void OnConstruction();
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Metric(Metric other) : this() {
+      name_ = other.name_;
+      switch (other.ValueCase) {
+        case ValueOneofCase.Count:
+          Count = other.Count;
+          break;
+        case ValueOneofCase.Histogram:
+          Histogram = other.Histogram.Clone();
+          break;
+      }
+
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Metric Clone() {
+      return new Metric(this);
+    }
+
+    /// <summary>Field number for the "name" field.</summary>
+    public const int NameFieldNumber = 1;
+    private string name_ = "";
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public string Name {
+      get { return name_; }
+      set {
+        name_ = pb::ProtoPreconditions.CheckNotNull(value, "value");
+      }
+    }
+
+    /// <summary>Field number for the "count" field.</summary>
+    public const int CountFieldNumber = 10;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public ulong Count {
+      get { return valueCase_ == ValueOneofCase.Count ? (ulong) value_ : 0UL; }
+      set {
+        value_ = value;
+        valueCase_ = ValueOneofCase.Count;
+      }
+    }
+
+    /// <summary>Field number for the "histogram" field.</summary>
+    public const int HistogramFieldNumber = 11;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public global::Grpc.Core.Histogram Histogram {
+      get { return valueCase_ == ValueOneofCase.Histogram ? (global::Grpc.Core.Histogram) value_ : null; }
+      set {
+        value_ = value;
+        valueCase_ = value == null ? ValueOneofCase.None : ValueOneofCase.Histogram;
+      }
+    }
+
+    private object value_;
+    /// <summary>Enum of possible cases for the "value" oneof.</summary>
+    public enum ValueOneofCase {
+      None = 0,
+      Count = 10,
+      Histogram = 11,
+    }
+    private ValueOneofCase valueCase_ = ValueOneofCase.None;
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public ValueOneofCase ValueCase {
+      get { return valueCase_; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void ClearValue() {
+      valueCase_ = ValueOneofCase.None;
+      value_ = null;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override bool Equals(object other) {
+      return Equals(other as Metric);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public bool Equals(Metric other) {
+      if (ReferenceEquals(other, null)) {
+        return false;
+      }
+      if (ReferenceEquals(other, this)) {
+        return true;
+      }
+      if (Name != other.Name) return false;
+      if (Count != other.Count) return false;
+      if (!object.Equals(Histogram, other.Histogram)) return false;
+      if (ValueCase != other.ValueCase) return false;
+      return true;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override int GetHashCode() {
+      int hash = 1;
+      if (Name.Length != 0) hash ^= Name.GetHashCode();
+      if (valueCase_ == ValueOneofCase.Count) hash ^= Count.GetHashCode();
+      if (valueCase_ == ValueOneofCase.Histogram) hash ^= Histogram.GetHashCode();
+      hash ^= (int) valueCase_;
+      return hash;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override string ToString() {
+      return pb::JsonFormatter.ToDiagnosticString(this);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void WriteTo(pb::CodedOutputStream output) {
+      if (Name.Length != 0) {
+        output.WriteRawTag(10);
+        output.WriteString(Name);
+      }
+      if (valueCase_ == ValueOneofCase.Count) {
+        output.WriteRawTag(80);
+        output.WriteUInt64(Count);
+      }
+      if (valueCase_ == ValueOneofCase.Histogram) {
+        output.WriteRawTag(90);
+        output.WriteMessage(Histogram);
+      }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int CalculateSize() {
+      int size = 0;
+      if (Name.Length != 0) {
+        size += 1 + pb::CodedOutputStream.ComputeStringSize(Name);
+      }
+      if (valueCase_ == ValueOneofCase.Count) {
+        size += 1 + pb::CodedOutputStream.ComputeUInt64Size(Count);
+      }
+      if (valueCase_ == ValueOneofCase.Histogram) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(Histogram);
+      }
+      return size;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(Metric other) {
+      if (other == null) {
+        return;
+      }
+      if (other.Name.Length != 0) {
+        Name = other.Name;
+      }
+      switch (other.ValueCase) {
+        case ValueOneofCase.Count:
+          Count = other.Count;
+          break;
+        case ValueOneofCase.Histogram:
+          if (Histogram == null) {
+            Histogram = new global::Grpc.Core.Histogram();
+          }
+          Histogram.MergeFrom(other.Histogram);
+          break;
+      }
+
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(pb::CodedInputStream input) {
+      uint tag;
+      while ((tag = input.ReadTag()) != 0) {
+        switch(tag) {
+          default:
+            input.SkipLastField();
+            break;
+          case 10: {
+            Name = input.ReadString();
+            break;
+          }
+          case 80: {
+            Count = input.ReadUInt64();
+            break;
+          }
+          case 90: {
+            global::Grpc.Core.Histogram subBuilder = new global::Grpc.Core.Histogram();
+            if (valueCase_ == ValueOneofCase.Histogram) {
+              subBuilder.MergeFrom(Histogram);
+            }
+            input.ReadMessage(subBuilder);
+            Histogram = subBuilder;
+            break;
+          }
+        }
+      }
+    }
+
+  }
+
+  public sealed partial class Stats : pb::IMessage<Stats> {
+    private static readonly pb::MessageParser<Stats> _parser = new pb::MessageParser<Stats>(() => new Stats());
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pb::MessageParser<Stats> Parser { get { return _parser; } }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public static pbr::MessageDescriptor Descriptor {
+      get { return global::Grpc.Core.StatsReflection.Descriptor.MessageTypes[3]; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    pbr::MessageDescriptor pb::IMessage.Descriptor {
+      get { return Descriptor; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Stats() {
+      OnConstruction();
+    }
+
+    partial void OnConstruction();
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Stats(Stats other) : this() {
+      metrics_ = other.metrics_.Clone();
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public Stats Clone() {
+      return new Stats(this);
+    }
+
+    /// <summary>Field number for the "metrics" field.</summary>
+    public const int MetricsFieldNumber = 1;
+    private static readonly pb::FieldCodec<global::Grpc.Core.Metric> _repeated_metrics_codec
+        = pb::FieldCodec.ForMessage(10, global::Grpc.Core.Metric.Parser);
+    private readonly pbc::RepeatedField<global::Grpc.Core.Metric> metrics_ = new pbc::RepeatedField<global::Grpc.Core.Metric>();
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public pbc::RepeatedField<global::Grpc.Core.Metric> Metrics {
+      get { return metrics_; }
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override bool Equals(object other) {
+      return Equals(other as Stats);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public bool Equals(Stats other) {
+      if (ReferenceEquals(other, null)) {
+        return false;
+      }
+      if (ReferenceEquals(other, this)) {
+        return true;
+      }
+      if(!metrics_.Equals(other.metrics_)) return false;
+      return true;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override int GetHashCode() {
+      int hash = 1;
+      hash ^= metrics_.GetHashCode();
+      return hash;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public override string ToString() {
+      return pb::JsonFormatter.ToDiagnosticString(this);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void WriteTo(pb::CodedOutputStream output) {
+      metrics_.WriteTo(output, _repeated_metrics_codec);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int CalculateSize() {
+      int size = 0;
+      size += metrics_.CalculateSize(_repeated_metrics_codec);
+      return size;
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(Stats other) {
+      if (other == null) {
+        return;
+      }
+      metrics_.Add(other.metrics_);
+    }
+
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public void MergeFrom(pb::CodedInputStream input) {
+      uint tag;
+      while ((tag = input.ReadTag()) != 0) {
+        switch(tag) {
+          default:
+            input.SkipLastField();
+            break;
+          case 10: {
+            metrics_.AddEntriesFrom(input, _repeated_metrics_codec);
+            break;
+          }
+        }
+      }
+    }
+
+  }
+
+  #endregion
+
+}
+
+#endregion Designer generated code
diff --git a/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs b/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs
index b2fe73a..9581ade 100644
--- a/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs
+++ b/src/csharp/Grpc.IntegrationTesting/EchoMessages.cs
@@ -26,7 +26,7 @@
             "DGdycGMudGVzdGluZyIyCglEZWJ1Z0luZm8SFQoNc3RhY2tfZW50cmllcxgB",
             "IAMoCRIOCgZkZXRhaWwYAiABKAkiUAoLRXJyb3JTdGF0dXMSDAoEY29kZRgB",
             "IAEoBRIVCg1lcnJvcl9tZXNzYWdlGAIgASgJEhwKFGJpbmFyeV9lcnJvcl9k",
-            "ZXRhaWxzGAMgASgJIskDCg1SZXF1ZXN0UGFyYW1zEhUKDWVjaG9fZGVhZGxp",
+            "ZXRhaWxzGAMgASgJIuIDCg1SZXF1ZXN0UGFyYW1zEhUKDWVjaG9fZGVhZGxp",
             "bmUYASABKAgSHgoWY2xpZW50X2NhbmNlbF9hZnRlcl91cxgCIAEoBRIeChZz",
             "ZXJ2ZXJfY2FuY2VsX2FmdGVyX3VzGAMgASgFEhUKDWVjaG9fbWV0YWRhdGEY",
             "BCABKAgSGgoSY2hlY2tfYXV0aF9jb250ZXh0GAUgASgIEh8KF3Jlc3BvbnNl",
@@ -36,18 +36,19 @@
             "X3R5cGUYCiABKAkSKwoKZGVidWdfaW5mbxgLIAEoCzIXLmdycGMudGVzdGlu",
             "Zy5EZWJ1Z0luZm8SEgoKc2VydmVyX2RpZRgMIAEoCBIcChRiaW5hcnlfZXJy",
             "b3JfZGV0YWlscxgNIAEoCRIxCg5leHBlY3RlZF9lcnJvchgOIAEoCzIZLmdy",
-            "cGMudGVzdGluZy5FcnJvclN0YXR1cyJKCgtFY2hvUmVxdWVzdBIPCgdtZXNz",
-            "YWdlGAEgASgJEioKBXBhcmFtGAIgASgLMhsuZ3JwYy50ZXN0aW5nLlJlcXVl",
-            "c3RQYXJhbXMiRgoOUmVzcG9uc2VQYXJhbXMSGAoQcmVxdWVzdF9kZWFkbGlu",
-            "ZRgBIAEoAxIMCgRob3N0GAIgASgJEgwKBHBlZXIYAyABKAkiTAoMRWNob1Jl",
-            "c3BvbnNlEg8KB21lc3NhZ2UYASABKAkSKwoFcGFyYW0YAiABKAsyHC5ncnBj",
-            "LnRlc3RpbmcuUmVzcG9uc2VQYXJhbXNiBnByb3RvMw=="));
+            "cGMudGVzdGluZy5FcnJvclN0YXR1cxIXCg9zZXJ2ZXJfc2xlZXBfdXMYDyAB",
+            "KAUiSgoLRWNob1JlcXVlc3QSDwoHbWVzc2FnZRgBIAEoCRIqCgVwYXJhbRgC",
+            "IAEoCzIbLmdycGMudGVzdGluZy5SZXF1ZXN0UGFyYW1zIkYKDlJlc3BvbnNl",
+            "UGFyYW1zEhgKEHJlcXVlc3RfZGVhZGxpbmUYASABKAMSDAoEaG9zdBgCIAEo",
+            "CRIMCgRwZWVyGAMgASgJIkwKDEVjaG9SZXNwb25zZRIPCgdtZXNzYWdlGAEg",
+            "ASgJEisKBXBhcmFtGAIgASgLMhwuZ3JwYy50ZXN0aW5nLlJlc3BvbnNlUGFy",
+            "YW1zYgZwcm90bzM="));
       descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
           new pbr::FileDescriptor[] { },
           new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.DebugInfo), global::Grpc.Testing.DebugInfo.Parser, new[]{ "StackEntries", "Detail" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ErrorStatus), global::Grpc.Testing.ErrorStatus.Parser, new[]{ "Code", "ErrorMessage", "BinaryErrorDetails" }, null, null, null),
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.RequestParams), global::Grpc.Testing.RequestParams.Parser, new[]{ "EchoDeadline", "ClientCancelAfterUs", "ServerCancelAfterUs", "EchoMetadata", "CheckAuthContext", "ResponseMessageLength", "EchoPeer", "ExpectedClientIdentity", "SkipCancelledCheck", "ExpectedTransportSecurityType", "DebugInfo", "ServerDie", "BinaryErrorDetails", "ExpectedError" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.RequestParams), global::Grpc.Testing.RequestParams.Parser, new[]{ "EchoDeadline", "ClientCancelAfterUs", "ServerCancelAfterUs", "EchoMetadata", "CheckAuthContext", "ResponseMessageLength", "EchoPeer", "ExpectedClientIdentity", "SkipCancelledCheck", "ExpectedTransportSecurityType", "DebugInfo", "ServerDie", "BinaryErrorDetails", "ExpectedError", "ServerSleepUs" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.EchoRequest), global::Grpc.Testing.EchoRequest.Parser, new[]{ "Message", "Param" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ResponseParams), global::Grpc.Testing.ResponseParams.Parser, new[]{ "RequestDeadline", "Host", "Peer" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.EchoResponse), global::Grpc.Testing.EchoResponse.Parser, new[]{ "Message", "Param" }, null, null, null)
@@ -411,6 +412,7 @@
       serverDie_ = other.serverDie_;
       binaryErrorDetails_ = other.binaryErrorDetails_;
       ExpectedError = other.expectedError_ != null ? other.ExpectedError.Clone() : null;
+      serverSleepUs_ = other.serverSleepUs_;
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -578,6 +580,20 @@
       }
     }
 
+    /// <summary>Field number for the "server_sleep_us" field.</summary>
+    public const int ServerSleepUsFieldNumber = 15;
+    private int serverSleepUs_;
+    /// <summary>
+    /// Amount of time (in microseconds) the server should sleep when handling the call
+    /// </summary>
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public int ServerSleepUs {
+      get { return serverSleepUs_; }
+      set {
+        serverSleepUs_ = value;
+      }
+    }
+
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
     public override bool Equals(object other) {
       return Equals(other as RequestParams);
@@ -605,6 +621,7 @@
       if (ServerDie != other.ServerDie) return false;
       if (BinaryErrorDetails != other.BinaryErrorDetails) return false;
       if (!object.Equals(ExpectedError, other.ExpectedError)) return false;
+      if (ServerSleepUs != other.ServerSleepUs) return false;
       return true;
     }
 
@@ -625,6 +642,7 @@
       if (ServerDie != false) hash ^= ServerDie.GetHashCode();
       if (BinaryErrorDetails.Length != 0) hash ^= BinaryErrorDetails.GetHashCode();
       if (expectedError_ != null) hash ^= ExpectedError.GetHashCode();
+      if (ServerSleepUs != 0) hash ^= ServerSleepUs.GetHashCode();
       return hash;
     }
 
@@ -691,6 +709,10 @@
         output.WriteRawTag(114);
         output.WriteMessage(ExpectedError);
       }
+      if (ServerSleepUs != 0) {
+        output.WriteRawTag(120);
+        output.WriteInt32(ServerSleepUs);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -738,6 +760,9 @@
       if (expectedError_ != null) {
         size += 1 + pb::CodedOutputStream.ComputeMessageSize(ExpectedError);
       }
+      if (ServerSleepUs != 0) {
+        size += 1 + pb::CodedOutputStream.ComputeInt32Size(ServerSleepUs);
+      }
       return size;
     }
 
@@ -794,6 +819,9 @@
         }
         ExpectedError.MergeFrom(other.ExpectedError);
       }
+      if (other.ServerSleepUs != 0) {
+        ServerSleepUs = other.ServerSleepUs;
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -866,6 +894,10 @@
             input.ReadMessage(expectedError_);
             break;
           }
+          case 120: {
+            ServerSleepUs = input.ReadInt32();
+            break;
+          }
         }
       }
     }
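
The new server_sleep_us field (number 15) threads through every serialization-related member of RequestParams above: Equals, GetHashCode, WriteTo, CalculateSize and both MergeFrom overloads. The raw tag 120 it writes and parses follows from the standard proto3 wire format; a minimal sketch of that arithmetic, using hypothetical local names:

    // Proto3 wire tag = (field_number << 3) | wire_type; int32 uses wire type 0 (varint).
    const int serverSleepUsFieldNumber = 15;
    const int varintWireType = 0;
    int rawTag = (serverSleepUsFieldNumber << 3) | varintWireType;  // 120, matching WriteRawTag(120) and case 120
    // CalculateSize adds 1 for this single-byte tag plus the varint size of the value.
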
diff --git a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
index 9a664f3..f71d6d1 100644
--- a/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/MetricsGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: src/proto/grpc/testing/metrics.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: src/proto/grpc/testing/metrics.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015-2016 gRPC authors.
 //
diff --git a/src/csharp/Grpc.IntegrationTesting/Payloads.cs b/src/csharp/Grpc.IntegrationTesting/Payloads.cs
index f918b95..fca8cda 100644
--- a/src/csharp/Grpc.IntegrationTesting/Payloads.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Payloads.cs
@@ -596,13 +596,22 @@
       }
       switch (other.PayloadCase) {
         case PayloadOneofCase.BytebufParams:
-          BytebufParams = other.BytebufParams;
+          if (BytebufParams == null) {
+            BytebufParams = new global::Grpc.Testing.ByteBufferParams();
+          }
+          BytebufParams.MergeFrom(other.BytebufParams);
           break;
         case PayloadOneofCase.SimpleParams:
-          SimpleParams = other.SimpleParams;
+          if (SimpleParams == null) {
+            SimpleParams = new global::Grpc.Testing.SimpleProtoParams();
+          }
+          SimpleParams.MergeFrom(other.SimpleParams);
           break;
         case PayloadOneofCase.ComplexParams:
-          ComplexParams = other.ComplexParams;
+          if (ComplexParams == null) {
+            ComplexParams = new global::Grpc.Testing.ComplexProtoParams();
+          }
+          ComplexParams.MergeFrom(other.ComplexParams);
           break;
       }
 
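The change to the MergeFrom above brings message-typed oneof fields in line with proto3 merge semantics: instead of aliasing the other object's sub-message (and silently dropping any fields already set locally), the incoming sub-message is now merged field by field into the existing one. A small sketch of the difference, assuming the generated PayloadConfig and ByteBufferParams members (ReqSize, RespSize) from payloads.proto:

    // Hypothetical values; only the merge behaviour matters here.
    var local = new PayloadConfig { BytebufParams = new ByteBufferParams { ReqSize = 1 } };
    var incoming = new PayloadConfig { BytebufParams = new ByteBufferParams { RespSize = 2 } };
    local.MergeFrom(incoming);
    // Old behaviour: local.BytebufParams became a reference to incoming's message, so ReqSize was lost.
    // New behaviour: the two sub-messages are merged, keeping ReqSize == 1 and RespSize == 2.
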
diff --git a/src/csharp/Grpc.IntegrationTesting/ServerRunners.cs b/src/csharp/Grpc.IntegrationTesting/ServerRunners.cs
index e1b4774..ea29bd7 100644
--- a/src/csharp/Grpc.IntegrationTesting/ServerRunners.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ServerRunners.cs
@@ -117,7 +117,7 @@
     public class ServerRunnerImpl : IServerRunner
     {
         readonly Server server;
-        readonly WallClockStopwatch wallClockStopwatch = new WallClockStopwatch();
+        readonly TimeStats timeStats = new TimeStats();
 
         public ServerRunnerImpl(Server server)
         {
@@ -138,17 +138,16 @@
         /// <returns>The stats.</returns>
         public ServerStats GetStats(bool reset)
         {
-            var secondsElapsed = wallClockStopwatch.GetElapsedSnapshot(reset).TotalSeconds;
+            var timeSnapshot = timeStats.GetSnapshot(reset);
 
             GrpcEnvironment.Logger.Info("[ServerRunner.GetStats] GC collection counts: gen0 {0}, gen1 {1}, gen2 {2}, (seconds since last reset {3})",
-                GC.CollectionCount(0), GC.CollectionCount(1), GC.CollectionCount(2), secondsElapsed);
+                GC.CollectionCount(0), GC.CollectionCount(1), GC.CollectionCount(2), timeSnapshot.WallClockTime.TotalSeconds);
 
-            // TODO: populate user time and system time
             return new ServerStats
             {
-                TimeElapsed = secondsElapsed,
-                TimeUser = 0,
-                TimeSystem = 0
+                TimeElapsed = timeSnapshot.WallClockTime.TotalSeconds,
+                TimeUser = timeSnapshot.UserProcessorTime.TotalSeconds,
+                TimeSystem = timeSnapshot.PrivilegedProcessorTime.TotalSeconds
             };
         }
 
diff --git a/src/csharp/Grpc.IntegrationTesting/Services.cs b/src/csharp/Grpc.IntegrationTesting/Services.cs
index 7a0845d..4b76170 100644
--- a/src/csharp/Grpc.IntegrationTesting/Services.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Services.cs
@@ -24,28 +24,27 @@
           string.Concat(
             "CiVzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL3NlcnZpY2VzLnByb3RvEgxncnBj",
             "LnRlc3RpbmcaJXNyYy9wcm90by9ncnBjL3Rlc3RpbmcvbWVzc2FnZXMucHJv",
-            "dG8aJHNyYy9wcm90by9ncnBjL3Rlc3RpbmcvY29udHJvbC5wcm90bxoic3Jj",
-            "L3Byb3RvL2dycGMvdGVzdGluZy9zdGF0cy5wcm90bzKmAwoQQmVuY2htYXJr",
-            "U2VydmljZRJGCglVbmFyeUNhbGwSGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVx",
-            "dWVzdBocLmdycGMudGVzdGluZy5TaW1wbGVSZXNwb25zZRJOCg1TdHJlYW1p",
-            "bmdDYWxsEhsuZ3JwYy50ZXN0aW5nLlNpbXBsZVJlcXVlc3QaHC5ncnBjLnRl",
-            "c3RpbmcuU2ltcGxlUmVzcG9uc2UoATABElIKE1N0cmVhbWluZ0Zyb21DbGll",
-            "bnQSGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdycGMudGVzdGlu",
-            "Zy5TaW1wbGVSZXNwb25zZSgBElIKE1N0cmVhbWluZ0Zyb21TZXJ2ZXISGy5n",
-            "cnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdycGMudGVzdGluZy5TaW1w",
-            "bGVSZXNwb25zZTABElIKEVN0cmVhbWluZ0JvdGhXYXlzEhsuZ3JwYy50ZXN0",
-            "aW5nLlNpbXBsZVJlcXVlc3QaHC5ncnBjLnRlc3RpbmcuU2ltcGxlUmVzcG9u",
-            "c2UoATABMpcCCg1Xb3JrZXJTZXJ2aWNlEkUKCVJ1blNlcnZlchIYLmdycGMu",
-            "dGVzdGluZy5TZXJ2ZXJBcmdzGhouZ3JwYy50ZXN0aW5nLlNlcnZlclN0YXR1",
-            "cygBMAESRQoJUnVuQ2xpZW50EhguZ3JwYy50ZXN0aW5nLkNsaWVudEFyZ3Ma",
-            "Gi5ncnBjLnRlc3RpbmcuQ2xpZW50U3RhdHVzKAEwARJCCglDb3JlQ291bnQS",
-            "GS5ncnBjLnRlc3RpbmcuQ29yZVJlcXVlc3QaGi5ncnBjLnRlc3RpbmcuQ29y",
-            "ZVJlc3BvbnNlEjQKClF1aXRXb3JrZXISEi5ncnBjLnRlc3RpbmcuVm9pZBoS",
-            "LmdycGMudGVzdGluZy5Wb2lkMl4KGFJlcG9ydFFwc1NjZW5hcmlvU2Vydmlj",
-            "ZRJCCg5SZXBvcnRTY2VuYXJpbxIcLmdycGMudGVzdGluZy5TY2VuYXJpb1Jl",
-            "c3VsdBoSLmdycGMudGVzdGluZy5Wb2lkYgZwcm90bzM="));
+            "dG8aJHNyYy9wcm90by9ncnBjL3Rlc3RpbmcvY29udHJvbC5wcm90bzKmAwoQ",
+            "QmVuY2htYXJrU2VydmljZRJGCglVbmFyeUNhbGwSGy5ncnBjLnRlc3Rpbmcu",
+            "U2ltcGxlUmVxdWVzdBocLmdycGMudGVzdGluZy5TaW1wbGVSZXNwb25zZRJO",
+            "Cg1TdHJlYW1pbmdDYWxsEhsuZ3JwYy50ZXN0aW5nLlNpbXBsZVJlcXVlc3Qa",
+            "HC5ncnBjLnRlc3RpbmcuU2ltcGxlUmVzcG9uc2UoATABElIKE1N0cmVhbWlu",
+            "Z0Zyb21DbGllbnQSGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdy",
+            "cGMudGVzdGluZy5TaW1wbGVSZXNwb25zZSgBElIKE1N0cmVhbWluZ0Zyb21T",
+            "ZXJ2ZXISGy5ncnBjLnRlc3RpbmcuU2ltcGxlUmVxdWVzdBocLmdycGMudGVz",
+            "dGluZy5TaW1wbGVSZXNwb25zZTABElIKEVN0cmVhbWluZ0JvdGhXYXlzEhsu",
+            "Z3JwYy50ZXN0aW5nLlNpbXBsZVJlcXVlc3QaHC5ncnBjLnRlc3RpbmcuU2lt",
+            "cGxlUmVzcG9uc2UoATABMpcCCg1Xb3JrZXJTZXJ2aWNlEkUKCVJ1blNlcnZl",
+            "chIYLmdycGMudGVzdGluZy5TZXJ2ZXJBcmdzGhouZ3JwYy50ZXN0aW5nLlNl",
+            "cnZlclN0YXR1cygBMAESRQoJUnVuQ2xpZW50EhguZ3JwYy50ZXN0aW5nLkNs",
+            "aWVudEFyZ3MaGi5ncnBjLnRlc3RpbmcuQ2xpZW50U3RhdHVzKAEwARJCCglD",
+            "b3JlQ291bnQSGS5ncnBjLnRlc3RpbmcuQ29yZVJlcXVlc3QaGi5ncnBjLnRl",
+            "c3RpbmcuQ29yZVJlc3BvbnNlEjQKClF1aXRXb3JrZXISEi5ncnBjLnRlc3Rp",
+            "bmcuVm9pZBoSLmdycGMudGVzdGluZy5Wb2lkMl4KGFJlcG9ydFFwc1NjZW5h",
+            "cmlvU2VydmljZRJCCg5SZXBvcnRTY2VuYXJpbxIcLmdycGMudGVzdGluZy5T",
+            "Y2VuYXJpb1Jlc3VsdBoSLmdycGMudGVzdGluZy5Wb2lkYgZwcm90bzM="));
       descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
-          new pbr::FileDescriptor[] { global::Grpc.Testing.MessagesReflection.Descriptor, global::Grpc.Testing.ControlReflection.Descriptor, global::Grpc.Testing.StatsReflection.Descriptor, },
+          new pbr::FileDescriptor[] { global::Grpc.Testing.MessagesReflection.Descriptor, global::Grpc.Testing.ControlReflection.Descriptor, },
           new pbr::GeneratedClrTypeInfo(null, null));
     }
     #endregion
diff --git a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
index bfae4ee..d2e4f2e 100644
--- a/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/ServicesGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: src/proto/grpc/testing/services.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: src/proto/grpc/testing/services.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015 gRPC authors.
 //
diff --git a/src/csharp/Grpc.IntegrationTesting/Stats.cs b/src/csharp/Grpc.IntegrationTesting/Stats.cs
index 23b56df..e082ae7 100644
--- a/src/csharp/Grpc.IntegrationTesting/Stats.cs
+++ b/src/csharp/Grpc.IntegrationTesting/Stats.cs
@@ -23,28 +23,30 @@
       byte[] descriptorData = global::System.Convert.FromBase64String(
           string.Concat(
             "CiJzcmMvcHJvdG8vZ3JwYy90ZXN0aW5nL3N0YXRzLnByb3RvEgxncnBjLnRl",
-            "c3RpbmcikQEKC1NlcnZlclN0YXRzEhQKDHRpbWVfZWxhcHNlZBgBIAEoARIR",
-            "Cgl0aW1lX3VzZXIYAiABKAESEwoLdGltZV9zeXN0ZW0YAyABKAESFgoOdG90",
-            "YWxfY3B1X3RpbWUYBCABKAQSFQoNaWRsZV9jcHVfdGltZRgFIAEoBBIVCg1j",
-            "cV9wb2xsX2NvdW50GAYgASgEIjsKD0hpc3RvZ3JhbVBhcmFtcxISCgpyZXNv",
-            "bHV0aW9uGAEgASgBEhQKDG1heF9wb3NzaWJsZRgCIAEoASJ3Cg1IaXN0b2dy",
-            "YW1EYXRhEg4KBmJ1Y2tldBgBIAMoDRIQCghtaW5fc2VlbhgCIAEoARIQCght",
-            "YXhfc2VlbhgDIAEoARILCgNzdW0YBCABKAESFgoOc3VtX29mX3NxdWFyZXMY",
-            "BSABKAESDQoFY291bnQYBiABKAEiOAoSUmVxdWVzdFJlc3VsdENvdW50EhMK",
-            "C3N0YXR1c19jb2RlGAEgASgFEg0KBWNvdW50GAIgASgDIs0BCgtDbGllbnRT",
-            "dGF0cxIuCglsYXRlbmNpZXMYASABKAsyGy5ncnBjLnRlc3RpbmcuSGlzdG9n",
-            "cmFtRGF0YRIUCgx0aW1lX2VsYXBzZWQYAiABKAESEQoJdGltZV91c2VyGAMg",
-            "ASgBEhMKC3RpbWVfc3lzdGVtGAQgASgBEjkKD3JlcXVlc3RfcmVzdWx0cxgF",
-            "IAMoCzIgLmdycGMudGVzdGluZy5SZXF1ZXN0UmVzdWx0Q291bnQSFQoNY3Ff",
-            "cG9sbF9jb3VudBgGIAEoBGIGcHJvdG8z"));
+            "c3RpbmcaH3NyYy9wcm90by9ncnBjL2NvcmUvc3RhdHMucHJvdG8itwEKC1Nl",
+            "cnZlclN0YXRzEhQKDHRpbWVfZWxhcHNlZBgBIAEoARIRCgl0aW1lX3VzZXIY",
+            "AiABKAESEwoLdGltZV9zeXN0ZW0YAyABKAESFgoOdG90YWxfY3B1X3RpbWUY",
+            "BCABKAQSFQoNaWRsZV9jcHVfdGltZRgFIAEoBBIVCg1jcV9wb2xsX2NvdW50",
+            "GAYgASgEEiQKCmNvcmVfc3RhdHMYByABKAsyEC5ncnBjLmNvcmUuU3RhdHMi",
+            "OwoPSGlzdG9ncmFtUGFyYW1zEhIKCnJlc29sdXRpb24YASABKAESFAoMbWF4",
+            "X3Bvc3NpYmxlGAIgASgBIncKDUhpc3RvZ3JhbURhdGESDgoGYnVja2V0GAEg",
+            "AygNEhAKCG1pbl9zZWVuGAIgASgBEhAKCG1heF9zZWVuGAMgASgBEgsKA3N1",
+            "bRgEIAEoARIWCg5zdW1fb2Zfc3F1YXJlcxgFIAEoARINCgVjb3VudBgGIAEo",
+            "ASI4ChJSZXF1ZXN0UmVzdWx0Q291bnQSEwoLc3RhdHVzX2NvZGUYASABKAUS",
+            "DQoFY291bnQYAiABKAMi8wEKC0NsaWVudFN0YXRzEi4KCWxhdGVuY2llcxgB",
+            "IAEoCzIbLmdycGMudGVzdGluZy5IaXN0b2dyYW1EYXRhEhQKDHRpbWVfZWxh",
+            "cHNlZBgCIAEoARIRCgl0aW1lX3VzZXIYAyABKAESEwoLdGltZV9zeXN0ZW0Y",
+            "BCABKAESOQoPcmVxdWVzdF9yZXN1bHRzGAUgAygLMiAuZ3JwYy50ZXN0aW5n",
+            "LlJlcXVlc3RSZXN1bHRDb3VudBIVCg1jcV9wb2xsX2NvdW50GAYgASgEEiQK",
+            "CmNvcmVfc3RhdHMYByABKAsyEC5ncnBjLmNvcmUuU3RhdHNiBnByb3RvMw=="));
       descriptor = pbr::FileDescriptor.FromGeneratedCode(descriptorData,
-          new pbr::FileDescriptor[] { },
+          new pbr::FileDescriptor[] { global::Grpc.Core.StatsReflection.Descriptor, },
           new pbr::GeneratedClrTypeInfo(null, new pbr::GeneratedClrTypeInfo[] {
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerStats), global::Grpc.Testing.ServerStats.Parser, new[]{ "TimeElapsed", "TimeUser", "TimeSystem", "TotalCpuTime", "IdleCpuTime", "CqPollCount" }, null, null, null),
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ServerStats), global::Grpc.Testing.ServerStats.Parser, new[]{ "TimeElapsed", "TimeUser", "TimeSystem", "TotalCpuTime", "IdleCpuTime", "CqPollCount", "CoreStats" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.HistogramParams), global::Grpc.Testing.HistogramParams.Parser, new[]{ "Resolution", "MaxPossible" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.HistogramData), global::Grpc.Testing.HistogramData.Parser, new[]{ "Bucket", "MinSeen", "MaxSeen", "Sum", "SumOfSquares", "Count" }, null, null, null),
             new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.RequestResultCount), global::Grpc.Testing.RequestResultCount.Parser, new[]{ "StatusCode", "Count" }, null, null, null),
-            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientStats), global::Grpc.Testing.ClientStats.Parser, new[]{ "Latencies", "TimeElapsed", "TimeUser", "TimeSystem", "RequestResults", "CqPollCount" }, null, null, null)
+            new pbr::GeneratedClrTypeInfo(typeof(global::Grpc.Testing.ClientStats), global::Grpc.Testing.ClientStats.Parser, new[]{ "Latencies", "TimeElapsed", "TimeUser", "TimeSystem", "RequestResults", "CqPollCount", "CoreStats" }, null, null, null)
           }));
     }
     #endregion
@@ -81,6 +83,7 @@
       totalCpuTime_ = other.totalCpuTime_;
       idleCpuTime_ = other.idleCpuTime_;
       cqPollCount_ = other.cqPollCount_;
+      CoreStats = other.coreStats_ != null ? other.CoreStats.Clone() : null;
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -173,6 +176,20 @@
       }
     }
 
+    /// <summary>Field number for the "core_stats" field.</summary>
+    public const int CoreStatsFieldNumber = 7;
+    private global::Grpc.Core.Stats coreStats_;
+    /// <summary>
+    /// Core library stats
+    /// </summary>
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public global::Grpc.Core.Stats CoreStats {
+      get { return coreStats_; }
+      set {
+        coreStats_ = value;
+      }
+    }
+
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
     public override bool Equals(object other) {
       return Equals(other as ServerStats);
@@ -192,6 +209,7 @@
       if (TotalCpuTime != other.TotalCpuTime) return false;
       if (IdleCpuTime != other.IdleCpuTime) return false;
       if (CqPollCount != other.CqPollCount) return false;
+      if (!object.Equals(CoreStats, other.CoreStats)) return false;
       return true;
     }
 
@@ -204,6 +222,7 @@
       if (TotalCpuTime != 0UL) hash ^= TotalCpuTime.GetHashCode();
       if (IdleCpuTime != 0UL) hash ^= IdleCpuTime.GetHashCode();
       if (CqPollCount != 0UL) hash ^= CqPollCount.GetHashCode();
+      if (coreStats_ != null) hash ^= CoreStats.GetHashCode();
       return hash;
     }
 
@@ -238,6 +257,10 @@
         output.WriteRawTag(48);
         output.WriteUInt64(CqPollCount);
       }
+      if (coreStats_ != null) {
+        output.WriteRawTag(58);
+        output.WriteMessage(CoreStats);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -261,6 +284,9 @@
       if (CqPollCount != 0UL) {
         size += 1 + pb::CodedOutputStream.ComputeUInt64Size(CqPollCount);
       }
+      if (coreStats_ != null) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(CoreStats);
+      }
       return size;
     }
 
@@ -287,6 +313,12 @@
       if (other.CqPollCount != 0UL) {
         CqPollCount = other.CqPollCount;
       }
+      if (other.coreStats_ != null) {
+        if (coreStats_ == null) {
+          coreStats_ = new global::Grpc.Core.Stats();
+        }
+        CoreStats.MergeFrom(other.CoreStats);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -321,6 +353,13 @@
             CqPollCount = input.ReadUInt64();
             break;
           }
+          case 58: {
+            if (coreStats_ == null) {
+              coreStats_ = new global::Grpc.Core.Stats();
+            }
+            input.ReadMessage(coreStats_);
+            break;
+          }
         }
       }
     }
@@ -909,6 +948,7 @@
       timeSystem_ = other.timeSystem_;
       requestResults_ = other.requestResults_.Clone();
       cqPollCount_ = other.cqPollCount_;
+      CoreStats = other.coreStats_ != null ? other.CoreStats.Clone() : null;
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -993,6 +1033,20 @@
       }
     }
 
+    /// <summary>Field number for the "core_stats" field.</summary>
+    public const int CoreStatsFieldNumber = 7;
+    private global::Grpc.Core.Stats coreStats_;
+    /// <summary>
+    /// Core library stats
+    /// </summary>
+    [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
+    public global::Grpc.Core.Stats CoreStats {
+      get { return coreStats_; }
+      set {
+        coreStats_ = value;
+      }
+    }
+
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
     public override bool Equals(object other) {
       return Equals(other as ClientStats);
@@ -1012,6 +1066,7 @@
       if (TimeSystem != other.TimeSystem) return false;
       if(!requestResults_.Equals(other.requestResults_)) return false;
       if (CqPollCount != other.CqPollCount) return false;
+      if (!object.Equals(CoreStats, other.CoreStats)) return false;
       return true;
     }
 
@@ -1024,6 +1079,7 @@
       if (TimeSystem != 0D) hash ^= TimeSystem.GetHashCode();
       hash ^= requestResults_.GetHashCode();
       if (CqPollCount != 0UL) hash ^= CqPollCount.GetHashCode();
+      if (coreStats_ != null) hash ^= CoreStats.GetHashCode();
       return hash;
     }
 
@@ -1055,6 +1111,10 @@
         output.WriteRawTag(48);
         output.WriteUInt64(CqPollCount);
       }
+      if (coreStats_ != null) {
+        output.WriteRawTag(58);
+        output.WriteMessage(CoreStats);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1076,6 +1136,9 @@
       if (CqPollCount != 0UL) {
         size += 1 + pb::CodedOutputStream.ComputeUInt64Size(CqPollCount);
       }
+      if (coreStats_ != null) {
+        size += 1 + pb::CodedOutputStream.ComputeMessageSize(CoreStats);
+      }
       return size;
     }
 
@@ -1103,6 +1166,12 @@
       if (other.CqPollCount != 0UL) {
         CqPollCount = other.CqPollCount;
       }
+      if (other.coreStats_ != null) {
+        if (coreStats_ == null) {
+          coreStats_ = new global::Grpc.Core.Stats();
+        }
+        CoreStats.MergeFrom(other.CoreStats);
+      }
     }
 
     [global::System.Diagnostics.DebuggerNonUserCodeAttribute]
@@ -1140,6 +1209,13 @@
             CqPollCount = input.ReadUInt64();
             break;
           }
+          case 58: {
+            if (coreStats_ == null) {
+              coreStats_ = new global::Grpc.Core.Stats();
+            }
+            input.ReadMessage(coreStats_);
+            break;
+          }
         }
       }
     }
diff --git a/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs b/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs
index 11956e4..0c62380 100644
--- a/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs
+++ b/src/csharp/Grpc.IntegrationTesting/StressTestClient.cs
@@ -243,7 +243,7 @@
             const string GaugeName = "csharp_overall_qps";
 
             readonly Histogram histogram;
-            readonly WallClockStopwatch wallClockStopwatch = new WallClockStopwatch();
+            readonly TimeStats timeStats = new TimeStats();
 
             public MetricsServiceImpl(Histogram histogram)
             {
@@ -280,9 +280,9 @@
             long GetQpsAndReset()
             {
                 var snapshot = histogram.GetSnapshot(true);
-                var elapsedSnapshot = wallClockStopwatch.GetElapsedSnapshot(true);
+                var timeSnapshot = timeStats.GetSnapshot(true);
 
-                return (long) (snapshot.Count / elapsedSnapshot.TotalSeconds);
+                return (long) (snapshot.Count / timeSnapshot.WallClockTime.TotalSeconds);
             }
         }
     }
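
GetQpsAndReset computes the same quantity as before, calls completed divided by the wall-clock length of the snapshot window; only the source of that window changed from WallClockStopwatch to TimeStats. With made-up numbers:

    // Hypothetical snapshot: 50000 calls observed over a 5-second window.
    long callCount = 50000;
    double windowSeconds = 5.0;
    long qps = (long)(callCount / windowSeconds);  // 10000, reported via the csharp_overall_qps gauge
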
diff --git a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
index b419dd1..c0d147c 100644
--- a/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
+++ b/src/csharp/Grpc.IntegrationTesting/TestGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: src/proto/grpc/testing/test.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: src/proto/grpc/testing/test.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2015-2016 gRPC authors.
 //
diff --git a/src/csharp/Grpc.IntegrationTesting/TimeStats.cs b/src/csharp/Grpc.IntegrationTesting/TimeStats.cs
new file mode 100644
index 0000000..6aba04c
--- /dev/null
+++ b/src/csharp/Grpc.IntegrationTesting/TimeStats.cs
@@ -0,0 +1,90 @@
+#region Copyright notice and license
+
+// Copyright 2015 gRPC authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#endregion
+
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.IO;
+using System.Linq;
+using System.Text.RegularExpressions;
+using System.Threading;
+using System.Threading.Tasks;
+using Google.Protobuf;
+using Grpc.Core;
+using Grpc.Core.Utils;
+using NUnit.Framework;
+using Grpc.Testing;
+
+namespace Grpc.IntegrationTesting
+{
+    /// <summary>
+    /// Snapshottable time statistics.
+    /// </summary>
+    public class TimeStats
+    {
+        readonly object myLock = new object();
+        DateTime lastWallClock;
+        TimeSpan lastUserTime;
+        TimeSpan lastPrivilegedTime;
+
+        public TimeStats()
+        {
+            lastWallClock = DateTime.UtcNow;
+            lastUserTime = Process.GetCurrentProcess().UserProcessorTime;
+            lastPrivilegedTime = Process.GetCurrentProcess().PrivilegedProcessorTime;
+        }
+
+        public Snapshot GetSnapshot(bool reset)
+        {
+            lock (myLock)
+            {
+                var wallClock = DateTime.UtcNow;
+                var userTime = Process.GetCurrentProcess().UserProcessorTime;
+                var privilegedTime = Process.GetCurrentProcess().PrivilegedProcessorTime;
+                var snapshot = new Snapshot(wallClock - lastWallClock, userTime - lastUserTime, privilegedTime - lastPrivilegedTime);
+
+                if (reset)
+                {
+                    lastWallClock = wallClock;
+                    lastUserTime = userTime;
+                    lastPrivilegedTime = privilegedTime;
+                }
+                return snapshot;
+            }
+        }
+
+        public class Snapshot
+        {
+            public TimeSpan WallClockTime { get; }
+            public TimeSpan UserProcessorTime { get; }
+            public TimeSpan PrivilegedProcessorTime { get; }
+
+            public Snapshot(TimeSpan wallClockTime, TimeSpan userProcessorTime, TimeSpan privilegedProcessorTime)
+            {
+                this.WallClockTime = wallClockTime;
+                this.UserProcessorTime = userProcessorTime;
+                this.PrivilegedProcessorTime = privilegedProcessorTime;
+            }
+
+            public override string ToString()
+            {
+                return string.Format("[TimeStats.Snapshot: wallClock {0}, userProcessor {1}, privilegedProcessor {2}]", WallClockTime, UserProcessorTime, PrivilegedProcessorTime);
+            }
+        }
+    }
+}
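
TimeStats generalizes the WallClockStopwatch it replaces (removed below): each snapshot carries user and privileged (system) processor time alongside wall-clock time, which is what lets ServerRunnerImpl.GetStats above fill in TimeUser and TimeSystem instead of reporting zeros. A minimal usage sketch:

    var timeStats = new TimeStats();
    // ... run the workload being measured ...
    var snapshot = timeStats.GetSnapshot(reset: true);  // reset: true starts a fresh measurement window
    Console.WriteLine("wall {0:F1}s, user {1:F1}s, system {2:F1}s",
        snapshot.WallClockTime.TotalSeconds,
        snapshot.UserProcessorTime.TotalSeconds,
        snapshot.PrivilegedProcessorTime.TotalSeconds);
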
diff --git a/src/csharp/Grpc.IntegrationTesting/WallClockStopwatch.cs b/src/csharp/Grpc.IntegrationTesting/WallClockStopwatch.cs
deleted file mode 100644
index 38b58f2..0000000
--- a/src/csharp/Grpc.IntegrationTesting/WallClockStopwatch.cs
+++ /dev/null
@@ -1,63 +0,0 @@
-#region Copyright notice and license
-
-// Copyright 2015 gRPC authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#endregion
-
-using System;
-using System.Collections.Generic;
-using System.Diagnostics;
-using System.IO;
-using System.Linq;
-using System.Text.RegularExpressions;
-using System.Threading;
-using System.Threading.Tasks;
-using Google.Protobuf;
-using Grpc.Core;
-using Grpc.Core.Utils;
-using NUnit.Framework;
-using Grpc.Testing;
-
-namespace Grpc.IntegrationTesting
-{
-    /// <summary>
-    /// Snapshottable wall clock stopwatch.
-    /// </summary>
-    public class WallClockStopwatch
-    {
-        long startTicks;
-
-        public WallClockStopwatch()
-        {
-            this.startTicks = DateTime.UtcNow.Ticks;
-        }
-
-        public TimeSpan GetElapsedSnapshot(bool reset)
-        {
-            var utcNow = DateTime.UtcNow;
-
-            long oldStartTicks;
-            if (reset)
-            {
-                oldStartTicks = Interlocked.Exchange(ref this.startTicks, utcNow.Ticks);
-            }
-            else
-            {
-                oldStartTicks = this.startTicks;
-            }
-            return utcNow - new DateTime(oldStartTicks, DateTimeKind.Utc);
-        }
-    }
-}
diff --git a/src/csharp/Grpc.Microbenchmarks/CompletionRegistryBenchmark.cs b/src/csharp/Grpc.Microbenchmarks/CompletionRegistryBenchmark.cs
index eefdb50..bb57a69 100644
--- a/src/csharp/Grpc.Microbenchmarks/CompletionRegistryBenchmark.cs
+++ b/src/csharp/Grpc.Microbenchmarks/CompletionRegistryBenchmark.cs
@@ -43,7 +43,7 @@
         public void Run(int threadCount, int iterations, bool useSharedRegistry)
         {
             Console.WriteLine(string.Format("CompletionRegistryBenchmark: threads={0}, iterations={1}, useSharedRegistry={2}", threadCount, iterations, useSharedRegistry));
-            CompletionRegistry sharedRegistry = useSharedRegistry ? new CompletionRegistry(environment, () => BatchContextSafeHandle.Create()) : null;
+            CompletionRegistry sharedRegistry = useSharedRegistry ? new CompletionRegistry(environment, () => BatchContextSafeHandle.Create(), () => RequestCallContextSafeHandle.Create()) : null;
             var threadedBenchmark = new ThreadedBenchmark(threadCount, () => ThreadBody(iterations, sharedRegistry));
             threadedBenchmark.Run();
             // TODO: parametrize by number of pending completions
@@ -51,7 +51,7 @@
 
         private void ThreadBody(int iterations, CompletionRegistry optionalSharedRegistry)
         {
-            var completionRegistry = optionalSharedRegistry ?? new CompletionRegistry(environment, () => BatchContextSafeHandle.Create());
+            var completionRegistry = optionalSharedRegistry ?? new CompletionRegistry(environment, () => throw new NotImplementedException(), () => throw new NotImplementedException());
             var ctx = BatchContextSafeHandle.Create();
   
             var stopwatch = Stopwatch.StartNew();
diff --git a/src/csharp/Grpc.Microbenchmarks/SendMessageBenchmark.cs b/src/csharp/Grpc.Microbenchmarks/SendMessageBenchmark.cs
index da4f35f..390c062 100644
--- a/src/csharp/Grpc.Microbenchmarks/SendMessageBenchmark.cs
+++ b/src/csharp/Grpc.Microbenchmarks/SendMessageBenchmark.cs
@@ -52,7 +52,7 @@
 
         private void ThreadBody(int iterations, int payloadSize)
         {
-            var completionRegistry = new CompletionRegistry(environment, () => environment.BatchContextPool.Lease());
+            var completionRegistry = new CompletionRegistry(environment, () => environment.BatchContextPool.Lease(), () => throw new NotImplementedException());
             var cq = CompletionQueueSafeHandle.CreateAsync(completionRegistry);
             var call = CreateFakeCall(cq);
 
diff --git a/src/csharp/Grpc.Reflection/Reflection.cs b/src/csharp/Grpc.Reflection/Reflection.cs
index 86e9aac..60090e5 100644
--- a/src/csharp/Grpc.Reflection/Reflection.cs
+++ b/src/csharp/Grpc.Reflection/Reflection.cs
@@ -345,7 +345,10 @@
           FileContainingSymbol = other.FileContainingSymbol;
           break;
         case MessageRequestOneofCase.FileContainingExtension:
-          FileContainingExtension = other.FileContainingExtension;
+          if (FileContainingExtension == null) {
+            FileContainingExtension = new global::Grpc.Reflection.V1Alpha.ExtensionRequest();
+          }
+          FileContainingExtension.MergeFrom(other.FileContainingExtension);
           break;
         case MessageRequestOneofCase.AllExtensionNumbersOfType:
           AllExtensionNumbersOfType = other.AllExtensionNumbersOfType;
@@ -816,16 +819,28 @@
       }
       switch (other.MessageResponseCase) {
         case MessageResponseOneofCase.FileDescriptorResponse:
-          FileDescriptorResponse = other.FileDescriptorResponse;
+          if (FileDescriptorResponse == null) {
+            FileDescriptorResponse = new global::Grpc.Reflection.V1Alpha.FileDescriptorResponse();
+          }
+          FileDescriptorResponse.MergeFrom(other.FileDescriptorResponse);
           break;
         case MessageResponseOneofCase.AllExtensionNumbersResponse:
-          AllExtensionNumbersResponse = other.AllExtensionNumbersResponse;
+          if (AllExtensionNumbersResponse == null) {
+            AllExtensionNumbersResponse = new global::Grpc.Reflection.V1Alpha.ExtensionNumberResponse();
+          }
+          AllExtensionNumbersResponse.MergeFrom(other.AllExtensionNumbersResponse);
           break;
         case MessageResponseOneofCase.ListServicesResponse:
-          ListServicesResponse = other.ListServicesResponse;
+          if (ListServicesResponse == null) {
+            ListServicesResponse = new global::Grpc.Reflection.V1Alpha.ListServiceResponse();
+          }
+          ListServicesResponse.MergeFrom(other.ListServicesResponse);
           break;
         case MessageResponseOneofCase.ErrorResponse:
-          ErrorResponse = other.ErrorResponse;
+          if (ErrorResponse == null) {
+            ErrorResponse = new global::Grpc.Reflection.V1Alpha.ErrorResponse();
+          }
+          ErrorResponse.MergeFrom(other.ErrorResponse);
           break;
       }
 
diff --git a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
index 5843957..0195186 100644
--- a/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
+++ b/src/csharp/Grpc.Reflection/ReflectionGrpc.cs
@@ -1,5 +1,7 @@
-// Generated by the protocol buffer compiler.  DO NOT EDIT!
-// source: grpc/reflection/v1alpha/reflection.proto
+// <auto-generated>
+//     Generated by the protocol buffer compiler.  DO NOT EDIT!
+//     source: grpc/reflection/v1alpha/reflection.proto
+// </auto-generated>
 // Original file comments:
 // Copyright 2016 gRPC authors.
 //
diff --git a/src/csharp/build_packages_dotnetcli.bat b/src/csharp/build_packages_dotnetcli.bat
index 86657c9..8f89e28 100755
--- a/src/csharp/build_packages_dotnetcli.bat
+++ b/src/csharp/build_packages_dotnetcli.bat
@@ -13,7 +13,7 @@
 @rem limitations under the License.
 
 @rem Current package versions
-set VERSION=1.8.3
+set VERSION=1.9.0-dev
 
 @rem Adjust the location of nuget.exe
 set NUGET=C:\nuget\nuget.exe
diff --git a/src/csharp/build_packages_dotnetcli.sh b/src/csharp/build_packages_dotnetcli.sh
index 33c34a0..6a6cafe 100755
--- a/src/csharp/build_packages_dotnetcli.sh
+++ b/src/csharp/build_packages_dotnetcli.sh
@@ -39,7 +39,7 @@
 dotnet pack --configuration Release Grpc.HealthCheck --output ../../../artifacts
 dotnet pack --configuration Release Grpc.Reflection --output ../../../artifacts
 
-nuget pack Grpc.nuspec -Version "1.8.3" -OutputDirectory ../../artifacts
-nuget pack Grpc.Tools.nuspec -Version "1.8.3" -OutputDirectory ../../artifacts
+nuget pack Grpc.nuspec -Version "1.9.0-dev" -OutputDirectory ../../artifacts
+nuget pack Grpc.Tools.nuspec -Version "1.9.0-dev" -OutputDirectory ../../artifacts
 
 (cd ../../artifacts && zip csharp_nugets_dotnetcli.zip *.nupkg)
diff --git a/src/csharp/ext/grpc_csharp_ext.c b/src/csharp/ext/grpc_csharp_ext.c
index 24d779e..6875d40 100644
--- a/src/csharp/ext/grpc_csharp_ext.c
+++ b/src/csharp/ext/grpc_csharp_ext.c
@@ -226,17 +226,22 @@
 }
 
 GPR_EXPORT void GPR_CALLTYPE
-grpcsharp_request_call_context_destroy(grpcsharp_request_call_context* ctx) {
-  if (!ctx) {
-    return;
-  }
+grpcsharp_request_call_context_reset(grpcsharp_request_call_context* ctx) {
   /* NOTE: ctx->server_rpc_new.call is not destroyed because callback handler is
      supposed
      to take its ownership. */
 
   grpc_call_details_destroy(&(ctx->call_details));
   grpcsharp_metadata_array_destroy_metadata_only(&(ctx->request_metadata));
+  memset(ctx, 0, sizeof(grpcsharp_request_call_context));
+}
 
+GPR_EXPORT void GPR_CALLTYPE
+grpcsharp_request_call_context_destroy(grpcsharp_request_call_context* ctx) {
+  if (!ctx) {
+    return;
+  }
+  grpcsharp_request_call_context_reset(ctx);
   gpr_free(ctx);
 }
 
diff --git a/src/csharp/generate_proto_csharp.sh b/src/csharp/generate_proto_csharp.sh
index 1a1adbb..299dc3f 100755
--- a/src/csharp/generate_proto_csharp.sh
+++ b/src/csharp/generate_proto_csharp.sh
@@ -33,6 +33,11 @@
 $PROTOC --plugin=$PLUGIN --csharp_out=$REFLECTION_DIR --grpc_out=$REFLECTION_DIR \
     -I src/proto src/proto/grpc/reflection/v1alpha/reflection.proto
 
+# Put grpc/core/stats.proto in a subdirectory to avoid a collision with grpc/testing/stats.proto
+mkdir -p $TESTING_DIR/CoreStats
+$PROTOC --plugin=$PLUGIN --csharp_out=$TESTING_DIR/CoreStats --grpc_out=$TESTING_DIR/CoreStats \
+    -I src/proto src/proto/grpc/core/stats.proto
+
 # TODO(jtattermusch): following .proto files are a bit broken and import paths
 # don't match the package names. Setting -I to the correct value src/proto
 # breaks the code generation.
diff --git a/src/csharp/tests.json b/src/csharp/tests.json
index 82573ed..469328a 100644
--- a/src/csharp/tests.json
+++ b/src/csharp/tests.json
@@ -14,6 +14,7 @@
     "Grpc.Core.Tests.CallCancellationTest",
     "Grpc.Core.Tests.CallCredentialsTest",
     "Grpc.Core.Tests.CallOptionsTest",
+    "Grpc.Core.Tests.ChannelConnectivityTest",
     "Grpc.Core.Tests.ChannelCredentialsTest",
     "Grpc.Core.Tests.ChannelOptionsTest",
     "Grpc.Core.Tests.ChannelTest",
diff --git "a/src/objective-c/\041ProtoCompiler-gRPCPlugin.podspec" "b/src/objective-c/\041ProtoCompiler-gRPCPlugin.podspec"
index fc4859e..2250176 100644
--- "a/src/objective-c/\041ProtoCompiler-gRPCPlugin.podspec"
+++ "b/src/objective-c/\041ProtoCompiler-gRPCPlugin.podspec"
@@ -42,7 +42,7 @@
   # exclamation mark ensures that other "regular" pods will be able to find it as it'll be installed
   # before them.
   s.name     = '!ProtoCompiler-gRPCPlugin'
-  v = '1.8.3'
+  v = '1.9.0-dev'
   s.version  = v
   s.summary  = 'The gRPC ProtoC plugin generates Objective-C files from .proto services.'
   s.description = <<-DESC
diff --git a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h
index 9f2361b..18d4597 100644
--- a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h
+++ b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.h
@@ -19,6 +19,12 @@
 
 #include <AvailabilityMacros.h>
 
+typedef NS_ENUM(NSInteger, GRPCCompressAlgorithm) {
+  GRPCCompressNone,
+  GRPCCompressDeflate,
+  GRPCCompressGzip,
+};
+
 /**
  * Methods to configure GRPC channel options.
  */
@@ -36,4 +42,8 @@
 + (void)closeOpenConnections DEPRECATED_MSG_ATTRIBUTE("The API for this feature is experimental, "
                                                       "and might be removed or modified at any "
                                                       "time.");
+
++ (void)setDefaultCompressMethod:(GRPCCompressAlgorithm)algorithm
+                         forhost:(nonnull NSString *)host;
+
 @end
diff --git a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.m b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.m
index 398d98f..805e54b 100644
--- a/src/objective-c/GRPCClient/GRPCCall+ChannelArg.m
+++ b/src/objective-c/GRPCClient/GRPCCall+ChannelArg.m
@@ -20,6 +20,8 @@
 
 #import "private/GRPCHost.h"
 
+#import <grpc/impl/codegen/compression_types.h>
+
 @implementation GRPCCall (ChannelArg)
 
 + (void)setUserAgentPrefix:(nonnull NSString *)userAgentPrefix forHost:(nonnull NSString *)host {
@@ -36,4 +38,23 @@
   [GRPCHost flushChannelCache];
 }
 
++ (void)setDefaultCompressMethod:(GRPCCompressAlgorithm)algorithm
+                         forhost:(nonnull NSString *)host {
+  GRPCHost *hostConfig = [GRPCHost hostWithAddress:host];
+  switch (algorithm) {
+    case GRPCCompressNone:
+      hostConfig.compressAlgorithm = GRPC_COMPRESS_NONE;
+      break;
+    case GRPCCompressDeflate:
+      hostConfig.compressAlgorithm = GRPC_COMPRESS_DEFLATE;
+      break;
+    case GRPCCompressGzip:
+      hostConfig.compressAlgorithm = GRPC_COMPRESS_GZIP;
+      break;
+    default:
+      NSLog(@"Invalid compression algorithm");
+      abort();
+  }
+}
+
 @end
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.h b/src/objective-c/GRPCClient/private/GRPCHost.h
index 5817121..0215db8 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.h
+++ b/src/objective-c/GRPCClient/private/GRPCHost.h
@@ -18,6 +18,8 @@
 
 #import <Foundation/Foundation.h>
 
+#import <grpc/impl/codegen/compression_types.h>
+
 NS_ASSUME_NONNULL_BEGIN
 
 @class GRPCCompletionQueue;
@@ -32,6 +34,7 @@
 @property(nonatomic, readonly) NSString *address;
 @property(nonatomic, copy, nullable) NSString *userAgentPrefix;
 @property(nonatomic, nullable) struct grpc_channel_credentials *channelCreds;
+@property(nonatomic) grpc_compression_algorithm compressAlgorithm;
 
 /** The following properties should only be modified for testing: */
 
diff --git a/src/objective-c/GRPCClient/private/GRPCHost.m b/src/objective-c/GRPCClient/private/GRPCHost.m
index a0f4118..665943f 100644
--- a/src/objective-c/GRPCClient/private/GRPCHost.m
+++ b/src/objective-c/GRPCClient/private/GRPCHost.m
@@ -87,6 +87,7 @@
       _address = address;
       _secure = YES;
       kHostCache[address] = self;
+      _compressAlgorithm = GRPC_COMPRESS_NONE;
     }
     // Keep a single monitor to flush the cache if the connectivity status changes
     // Thread safety guarded by @synchronized(kHostCache)
@@ -226,6 +227,12 @@
   }
   // Use 10000ms initial backoff time for correct behavior on bad/slow networks  
   args[@GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS] = @10000;
+
+  if (_compressAlgorithm != GRPC_COMPRESS_NONE) {
+    args[@GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM] =
+        [NSNumber numberWithInt:_compressAlgorithm];
+  }
+
   return args;
 }
 
diff --git a/src/objective-c/GRPCClient/private/version.h b/src/objective-c/GRPCClient/private/version.h
index 3513a7d..69dd626 100644
--- a/src/objective-c/GRPCClient/private/version.h
+++ b/src/objective-c/GRPCClient/private/version.h
@@ -23,4 +23,4 @@
 // `tools/buildgen/generate_projects.sh`.
 
 
-#define GRPC_OBJC_VERSION_STRING @"1.8.3"
+#define GRPC_OBJC_VERSION_STRING @"1.9.0-dev"
diff --git a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.mm b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.mm
index d5e668a..d130971 100644
--- a/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.mm
+++ b/src/objective-c/tests/CoreCronetEnd2EndTests/CoreCronetEnd2EndTests.mm
@@ -110,13 +110,12 @@
 
 static void cronet_init_client_simple_ssl_secure_fullstack(
     grpc_end2end_test_fixture *f, grpc_channel_args *client_args) {
-  grpc_exec_ctx ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   stream_engine *cronetEngine = [Cronet getGlobalEngine];
 
   grpc_channel_args *new_client_args = grpc_channel_args_copy(client_args);
   cronet_init_client_secure_fullstack(f, new_client_args, cronetEngine);
-  grpc_channel_args_destroy(&ctx, new_client_args);
-  grpc_exec_ctx_finish(&ctx);
+  grpc_channel_args_destroy(new_client_args);
 }
 
 static int fail_server_auth_check(grpc_channel_args *server_args) {
diff --git a/src/objective-c/tests/CronetUnitTests/CronetUnitTests.m b/src/objective-c/tests/CronetUnitTests/CronetUnitTests.m
index 0d295fb..92bc20e 100644
--- a/src/objective-c/tests/CronetUnitTests/CronetUnitTests.m
+++ b/src/objective-c/tests/CronetUnitTests/CronetUnitTests.m
@@ -56,7 +56,7 @@
 + (void)setUp {
   [super setUp];
 
-  char *argv[] = {"CoreCronetEnd2EndTests"};
+  char *argv[] = {(char *)"CoreCronetEnd2EndTests"};
   grpc_test_init(1, argv);
 
   grpc_init();
@@ -100,7 +100,7 @@
   // Install server certificate
   BIO *pem = BIO_new_mem_buf((void *)test_server1_cert,
                              (int)strlen(test_server1_cert));
-  X509 *cert = PEM_read_bio_X509_AUX(pem, NULL, NULL, "");
+  X509 *cert = PEM_read_bio_X509_AUX(pem, NULL, NULL, (char *)"");
   SSL_CTX_use_certificate(ctx, cert);
   X509_free(cert);
   BIO_free(pem);
@@ -108,7 +108,7 @@
   // Install server private key
   pem =
       BIO_new_mem_buf((void *)test_server1_key, (int)strlen(test_server1_key));
-  EVP_PKEY *key = PEM_read_bio_PrivateKey(pem, NULL, NULL, "");
+  EVP_PKEY *key = PEM_read_bio_PrivateKey(pem, NULL, NULL, (char *)"");
   SSL_CTX_use_PrivateKey(ctx, key);
   EVP_PKEY_free(key);
   BIO_free(pem);
@@ -258,7 +258,7 @@
 
 - (void)packetCoalescing:(BOOL)useCoalescing {
   grpc_arg arg;
-  arg.key = GRPC_ARG_USE_CRONET_PACKET_COALESCING;
+  arg.key = (char *)GRPC_ARG_USE_CRONET_PACKET_COALESCING;
   arg.type = GRPC_ARG_INTEGER;
   arg.value.integer = useCoalescing ? 1 : 0;
   grpc_channel_args *args = grpc_channel_args_copy_and_add(NULL, &arg, 1);
diff --git a/src/objective-c/tests/GRPCClientTests.m b/src/objective-c/tests/GRPCClientTests.m
index 5672bda..3bab7f6 100644
--- a/src/objective-c/tests/GRPCClientTests.m
+++ b/src/objective-c/tests/GRPCClientTests.m
@@ -95,6 +95,10 @@
 
 @implementation GRPCClientTests
 
++ (void)setUp {
+  NSLog(@"GRPCClientTests Started");
+}
+
 - (void)setUp {
   // Add a custom user agent prefix that will be used in test
   [GRPCCall setUserAgentPrefix:@"Foo" forHost:kHostAddress];
diff --git a/src/objective-c/tests/InteropTests.m b/src/objective-c/tests/InteropTests.m
index e5fcab2..dfa874a 100644
--- a/src/objective-c/tests/InteropTests.m
+++ b/src/objective-c/tests/InteropTests.m
@@ -68,6 +68,10 @@
 }
 @end
 
+BOOL isRemoteInteropTest(NSString *host) {
+  return [host isEqualToString:@"grpc-test.sandbox.googleapis.com"];
+}
+
 #pragma mark Tests
 
 @implementation InteropTests {
@@ -86,6 +90,7 @@
 }
 
 + (void)setUp {
+  NSLog(@"InteropTest Started, class: %@", [[self class] description]);
 #ifdef GRPC_COMPILE_WITH_CRONET
   // Cronet setup
   [Cronet setHttp2Enabled:YES];
@@ -451,4 +456,34 @@
   [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
 }
 
+- (void)testCompressedUnaryRPC {
+  // This test needs to be disabled for the remote test because the interop
+  // server (grpc-test) does not support compression.
+  if (isRemoteInteropTest(self.class.host)) {
+    return;
+  }
+  XCTAssertNotNil(self.class.host);
+  __weak XCTestExpectation *expectation = [self expectationWithDescription:@"LargeUnary"];
+
+  RMTSimpleRequest *request = [RMTSimpleRequest message];
+  request.responseType = RMTPayloadType_Compressable;
+  request.responseSize = 314159;
+  request.payload.body = [NSMutableData dataWithLength:271828];
+  request.expectCompressed.value = YES;
+  [GRPCCall setDefaultCompressMethod:GRPCCompressGzip forhost:self.class.host];
+
+  [_service unaryCallWithRequest:request handler:^(RMTSimpleResponse *response, NSError *error) {
+    XCTAssertNil(error, @"Finished with unexpected error: %@", error);
+
+    RMTSimpleResponse *expectedResponse = [RMTSimpleResponse message];
+    expectedResponse.payload.type = RMTPayloadType_Compressable;
+    expectedResponse.payload.body = [NSMutableData dataWithLength:314159];
+    XCTAssertEqualObjects(response, expectedResponse);
+
+    [expectation fulfill];
+  }];
+
+  [self waitForExpectationsWithTimeout:TEST_TIMEOUT handler:nil];
+}
+
 @end
diff --git a/src/objective-c/tests/RemoteTestClient/messages.proto b/src/objective-c/tests/RemoteTestClient/messages.proto
index 128efd9..342e91d 100644
--- a/src/objective-c/tests/RemoteTestClient/messages.proto
+++ b/src/objective-c/tests/RemoteTestClient/messages.proto
@@ -20,34 +20,45 @@
 
 option objc_class_prefix = "RMT";
 
+// TODO(dgq): Go back to using well-known types once
+// https://github.com/grpc/grpc/issues/6980 has been fixed.
+// import "google/protobuf/wrappers.proto";
+message BoolValue {
+  // The bool value.
+  bool value = 1;
+}
+
+// DEPRECATED, don't use. To be removed shortly.
 // The type of payload that should be returned.
 enum PayloadType {
   // Compressable text format.
   COMPRESSABLE = 0;
-
-  // Uncompressable binary format.
-  UNCOMPRESSABLE = 1;
-
-  // Randomly chosen from all other formats defined in this enum.
-  RANDOM = 2;
 }
 
 // A block of data, to simply increase gRPC message size.
 message Payload {
+  // DEPRECATED, don't use. To be removed shortly.
   // The type of data in body.
   PayloadType type = 1;
   // Primary contents of payload.
   bytes body = 2;
 }
 
+// A protobuf representation for grpc status. This is used by test
+// clients to specify a status that the server should attempt to return.
+message EchoStatus {
+  int32 code = 1;
+  string message = 2;
+}
+
 // Unary request.
 message SimpleRequest {
+  // DEPRECATED, don't use. To be removed shortly.
   // Desired payload type in the response from the server.
   // If response_type is RANDOM, server randomly chooses one from other formats.
   PayloadType response_type = 1;
 
   // Desired payload size in the response from the server.
-  // If response_type is COMPRESSABLE, this denotes the size before compression.
   int32 response_size = 2;
 
   // Optional input payload sent along with the request.
@@ -58,6 +69,18 @@
 
   // Whether SimpleResponse should include OAuth scope.
   bool fill_oauth_scope = 5;
+
+  // Whether to request the server to compress the response. This field is
+  // "nullable" in order to interoperate seamlessly with clients not able to
+  // implement the full compression tests by introspecting the call to verify
+  // the response's compression status.
+  BoolValue response_compressed = 6;
+
+  // Whether server should return a given status
+  EchoStatus response_status = 7;
+
+  // Whether the server should expect this request to be compressed.
+  BoolValue expect_compressed = 8;
 }
 
 // Unary response, as configured by the request.
@@ -76,6 +99,12 @@
   // Optional input payload sent along with the request.
   Payload payload = 1;
 
+  // Whether the server should expect this request to be compressed. This field
+  // is "nullable" in order to interoperate seamlessly with servers not able to
+  // implement the full compression tests by introspecting the call to verify
+  // the request's compression status.
+  BoolValue expect_compressed = 2;
+
   // Not expecting any payload from the response.
 }
 
@@ -88,16 +117,22 @@
 // Configuration for a particular response.
 message ResponseParameters {
   // Desired payload sizes in responses from the server.
-  // If response_type is COMPRESSABLE, this denotes the size before compression.
   int32 size = 1;
 
   // Desired interval between consecutive responses in the response stream in
   // microseconds.
   int32 interval_us = 2;
+
+  // Whether to request the server to compress the response. This field is
+  // "nullable" in order to interoperate seamlessly with clients not able to
+  // implement the full compression tests by introspecting the call to verify
+  // the response's compression status.
+  BoolValue compressed = 3;
 }
 
 // Server-streaming request.
 message StreamingOutputCallRequest {
+  // DEPRECATED, don't use. To be removed shortly.
   // Desired payload type in the response from the server.
   // If response_type is RANDOM, the payload from each response in the stream
   // might be of different types. This is to simulate a mixed type of payload
@@ -109,6 +144,9 @@
 
   // Optional input payload sent along with the request.
   Payload payload = 3;
+
+  // Whether server should return a given status
+  EchoStatus response_status = 7;
 }
 
 // Server-streaming response, as configured by the request and parameters.
@@ -116,3 +154,17 @@
   // Payload to increase response size.
   Payload payload = 1;
 }
+
+// For reconnect interop test only.
+// Client tells server what reconnection parameters it used.
+message ReconnectParams {
+  int32 max_reconnect_backoff_ms = 1;
+}
+
+// For reconnect interop test only.
+// Server tells client whether its reconnects are following the spec and the
+// reconnect backoffs it saw.
+message ReconnectInfo {
+  bool passed = 1;
+  repeated int32 backoff_ms = 2;
+}
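Note on the local BoolValue wrapper introduced above: it keeps "unset" and an explicit false distinguishable, which is what lets servers skip the compression checks for clients that cannot express them. A rough sketch of those presence semantics, assuming a Python module generated from this proto were available under the hypothetical name messages_pb2 (illustrative only, not part of the change):

from messages_pb2 import SimpleRequest  # hypothetical generated module name

# Leaving the wrapper unset means "no opinion"; the server skips the check.
request = SimpleRequest(response_size=314159)
assert not request.HasField('response_compressed')

# Setting the wrapper, even to False, is an explicit request and is visible
# to the server as field presence.
request.response_compressed.value = False
assert request.HasField('response_compressed')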
diff --git a/src/objective-c/tests/RxLibraryUnitTests.m b/src/objective-c/tests/RxLibraryUnitTests.m
index 3a5adbb..aa178f8 100644
--- a/src/objective-c/tests/RxLibraryUnitTests.m
+++ b/src/objective-c/tests/RxLibraryUnitTests.m
@@ -58,6 +58,10 @@
 
 @implementation RxLibraryUnitTests
 
++ (void)setUp {
+  NSLog(@"GRPCClientTests Started");
+}
+
 #pragma mark Writeable
 
 - (void)testWriteableSingleHandlerIsCalledForValue {
diff --git a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
index 52631b4..9a6cb0e 100644
--- a/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
+++ b/src/objective-c/tests/Tests.xcodeproj/project.pbxproj
@@ -1518,6 +1518,7 @@
 				"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
 				DEBUG_INFORMATION_FORMAT = dwarf;
 				ENABLE_TESTABILITY = YES;
+				GCC_INPUT_FILETYPE = sourcecode.cpp.objcpp;
 				INFOPLIST_FILE = CronetUnitTests/Info.plist;
 				IPHONEOS_DEPLOYMENT_TARGET = 9.3;
 				LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
@@ -1567,6 +1568,7 @@
 				"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
 				DEBUG_INFORMATION_FORMAT = dwarf;
 				ENABLE_TESTABILITY = YES;
+				GCC_INPUT_FILETYPE = sourcecode.cpp.objcpp;
 				INFOPLIST_FILE = CronetUnitTests/Info.plist;
 				IPHONEOS_DEPLOYMENT_TARGET = 9.3;
 				LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
@@ -1582,6 +1584,7 @@
 			buildSettings = {
 				CLANG_ANALYZER_NONNULL = YES;
 				"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
+				GCC_INPUT_FILETYPE = sourcecode.cpp.objcpp;
 				INFOPLIST_FILE = CronetUnitTests/Info.plist;
 				IPHONEOS_DEPLOYMENT_TARGET = 9.3;
 				LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
@@ -1597,6 +1600,7 @@
 			buildSettings = {
 				CLANG_ANALYZER_NONNULL = YES;
 				"CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer";
+				GCC_INPUT_FILETYPE = sourcecode.cpp.objcpp;
 				INFOPLIST_FILE = CronetUnitTests/Info.plist;
 				IPHONEOS_DEPLOYMENT_TARGET = 9.3;
 				LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks";
diff --git a/src/objective-c/tests/run_tests.sh b/src/objective-c/tests/run_tests.sh
index 608ae68..cec3478 100755
--- a/src/objective-c/tests/run_tests.sh
+++ b/src/objective-c/tests/run_tests.sh
@@ -34,23 +34,50 @@
 # Kill them when this script exits.
 trap 'kill -9 `jobs -p` ; echo "EXIT TIME:  $(date)"' EXIT
 
+set -o pipefail
+
 # xcodebuild is very verbose. We filter its output and tell Bash to fail if any
 # element of the pipe fails.
 # TODO(jcanizales): Use xctool instead? Issue #2540.
-set -o pipefail
-XCODEBUILD_FILTER='(^CompileC |^Ld |^.*clang |^ *cd |^ *export |^Libtool |^.*libtool |^CpHeader |^ *builtin-copy )'
+XCODEBUILD_FILTER='(^CompileC |^Ld |^ *[^ ]*clang |^ *cd |^ *export |^Libtool |^ *[^ ]*libtool |^CpHeader |^ *builtin-copy )'
+
 echo "TIME:  $(date)"
-xcodebuild \
-    -workspace Tests.xcworkspace \
-    -scheme AllTests \
-    -destination name="iPhone 6" \
-    HOST_PORT_LOCALSSL=localhost:5051 \
-    HOST_PORT_LOCAL=localhost:5050 \
-    HOST_PORT_REMOTE=grpc-test.sandbox.googleapis.com \
-    test \
-    | egrep -v "$XCODEBUILD_FILTER" \
-    | egrep -v '^$' \
-    | egrep -v "(GPBDictionary|GPBArray)" -
+
+# Retry the test up to 3 times when the return code is 65, due to an Xcode
+# issue: http://www.openradar.me/29785686
+# The issue appears to be a connectivity problem with the Xcode simulator, so
+# only the first xcodebuild invocation is retried.
+retries=0
+while [ $retries -lt 3 ]; do
+  return_code=0
+  out=$(xcodebuild \
+        -workspace Tests.xcworkspace \
+        -scheme AllTests \
+        -destination name="iPhone 6" \
+        HOST_PORT_LOCALSSL=localhost:5051 \
+        HOST_PORT_LOCAL=localhost:5050 \
+        HOST_PORT_REMOTE=grpc-test.sandbox.googleapis.com \
+        test 2>&1 \
+        | egrep -v "$XCODEBUILD_FILTER" \
+        | egrep -v '^$' \
+        | egrep -v "(GPBDictionary|GPBArray)" - ) || return_code=$?
+  if [ $return_code == 65 ] && [[ $out == *"DTXProxyChannel error 1"* ]]; then
+    echo "$out"
+    echo "Failed with code 65 (DTXProxyChannel error 1); retry."
+    retries=$(($retries+1))
+  elif [ $return_code == 0 ]; then
+    echo "$out"
+    break
+  else
+    echo "$out"
+    echo "Failed with code $return_code."
+    exit 1
+  fi
+done
+if [ $retries == 3 ]; then
+  echo "Failed with code 65 for 3 times; abort."
+  exit 1
+fi
 
 echo "TIME:  $(date)"
 xcodebuild \
@@ -82,3 +109,5 @@
     | egrep -v "$XCODEBUILD_FILTER" \
     | egrep -v '^$' \
     | egrep -v "(GPBDictionary|GPBArray)" -
+
+exit 0
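Note on the retry loop added above: it retries only the one known-flaky failure mode (exit code 65 together with "DTXProxyChannel error 1" in the output), at most three times, and treats any other non-zero exit as fatal. The same policy, sketched in Python purely for illustration (the helper name is made up and not part of the change):

import subprocess

def run_xcodebuild_with_retries(cmd, max_retries=3):
    """Illustrative sketch of the retry policy used by run_tests.sh."""
    for _ in range(max_retries):
        proc = subprocess.run(cmd, capture_output=True, text=True)
        out = proc.stdout + proc.stderr
        print(out)
        if proc.returncode == 0:
            return
        # Only the known-flaky simulator failure is retried.
        if proc.returncode == 65 and 'DTXProxyChannel error 1' in out:
            print('Failed with code 65 (DTXProxyChannel error 1); retry.')
            continue
        raise SystemExit('Failed with code %d.' % proc.returncode)
    raise SystemExit('Failed with code 65 for %d times; abort.' % max_retries)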
diff --git a/src/objective-c/tests/version.h b/src/objective-c/tests/version.h
index 63a0db9..6e3a073 100644
--- a/src/objective-c/tests/version.h
+++ b/src/objective-c/tests/version.h
@@ -23,5 +23,5 @@
 // `tools/buildgen/generate_projects.sh`.
 
 
-#define GRPC_OBJC_VERSION_STRING @"1.8.3"
-#define GRPC_C_VERSION_STRING @"5.0.0"
+#define GRPC_OBJC_VERSION_STRING @"1.9.0-dev"
+#define GRPC_C_VERSION_STRING @"5.0.0-dev"
diff --git a/src/php/composer.json b/src/php/composer.json
index 7c21402..4383398 100644
--- a/src/php/composer.json
+++ b/src/php/composer.json
@@ -2,7 +2,7 @@
   "name": "grpc/grpc-dev",
   "description": "gRPC library for PHP - for Developement use only",
   "license": "Apache-2.0",
-  "version": "1.8.3",
+  "version": "1.9.0",
   "require": {
     "php": ">=5.5.0",
     "google/protobuf": "^v3.3.0"
diff --git a/src/php/ext/grpc/channel_credentials.c b/src/php/ext/grpc/channel_credentials.c
index 86e4f46..d120d6e 100644
--- a/src/php/ext/grpc/channel_credentials.c
+++ b/src/php/ext/grpc/channel_credentials.c
@@ -35,6 +35,7 @@
 #include <zend_hash.h>
 
 #include <grpc/support/alloc.h>
+#include <grpc/support/string_util.h>
 #include <grpc/grpc.h>
 #include <grpc/grpc_security.h>
 
@@ -46,10 +47,11 @@
 
 static grpc_ssl_roots_override_result get_ssl_roots_override(
     char **pem_root_certs) {
-  *pem_root_certs = default_pem_root_certs;
-  if (default_pem_root_certs == NULL) {
+  if (!default_pem_root_certs) {
+    *pem_root_certs = NULL;
     return GRPC_SSL_ROOTS_OVERRIDE_FAIL;
   }
+  *pem_root_certs = gpr_strdup(default_pem_root_certs);
   return GRPC_SSL_ROOTS_OVERRIDE_OK;
 }
 
@@ -101,7 +103,7 @@
                          "setDefaultRootsPem expects 1 string", 1 TSRMLS_CC);
     return;
   }
-  default_pem_root_certs = gpr_malloc((pem_roots_length + 1) * sizeof(char));
+  default_pem_root_certs = gpr_realloc(default_pem_root_certs, (pem_roots_length + 1) * sizeof(char));
   memcpy(default_pem_root_certs, pem_roots, pem_roots_length + 1);
 }
 
diff --git a/src/php/ext/grpc/version.h b/src/php/ext/grpc/version.h
index 48ce15f..48131d7 100644
--- a/src/php/ext/grpc/version.h
+++ b/src/php/ext/grpc/version.h
@@ -20,6 +20,6 @@
 #ifndef VERSION_H
 #define VERSION_H
 
-#define PHP_GRPC_VERSION "1.8.3"
+#define PHP_GRPC_VERSION "1.9.0dev"
 
 #endif /* VERSION_H */
diff --git a/src/proto/grpc/lb/v1/load_balancer.proto b/src/proto/grpc/lb/v1/load_balancer.proto
index 0a33568..75c916d 100644
--- a/src/proto/grpc/lb/v1/load_balancer.proto
+++ b/src/proto/grpc/lb/v1/load_balancer.proto
@@ -133,11 +133,8 @@
   // unless instructed otherwise via the client_config.
   repeated Server servers = 1;
 
-  // Indicates the amount of time that the client should consider this server
-  // list as valid. It may be considered stale after waiting this interval of
-  // time after receiving the list. If the interval is not positive, the
-  // client can assume the list is valid until the next list is received.
-  Duration expiration_interval = 3;
+  // Was google.protobuf.Duration expiration_interval.
+  reserved 3;
 }
 
 // Contains server information. When the drop field is not true, use the other
diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py
index 5f28e91..4c2ebae 100644
--- a/src/python/grpcio/commands.py
+++ b/src/python/grpcio/commands.py
@@ -104,8 +104,8 @@
         with open(bdist_path, 'w') as bdist_file:
             bdist_file.write(bdist_data)
     except IOError as error:
-        raise CommandError('{}\n\nCould not write grpcio bdist: {}'
-                           .format(traceback.format_exc(), error.message))
+        raise CommandError('{}\n\nCould not write grpcio bdist: {}'.format(
+            traceback.format_exc(), error.message))
     return bdist_path
 
 
@@ -141,7 +141,8 @@
         with open(glossary_filepath, 'a') as glossary_filepath:
             glossary_filepath.write(API_GLOSSARY)
         sphinx.main(
-            ['', os.path.join('doc', 'src'), os.path.join('doc', 'build')])
+            ['', os.path.join('doc', 'src'),
+             os.path.join('doc', 'build')])
 
 
 class BuildProjectMetadata(setuptools.Command):
@@ -189,10 +190,11 @@
         for source in extension.sources:
             base, file_ext = os.path.splitext(source)
             if file_ext == '.pyx':
-                generated_pyx_source = next((base + gen_ext
-                                             for gen_ext in ('.c', '.cpp',)
-                                             if os.path.isfile(base + gen_ext)),
-                                            None)
+                generated_pyx_source = next(
+                    (base + gen_ext for gen_ext in (
+                        '.c',
+                        '.cpp',
+                    ) if os.path.isfile(base + gen_ext)), None)
                 if generated_pyx_source:
                     generated_pyx_sources.append(generated_pyx_source)
                 else:
@@ -299,10 +301,10 @@
     """Command to gather project dependencies."""
 
     description = 'gather dependencies for grpcio'
-    user_options = [
-        ('test', 't', 'flag indicating to gather test dependencies'),
-        ('install', 'i', 'flag indicating to gather install dependencies')
-    ]
+    user_options = [('test', 't',
+                     'flag indicating to gather test dependencies'),
+                    ('install', 'i',
+                     'flag indicating to gather install dependencies')]
 
     def initialize_options(self):
         self.test = False
diff --git a/src/python/grpcio/grpc/__init__.py b/src/python/grpcio/grpc/__init__.py
index 8b913ac..db410d3 100644
--- a/src/python/grpcio/grpc/__init__.py
+++ b/src/python/grpcio/grpc/__init__.py
@@ -1376,8 +1376,8 @@
       A CallCredentials.
     """
     from grpc import _plugin_wrapping  # pylint: disable=cyclic-import
-    return _plugin_wrapping.metadata_plugin_call_credentials(metadata_plugin,
-                                                             name)
+    return _plugin_wrapping.metadata_plugin_call_credentials(
+        metadata_plugin, name)
 
 
 def access_token_call_credentials(access_token):
@@ -1631,25 +1631,57 @@
 ###################################  __all__  #################################
 
 __all__ = (
-    'FutureTimeoutError', 'FutureCancelledError', 'Future',
-    'ChannelConnectivity', 'StatusCode', 'RpcError', 'RpcContext', 'Call',
-    'ChannelCredentials', 'CallCredentials', 'AuthMetadataContext',
-    'AuthMetadataPluginCallback', 'AuthMetadataPlugin', 'ClientCallDetails',
-    'ServerCertificateConfiguration', 'ServerCredentials',
-    'UnaryUnaryMultiCallable', 'UnaryStreamMultiCallable',
-    'StreamUnaryMultiCallable', 'StreamStreamMultiCallable',
-    'UnaryUnaryClientInterceptor', 'UnaryStreamClientInterceptor',
-    'StreamUnaryClientInterceptor', 'StreamStreamClientInterceptor', 'Channel',
-    'ServicerContext', 'RpcMethodHandler', 'HandlerCallDetails',
-    'GenericRpcHandler', 'ServiceRpcHandler', 'Server', 'ServerInterceptor',
-    'unary_unary_rpc_method_handler', 'unary_stream_rpc_method_handler',
-    'stream_unary_rpc_method_handler', 'stream_stream_rpc_method_handler',
-    'method_handlers_generic_handler', 'ssl_channel_credentials',
-    'metadata_call_credentials', 'access_token_call_credentials',
-    'composite_call_credentials', 'composite_channel_credentials',
-    'ssl_server_credentials', 'ssl_server_certificate_configuration',
-    'dynamic_ssl_server_credentials', 'channel_ready_future',
-    'insecure_channel', 'secure_channel', 'intercept_channel', 'server',)
+    'FutureTimeoutError',
+    'FutureCancelledError',
+    'Future',
+    'ChannelConnectivity',
+    'StatusCode',
+    'RpcError',
+    'RpcContext',
+    'Call',
+    'ChannelCredentials',
+    'CallCredentials',
+    'AuthMetadataContext',
+    'AuthMetadataPluginCallback',
+    'AuthMetadataPlugin',
+    'ClientCallDetails',
+    'ServerCertificateConfiguration',
+    'ServerCredentials',
+    'UnaryUnaryMultiCallable',
+    'UnaryStreamMultiCallable',
+    'StreamUnaryMultiCallable',
+    'StreamStreamMultiCallable',
+    'UnaryUnaryClientInterceptor',
+    'UnaryStreamClientInterceptor',
+    'StreamUnaryClientInterceptor',
+    'StreamStreamClientInterceptor',
+    'Channel',
+    'ServicerContext',
+    'RpcMethodHandler',
+    'HandlerCallDetails',
+    'GenericRpcHandler',
+    'ServiceRpcHandler',
+    'Server',
+    'ServerInterceptor',
+    'unary_unary_rpc_method_handler',
+    'unary_stream_rpc_method_handler',
+    'stream_unary_rpc_method_handler',
+    'stream_stream_rpc_method_handler',
+    'method_handlers_generic_handler',
+    'ssl_channel_credentials',
+    'metadata_call_credentials',
+    'access_token_call_credentials',
+    'composite_call_credentials',
+    'composite_channel_credentials',
+    'ssl_server_credentials',
+    'ssl_server_certificate_configuration',
+    'dynamic_ssl_server_credentials',
+    'channel_ready_future',
+    'insecure_channel',
+    'secure_channel',
+    'intercept_channel',
+    'server',
+)
 
 ############################### Extension Shims ################################
 
diff --git a/src/python/grpcio/grpc/_auth.py b/src/python/grpcio/grpc/_auth.py
index 9a339b5..c178245 100644
--- a/src/python/grpcio/grpc/_auth.py
+++ b/src/python/grpcio/grpc/_auth.py
@@ -54,7 +54,9 @@
         if self._is_jwt:
             future = self._pool.submit(
                 self._credentials.get_access_token,
-                additional_claims={'aud': context.service_url})
+                additional_claims={
+                    'aud': context.service_url
+                })
         else:
             future = self._pool.submit(self._credentials.get_access_token)
         future.add_done_callback(_create_get_token_callback(callback))
diff --git a/src/python/grpcio/grpc/_channel.py b/src/python/grpcio/grpc/_channel.py
index 3572737..24be042 100644
--- a/src/python/grpcio/grpc/_channel.py
+++ b/src/python/grpcio/grpc/_channel.py
@@ -29,24 +29,32 @@
 _EMPTY_FLAGS = 0
 _INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
 
-_UNARY_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
-                            cygrpc.OperationType.send_message,
-                            cygrpc.OperationType.send_close_from_client,
-                            cygrpc.OperationType.receive_initial_metadata,
-                            cygrpc.OperationType.receive_message,
-                            cygrpc.OperationType.receive_status_on_client,)
-_UNARY_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
-                             cygrpc.OperationType.send_message,
-                             cygrpc.OperationType.send_close_from_client,
-                             cygrpc.OperationType.receive_initial_metadata,
-                             cygrpc.OperationType.receive_status_on_client,)
-_STREAM_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
-                             cygrpc.OperationType.receive_initial_metadata,
-                             cygrpc.OperationType.receive_message,
-                             cygrpc.OperationType.receive_status_on_client,)
-_STREAM_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
-                              cygrpc.OperationType.receive_initial_metadata,
-                              cygrpc.OperationType.receive_status_on_client,)
+_UNARY_UNARY_INITIAL_DUE = (
+    cygrpc.OperationType.send_initial_metadata,
+    cygrpc.OperationType.send_message,
+    cygrpc.OperationType.send_close_from_client,
+    cygrpc.OperationType.receive_initial_metadata,
+    cygrpc.OperationType.receive_message,
+    cygrpc.OperationType.receive_status_on_client,
+)
+_UNARY_STREAM_INITIAL_DUE = (
+    cygrpc.OperationType.send_initial_metadata,
+    cygrpc.OperationType.send_message,
+    cygrpc.OperationType.send_close_from_client,
+    cygrpc.OperationType.receive_initial_metadata,
+    cygrpc.OperationType.receive_status_on_client,
+)
+_STREAM_UNARY_INITIAL_DUE = (
+    cygrpc.OperationType.send_initial_metadata,
+    cygrpc.OperationType.receive_initial_metadata,
+    cygrpc.OperationType.receive_message,
+    cygrpc.OperationType.receive_status_on_client,
+)
+_STREAM_STREAM_INITIAL_DUE = (
+    cygrpc.OperationType.send_initial_metadata,
+    cygrpc.OperationType.receive_initial_metadata,
+    cygrpc.OperationType.receive_status_on_client,
+)
 
 _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
     'Exception calling channel subscription callback!')
@@ -457,7 +465,8 @@
                 cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                 cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                 cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
-                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
+                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
+            )
             return state, operations, deadline, deadline_timespec, None
 
     def _blocking(self, request, timeout, metadata, credentials):
@@ -538,11 +547,12 @@
                     (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
                     event_handler)
                 operations = (
-                    cygrpc.SendInitialMetadataOperation(
-                        metadata, _EMPTY_FLAGS), cygrpc.SendMessageOperation(
-                            serialized_request, _EMPTY_FLAGS),
+                    cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
+                    cygrpc.SendMessageOperation(serialized_request,
+                                                _EMPTY_FLAGS),
                     cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
-                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
+                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
+                )
                 call_error = call.start_client_batch(operations, event_handler)
                 if call_error != cygrpc.CallError.ok:
                     _call_error_set_RPCstate(state, call_error, metadata)
@@ -576,7 +586,8 @@
             operations = (
                 cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
                 cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
-                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
+                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
+            )
             call_error = call.start_client_batch(operations, None)
             _check_call_error(call_error, metadata)
             _consume_request_iterator(request_iterator, state, call,
@@ -627,7 +638,8 @@
             operations = (
                 cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
                 cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
-                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
+                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
+            )
             call_error = call.start_client_batch(operations, event_handler)
             if call_error != cygrpc.CallError.ok:
                 _call_error_set_RPCstate(state, call_error, metadata)
@@ -666,7 +678,8 @@
                 event_handler)
             operations = (
                 cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
-                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
+                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
+            )
             call_error = call.start_client_batch(operations, event_handler)
             if call_error != cygrpc.CallError.ok:
                 _call_error_set_RPCstate(state, call_error, metadata)
@@ -787,7 +800,11 @@
 
 def _spawn_delivery(state, callbacks):
     delivering_thread = threading.Thread(
-        target=_deliver, args=(state, state.connectivity, callbacks,))
+        target=_deliver, args=(
+            state,
+            state.connectivity,
+            callbacks,
+        ))
     delivering_thread.start()
     state.delivering = True
 
@@ -862,17 +879,16 @@
 
 def _unsubscribe(state, callback):
     with state.lock:
-        for index, (subscribed_callback, unused_connectivity
-                   ) in enumerate(state.callbacks_and_connectivities):
+        for index, (subscribed_callback, unused_connectivity) in enumerate(
+                state.callbacks_and_connectivities):
             if callback == subscribed_callback:
                 state.callbacks_and_connectivities.pop(index)
                 break
 
 
 def _options(options):
-    return list(options) + [
-        (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT)
-    ]
+    return list(options) + [(cygrpc.ChannelArgKey.primary_user_agent_string,
+                             _USER_AGENT)]
 
 
 class Channel(grpc.Channel):
@@ -887,8 +903,8 @@
       credentials: A cygrpc.ChannelCredentials or None.
     """
         self._channel = cygrpc.Channel(
-            _common.encode(target),
-            _common.channel_args(_options(options)), credentials)
+            _common.encode(target), _common.channel_args(_options(options)),
+            credentials)
         self._call_state = _ChannelCallState(self._channel)
         self._connectivity_state = _ChannelConnectivityState(self._channel)
 
@@ -908,8 +924,7 @@
                     request_serializer=None,
                     response_deserializer=None):
         return _UnaryUnaryMultiCallable(
-            self._channel,
-            _channel_managed_call_management(self._call_state),
+            self._channel, _channel_managed_call_management(self._call_state),
             _common.encode(method), request_serializer, response_deserializer)
 
     def unary_stream(self,
@@ -917,8 +932,7 @@
                      request_serializer=None,
                      response_deserializer=None):
         return _UnaryStreamMultiCallable(
-            self._channel,
-            _channel_managed_call_management(self._call_state),
+            self._channel, _channel_managed_call_management(self._call_state),
             _common.encode(method), request_serializer, response_deserializer)
 
     def stream_unary(self,
@@ -926,8 +940,7 @@
                      request_serializer=None,
                      response_deserializer=None):
         return _StreamUnaryMultiCallable(
-            self._channel,
-            _channel_managed_call_management(self._call_state),
+            self._channel, _channel_managed_call_management(self._call_state),
             _common.encode(method), request_serializer, response_deserializer)
 
     def stream_stream(self,
@@ -935,8 +948,7 @@
                       request_serializer=None,
                       response_deserializer=None):
         return _StreamStreamMultiCallable(
-            self._channel,
-            _channel_managed_call_management(self._call_state),
+            self._channel, _channel_managed_call_management(self._call_state),
             _common.encode(method), request_serializer, response_deserializer)
 
     def __del__(self):
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
index 6361669..0892215 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/call.pyx.pxi
@@ -26,16 +26,13 @@
   def _start_batch(self, operations, tag, retain_self):
     if not self.is_valid:
       raise ValueError("invalid call object cannot be used from Python")
-    cdef OperationTag operation_tag = OperationTag(tag, operations)
-    if retain_self:
-      operation_tag.operation_call = self
-    else:
-      operation_tag.operation_call = None
-    operation_tag.store_ops()
-    cpython.Py_INCREF(operation_tag)
+    cdef _BatchOperationTag batch_operation_tag = _BatchOperationTag(
+        tag, operations, self if retain_self else None)
+    batch_operation_tag.prepare()
+    cpython.Py_INCREF(batch_operation_tag)
     return grpc_call_start_batch(
-          self.c_call, operation_tag.c_ops, operation_tag.c_nops,
-          <cpython.PyObject *>operation_tag, NULL)
+          self.c_call, batch_operation_tag.c_ops, batch_operation_tag.c_nops,
+          <cpython.PyObject *>batch_operation_tag, NULL)
 
   def start_client_batch(self, operations, tag):
     # We don't reference this call in the operations tag because
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
index 644df67..443d534 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/channel.pyx.pxi
@@ -76,12 +76,12 @@
   def watch_connectivity_state(
       self, grpc_connectivity_state last_observed_state,
       Timespec deadline not None, CompletionQueue queue not None, tag):
-    cdef OperationTag operation_tag = OperationTag(tag, None)
-    cpython.Py_INCREF(operation_tag)
+    cdef _ConnectivityTag connectivity_tag = _ConnectivityTag(tag)
+    cpython.Py_INCREF(connectivity_tag)
     with nogil:
       grpc_channel_watch_connectivity_state(
           self.c_channel, last_observed_state, deadline.c_time,
-          queue.c_completion_queue, <cpython.PyObject *>operation_tag)
+          queue.c_completion_queue, <cpython.PyObject *>connectivity_tag)
 
   def target(self):
     cdef char *target = NULL
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
index 140fc35..e259789 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/completion_queue.pyx.pxi
@@ -37,42 +37,20 @@
     self.is_shutdown = False
 
   cdef _interpret_event(self, grpc_event event):
-    cdef OperationTag tag = None
-    cdef object user_tag = None
-    cdef Call operation_call = None
-    cdef CallDetails request_call_details = None
-    cdef object request_metadata = None
-    cdef object batch_operations = None
+    cdef _Tag tag = None
     if event.type == GRPC_QUEUE_TIMEOUT:
-      return Event(
-          event.type, False, None, None, None, None, False, None)
+      # NOTE(nathaniel): For now we coopt ConnectivityEvent here.
+      return ConnectivityEvent(GRPC_QUEUE_TIMEOUT, False, None)
     elif event.type == GRPC_QUEUE_SHUTDOWN:
       self.is_shutdown = True
-      return Event(
-          event.type, True, None, None, None, None, False, None)
+      # NOTE(nathaniel): For now we coopt ConnectivityEvent here.
+      return ConnectivityEvent(GRPC_QUEUE_TIMEOUT, True, None)
     else:
-      if event.tag != NULL:
-        tag = <OperationTag>event.tag
-        # We receive event tags only after they've been inc-ref'd elsewhere in
-        # the code.
-        cpython.Py_DECREF(tag)
-        if tag.shutting_down_server is not None:
-          tag.shutting_down_server.notify_shutdown_complete()
-        user_tag = tag.user_tag
-        operation_call = tag.operation_call
-        request_call_details = tag.request_call_details
-        if tag.is_new_request:
-          request_metadata = _metadata(&tag._c_request_metadata)
-          grpc_metadata_array_destroy(&tag._c_request_metadata)
-        batch_operations = tag.release_ops()
-        if tag.is_new_request:
-          # Stuff in the tag not explicitly handled by us needs to live through
-          # the life of the call
-          operation_call.references.extend(tag.references)
-      return Event(
-          event.type, event.success, user_tag, operation_call,
-          request_call_details, request_metadata, tag.is_new_request,
-          batch_operations)
+      tag = <_Tag>event.tag
+      # We receive event tags only after they've been inc-ref'd elsewhere in
+      # the code.
+      cpython.Py_DECREF(tag)
+      return tag.event(event)
 
   def poll(self, Timespec deadline=None):
     # We name this 'poll' to avoid problems with CPython's expectations for
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/event.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/event.pxd.pxi
new file mode 100644
index 0000000..686199e
--- /dev/null
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/event.pxd.pxi
@@ -0,0 +1,45 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class ConnectivityEvent:
+
+  cdef readonly grpc_completion_type completion_type
+  cdef readonly bint success
+  cdef readonly object tag
+
+
+cdef class RequestCallEvent:
+
+  cdef readonly grpc_completion_type completion_type
+  cdef readonly bint success
+  cdef readonly object tag
+  cdef readonly Call call
+  cdef readonly CallDetails call_details
+  cdef readonly tuple invocation_metadata
+
+
+cdef class BatchOperationEvent:
+
+  cdef readonly grpc_completion_type completion_type
+  cdef readonly bint success
+  cdef readonly object tag
+  cdef readonly object batch_operations
+
+
+cdef class ServerShutdownEvent:
+
+  cdef readonly grpc_completion_type completion_type
+  cdef readonly bint success
+  cdef readonly object tag
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/event.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/event.pyx.pxi
new file mode 100644
index 0000000..af26d27
--- /dev/null
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/event.pyx.pxi
@@ -0,0 +1,55 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class ConnectivityEvent:
+
+  def __cinit__(
+      self, grpc_completion_type completion_type, bint success, object tag):
+    self.completion_type = completion_type
+    self.success = success
+    self.tag = tag
+
+
+cdef class RequestCallEvent:
+
+  def __cinit__(
+      self, grpc_completion_type completion_type, bint success, object tag,
+      Call call, CallDetails call_details, tuple invocation_metadata):
+    self.completion_type = completion_type
+    self.success = success
+    self.tag = tag
+    self.call = call
+    self.call_details = call_details
+    self.invocation_metadata = invocation_metadata
+
+
+cdef class BatchOperationEvent:
+
+  def __cinit__(
+      self, grpc_completion_type completion_type, bint success, object tag,
+      object batch_operations):
+    self.completion_type = completion_type
+    self.success = success
+    self.tag = tag
+    self.batch_operations = batch_operations
+
+
+cdef class ServerShutdownEvent:
+
+  def __cinit__(
+      self, grpc_completion_type completion_type, bint success, object tag):
+    self.completion_type = completion_type
+    self.success = success
+    self.tag = tag
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
index 7f91965..6ee8336 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/grpc.pxi
@@ -38,13 +38,6 @@
     pass
 
 
-cdef extern from "grpc/impl/codegen/exec_ctx_fwd.h":
-
-  struct grpc_exec_ctx:
-    # We don't care about the internals
-    pass
-
-
 cdef extern from "grpc/grpc.h":
 
   ctypedef struct grpc_slice:
@@ -171,7 +164,7 @@
 
   ctypedef struct grpc_arg_pointer_vtable:
     void *(*copy)(void *)
-    void (*destroy)(grpc_exec_ctx *, void *)
+    void (*destroy)(void *)
     int (*cmp)(void *, void *)
 
   ctypedef struct grpc_arg_value_pointer:
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
index 537cf2b..7b2482d 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pxd.pxi
@@ -28,43 +28,6 @@
   cdef grpc_call_details c_details
 
 
-cdef class OperationTag:
-
-  cdef object user_tag
-  cdef list references
-  # This allows CompletionQueue to notify the Python Server object that the
-  # underlying GRPC core server has shutdown
-  cdef Server shutting_down_server
-  cdef Call operation_call
-  cdef CallDetails request_call_details
-  cdef grpc_metadata_array _c_request_metadata
-  cdef grpc_op *c_ops
-  cdef size_t c_nops
-  cdef readonly object _operations
-  cdef bint is_new_request
-
-  cdef void store_ops(self)
-  cdef object release_ops(self)
-
-
-cdef class Event:
-
-  cdef readonly grpc_completion_type type
-  cdef readonly bint success
-  cdef readonly object tag
-
-  # For Server.request_call
-  cdef readonly bint is_new_request
-  cdef readonly CallDetails request_call_details
-  cdef readonly object request_metadata
-
-  # For server calls
-  cdef readonly Call operation_call
-
-  # For Call.start_batch
-  cdef readonly object batch_operations
-
-
 cdef class SslPemKeyCertPair:
 
   cdef grpc_ssl_pem_key_cert_pair c_pair
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
index 99f8ffa..bc2cd03 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/records.pyx.pxi
@@ -218,50 +218,6 @@
     return timespec
 
 
-cdef class OperationTag:
-
-  def __cinit__(self, user_tag, operations):
-    self.user_tag = user_tag
-    self.references = []
-    self._operations = operations
-
-  cdef void store_ops(self):
-    self.c_nops = 0 if self._operations is None else len(self._operations)
-    if 0 < self.c_nops:
-      self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op) * self.c_nops)
-      for index, operation in enumerate(self._operations):
-        (<Operation>operation).c()
-        self.c_ops[index] = (<Operation>operation).c_op
-
-  cdef object release_ops(self):
-    if 0 < self.c_nops:
-      for index, operation in enumerate(self._operations):
-        (<Operation>operation).c_op = self.c_ops[index]
-        (<Operation>operation).un_c()
-      gpr_free(self.c_ops)
-      return self._operations
-    else:
-      return ()
-
-
-cdef class Event:
-
-  def __cinit__(self, grpc_completion_type type, bint success,
-                object tag, Call operation_call,
-                CallDetails request_call_details,
-                object request_metadata,
-                bint is_new_request,
-                object batch_operations):
-    self.type = type
-    self.success = success
-    self.tag = tag
-    self.operation_call = operation_call
-    self.request_call_details = request_call_details
-    self.request_metadata = request_metadata
-    self.batch_operations = batch_operations
-    self.is_new_request = is_new_request
-
-
 cdef class SslPemKeyCertPair:
 
   def __cinit__(self, bytes private_key, bytes certificate_chain):
@@ -276,7 +232,7 @@
   return ptr
 
 
-cdef void destroy_ptr(grpc_exec_ctx* ctx, void* ptr):
+cdef void destroy_ptr(void* ptr):
   pass
 
 
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
index f8d7892..c19becc 100644
--- a/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/server.pyx.pxi
@@ -78,19 +78,15 @@
       raise ValueError("server must be started and not shutting down")
     if server_queue not in self.registered_completion_queues:
       raise ValueError("server_queue must be a registered completion queue")
-    cdef OperationTag operation_tag = OperationTag(tag, None)
-    operation_tag.operation_call = Call()
-    operation_tag.request_call_details = CallDetails()
-    grpc_metadata_array_init(&operation_tag._c_request_metadata)
-    operation_tag.references.extend([self, call_queue, server_queue])
-    operation_tag.is_new_request = True
-    cpython.Py_INCREF(operation_tag)
+    cdef _RequestCallTag request_call_tag = _RequestCallTag(tag)
+    request_call_tag.prepare()
+    cpython.Py_INCREF(request_call_tag)
     return grpc_server_request_call(
-        self.c_server, &operation_tag.operation_call.c_call,
-        &operation_tag.request_call_details.c_details,
-        &operation_tag._c_request_metadata,
+        self.c_server, &request_call_tag.call.c_call,
+        &request_call_tag.call_details.c_details,
+        &request_call_tag.c_invocation_metadata,
         call_queue.c_completion_queue, server_queue.c_completion_queue,
-        <cpython.PyObject *>operation_tag)
+        <cpython.PyObject *>request_call_tag)
 
   def register_completion_queue(
       self, CompletionQueue queue not None):
@@ -131,16 +127,14 @@
 
   cdef _c_shutdown(self, CompletionQueue queue, tag):
     self.is_shutting_down = True
-    operation_tag = OperationTag(tag, None)
-    operation_tag.shutting_down_server = self
-    cpython.Py_INCREF(operation_tag)
+    cdef _ServerShutdownTag server_shutdown_tag = _ServerShutdownTag(tag, self)
+    cpython.Py_INCREF(server_shutdown_tag)
     with nogil:
       grpc_server_shutdown_and_notify(
           self.c_server, queue.c_completion_queue,
-          <cpython.PyObject *>operation_tag)
+          <cpython.PyObject *>server_shutdown_tag)
 
   def shutdown(self, CompletionQueue queue not None, tag):
-    cdef OperationTag operation_tag
     if queue.is_shutting_down:
       raise ValueError("queue must be live")
     elif not self.is_started:
@@ -153,7 +147,8 @@
       self._c_shutdown(queue, tag)
 
   cdef notify_shutdown_complete(self):
-    # called only by a completion queue on receiving our shutdown operation tag
+    # called only after our server shutdown tag has emerged from a completion
+    # queue.
     self.is_shutdown = True
 
   def cancel_all_calls(self):
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/tag.pxd.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pxd.pxi
new file mode 100644
index 0000000..f9a3b5e
--- /dev/null
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pxd.pxi
@@ -0,0 +1,58 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class _Tag:
+
+  cdef object event(self, grpc_event c_event)
+
+
+cdef class _ConnectivityTag(_Tag):
+
+  cdef readonly object _user_tag
+
+  cdef ConnectivityEvent event(self, grpc_event c_event)
+
+
+cdef class _RequestCallTag(_Tag):
+
+  cdef readonly object _user_tag
+  cdef Call call
+  cdef CallDetails call_details
+  cdef grpc_metadata_array c_invocation_metadata
+
+  cdef void prepare(self)
+  cdef RequestCallEvent event(self, grpc_event c_event)
+
+
+cdef class _BatchOperationTag(_Tag):
+
+  cdef object _user_tag
+  cdef readonly object _operations
+  cdef readonly object _retained_call
+  cdef grpc_op *c_ops
+  cdef size_t c_nops
+
+  cdef void prepare(self)
+  cdef BatchOperationEvent event(self, grpc_event c_event)
+
+
+cdef class _ServerShutdownTag(_Tag):
+
+  cdef readonly object _user_tag
+  # This allows CompletionQueue to notify the Python Server object that the
+  # underlying GRPC core server has shutdown
+  cdef readonly Server _shutting_down_server
+
+  cdef ServerShutdownEvent event(self, grpc_event c_event)
diff --git a/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi
new file mode 100644
index 0000000..aaca458
--- /dev/null
+++ b/src/python/grpcio/grpc/_cython/_cygrpc/tag.pyx.pxi
@@ -0,0 +1,87 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+cdef class _Tag:
+
+  cdef object event(self, grpc_event c_event):
+    raise NotImplementedError()
+
+
+cdef class _ConnectivityTag(_Tag):
+
+  def __cinit__(self, user_tag):
+    self._user_tag = user_tag
+
+  cdef ConnectivityEvent event(self, grpc_event c_event):
+    return ConnectivityEvent(c_event.type, c_event.success, self._user_tag)
+
+
+cdef class _RequestCallTag(_Tag):
+
+  def __cinit__(self, user_tag):
+    self._user_tag = user_tag
+    self.call = None
+    self.call_details = None
+
+  cdef void prepare(self):
+    self.call = Call()
+    self.call_details = CallDetails()
+    grpc_metadata_array_init(&self.c_invocation_metadata)
+
+  cdef RequestCallEvent event(self, grpc_event c_event):
+    cdef tuple invocation_metadata = _metadata(&self.c_invocation_metadata)
+    grpc_metadata_array_destroy(&self.c_invocation_metadata)
+    return RequestCallEvent(
+        c_event.type, c_event.success, self._user_tag, self.call,
+        self.call_details, invocation_metadata)
+
+
+cdef class _BatchOperationTag(_Tag):
+
+  def __cinit__(self, user_tag, operations, call):
+    self._user_tag = user_tag
+    self._operations = operations
+    self._retained_call = call
+
+  cdef void prepare(self):
+    self.c_nops = 0 if self._operations is None else len(self._operations)
+    if 0 < self.c_nops:
+      self.c_ops = <grpc_op *>gpr_malloc(sizeof(grpc_op) * self.c_nops)
+      for index, operation in enumerate(self._operations):
+        (<Operation>operation).c()
+        self.c_ops[index] = (<Operation>operation).c_op
+
+  cdef BatchOperationEvent event(self, grpc_event c_event):
+    if 0 < self.c_nops:
+      for index, operation in enumerate(self._operations):
+        (<Operation>operation).c_op = self.c_ops[index]
+        (<Operation>operation).un_c()
+      gpr_free(self.c_ops)
+      return BatchOperationEvent(
+          c_event.type, c_event.success, self._user_tag, self._operations)
+    else:
+      return BatchOperationEvent(
+          c_event.type, c_event.success, self._user_tag, ())
+
+
+cdef class _ServerShutdownTag(_Tag):
+
+  def __cinit__(self, user_tag, shutting_down_server):
+    self._user_tag = user_tag
+    self._shutting_down_server = shutting_down_server
+
+  cdef ServerShutdownEvent event(self, grpc_event c_event):
+    self._shutting_down_server.notify_shutdown_complete()
+    return ServerShutdownEvent(c_event.type, c_event.success, self._user_tag)
\ No newline at end of file
diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pxd b/src/python/grpcio/grpc/_cython/cygrpc.pxd
index ad229de..b32fa51 100644
--- a/src/python/grpcio/grpc/_cython/cygrpc.pxd
+++ b/src/python/grpcio/grpc/_cython/cygrpc.pxd
@@ -18,8 +18,10 @@
 include "_cygrpc/channel.pxd.pxi"
 include "_cygrpc/credentials.pxd.pxi"
 include "_cygrpc/completion_queue.pxd.pxi"
+include "_cygrpc/event.pxd.pxi"
 include "_cygrpc/metadata.pxd.pxi"
 include "_cygrpc/operation.pxd.pxi"
 include "_cygrpc/records.pxd.pxi"
 include "_cygrpc/security.pxd.pxi"
 include "_cygrpc/server.pxd.pxi"
+include "_cygrpc/tag.pxd.pxi"
diff --git a/src/python/grpcio/grpc/_cython/cygrpc.pyx b/src/python/grpcio/grpc/_cython/cygrpc.pyx
index 0964fb6..5106394 100644
--- a/src/python/grpcio/grpc/_cython/cygrpc.pyx
+++ b/src/python/grpcio/grpc/_cython/cygrpc.pyx
@@ -25,11 +25,13 @@
 include "_cygrpc/channel.pyx.pxi"
 include "_cygrpc/credentials.pyx.pxi"
 include "_cygrpc/completion_queue.pyx.pxi"
+include "_cygrpc/event.pyx.pxi"
 include "_cygrpc/metadata.pyx.pxi"
 include "_cygrpc/operation.pyx.pxi"
 include "_cygrpc/records.pyx.pxi"
 include "_cygrpc/security.pyx.pxi"
 include "_cygrpc/server.pyx.pxi"
+include "_cygrpc/tag.pyx.pxi"
 
 #
 # initialize gRPC
diff --git a/src/python/grpcio/grpc/_grpcio_metadata.py b/src/python/grpcio/grpc/_grpcio_metadata.py
index 2f403d1..993c49d 100644
--- a/src/python/grpcio/grpc/_grpcio_metadata.py
+++ b/src/python/grpcio/grpc/_grpcio_metadata.py
@@ -14,4 +14,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc/_grpcio_metadata.py.template`!!!
 
-__version__ = """1.8.3"""
+__version__ = """1.9.0.dev0"""
diff --git a/src/python/grpcio/grpc/_interceptor.py b/src/python/grpcio/grpc/_interceptor.py
index fffb269..56a2806 100644
--- a/src/python/grpcio/grpc/_interceptor.py
+++ b/src/python/grpcio/grpc/_interceptor.py
@@ -44,9 +44,10 @@
 
 
 class _ClientCallDetails(
-        collections.namedtuple('_ClientCallDetails',
-                               ('method', 'timeout', 'metadata',
-                                'credentials')), grpc.ClientCallDetails):
+        collections.namedtuple(
+            '_ClientCallDetails',
+            ('method', 'timeout', 'metadata', 'credentials')),
+        grpc.ClientCallDetails):
     pass
 
 
diff --git a/src/python/grpcio/grpc/_plugin_wrapping.py b/src/python/grpcio/grpc/_plugin_wrapping.py
index f728795..6785e58 100644
--- a/src/python/grpcio/grpc/_plugin_wrapping.py
+++ b/src/python/grpcio/grpc/_plugin_wrapping.py
@@ -23,7 +23,9 @@
 
 class _AuthMetadataContext(
         collections.namedtuple('AuthMetadataContext', (
-            'service_url', 'method_name',)), grpc.AuthMetadataContext):
+            'service_url',
+            'method_name',
+        )), grpc.AuthMetadataContext):
     pass
 
 
@@ -70,8 +72,9 @@
             _common.decode(service_url), _common.decode(method_name))
         callback_state = _CallbackState()
         try:
-            self._metadata_plugin(
-                context, _AuthMetadataPluginCallback(callback_state, callback))
+            self._metadata_plugin(context,
+                                  _AuthMetadataPluginCallback(
+                                      callback_state, callback))
         except Exception as exception:  # pylint: disable=broad-except
             logging.exception(
                 'AuthMetadataPluginCallback "%s" raised exception!',
diff --git a/src/python/grpcio/grpc/_server.py b/src/python/grpcio/grpc/_server.py
index eec31bd..1cdb2d4 100644
--- a/src/python/grpcio/grpc/_server.py
+++ b/src/python/grpcio/grpc/_server.py
@@ -78,7 +78,9 @@
 
 class _HandlerCallDetails(
         collections.namedtuple('_HandlerCallDetails', (
-            'method', 'invocation_metadata',)), grpc.HandlerCallDetails):
+            'method',
+            'invocation_metadata',
+        )), grpc.HandlerCallDetails):
     pass
 
 
@@ -130,10 +132,12 @@
         effective_code = _abortion_code(state, code)
         effective_details = details if state.details is None else state.details
         if state.initial_metadata_allowed:
-            operations = (cygrpc.SendInitialMetadataOperation(
-                None, _EMPTY_FLAGS), cygrpc.SendStatusFromServerOperation(
+            operations = (
+                cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
+                cygrpc.SendStatusFromServerOperation(
                     state.trailing_metadata, effective_code, effective_details,
-                    _EMPTY_FLAGS),)
+                    _EMPTY_FLAGS),
+            )
             token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
         else:
             operations = (cygrpc.SendStatusFromServerOperation(
@@ -217,11 +221,10 @@
 
     def time_remaining(self):
         return max(
-            float(self._rpc_event.request_call_details.deadline) - time.time(),
-            0)
+            float(self._rpc_event.call_details.deadline) - time.time(), 0)
 
     def cancel(self):
-        self._rpc_event.operation_call.cancel()
+        self._rpc_event.call.cancel()
 
     def add_callback(self, callback):
         with self._state.condition:
@@ -236,23 +239,23 @@
             self._state.disable_next_compression = True
 
     def invocation_metadata(self):
-        return self._rpc_event.request_metadata
+        return self._rpc_event.invocation_metadata
 
     def peer(self):
-        return _common.decode(self._rpc_event.operation_call.peer())
+        return _common.decode(self._rpc_event.call.peer())
 
     def peer_identities(self):
-        return cygrpc.peer_identities(self._rpc_event.operation_call)
+        return cygrpc.peer_identities(self._rpc_event.call)
 
     def peer_identity_key(self):
-        id_key = cygrpc.peer_identity_key(self._rpc_event.operation_call)
+        id_key = cygrpc.peer_identity_key(self._rpc_event.call)
         return id_key if id_key is None else _common.decode(id_key)
 
     def auth_context(self):
         return {
             _common.decode(key): value
             for key, value in six.iteritems(
-                cygrpc.auth_context(self._rpc_event.operation_call))
+                cygrpc.auth_context(self._rpc_event.call))
         }
 
     def send_initial_metadata(self, initial_metadata):
@@ -263,7 +266,7 @@
                 if self._state.initial_metadata_allowed:
                     operation = cygrpc.SendInitialMetadataOperation(
                         initial_metadata, _EMPTY_FLAGS)
-                    self._rpc_event.operation_call.start_server_batch(
+                    self._rpc_event.call.start_server_batch(
                         (operation,), _send_initial_metadata(self._state))
                     self._state.initial_metadata_allowed = False
                     self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
@@ -346,9 +349,9 @@
             if state.client is _CANCELLED or state.statused:
                 return None
             else:
-                rpc_event.operation_call.start_server_batch(
+                rpc_event.call.start_server_batch(
                     (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
-                    _receive_message(state, rpc_event.operation_call,
+                    _receive_message(state, rpc_event.call,
                                      request_deserializer))
                 state.due.add(_RECEIVE_MESSAGE_TOKEN)
                 while True:
@@ -356,8 +359,8 @@
                     if state.request is None:
                         if state.client is _CLOSED:
                             details = '"{}" requires exactly one request message.'.format(
-                                rpc_event.request_call_details.method)
-                            _abort(state, rpc_event.operation_call,
+                                rpc_event.call_details.method)
+                            _abort(state, rpc_event.call,
                                    cygrpc.StatusCode.unimplemented,
                                    _common.encode(details))
                             return None
@@ -378,13 +381,13 @@
     except Exception as exception:  # pylint: disable=broad-except
         with state.condition:
             if exception is state.abortion:
-                _abort(state, rpc_event.operation_call,
-                       cygrpc.StatusCode.unknown, b'RPC Aborted')
+                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
+                       b'RPC Aborted')
             elif exception not in state.rpc_errors:
                 details = 'Exception calling application: {}'.format(exception)
                 logging.exception(details)
-                _abort(state, rpc_event.operation_call,
-                       cygrpc.StatusCode.unknown, _common.encode(details))
+                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
+                       _common.encode(details))
         return None, False
 
 
@@ -396,13 +399,13 @@
     except Exception as exception:  # pylint: disable=broad-except
         with state.condition:
             if exception is state.abortion:
-                _abort(state, rpc_event.operation_call,
-                       cygrpc.StatusCode.unknown, b'RPC Aborted')
+                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
+                       b'RPC Aborted')
             elif exception not in state.rpc_errors:
                 details = 'Exception iterating responses: {}'.format(exception)
                 logging.exception(details)
-                _abort(state, rpc_event.operation_call,
-                       cygrpc.StatusCode.unknown, _common.encode(details))
+                _abort(state, rpc_event.call, cygrpc.StatusCode.unknown,
+                       _common.encode(details))
         return None, False
 
 
@@ -410,7 +413,7 @@
     serialized_response = _common.serialize(response, response_serializer)
     if serialized_response is None:
         with state.condition:
-            _abort(state, rpc_event.operation_call, cygrpc.StatusCode.internal,
+            _abort(state, rpc_event.call, cygrpc.StatusCode.internal,
                    b'Failed to serialize response!')
         return None
     else:
@@ -423,18 +426,19 @@
             return False
         else:
             if state.initial_metadata_allowed:
-                operations = (cygrpc.SendInitialMetadataOperation(None,
-                                                                  _EMPTY_FLAGS),
-                              cygrpc.SendMessageOperation(serialized_response,
-                                                          _EMPTY_FLAGS),)
+                operations = (
+                    cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
+                    cygrpc.SendMessageOperation(serialized_response,
+                                                _EMPTY_FLAGS),
+                )
                 state.initial_metadata_allowed = False
                 token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
             else:
-                operations = (cygrpc.SendMessageOperation(serialized_response,
-                                                          _EMPTY_FLAGS),)
+                operations = (cygrpc.SendMessageOperation(
+                    serialized_response, _EMPTY_FLAGS),)
                 token = _SEND_MESSAGE_TOKEN
-            rpc_event.operation_call.start_server_batch(
-                operations, _send_message(state, token))
+            rpc_event.call.start_server_batch(operations,
+                                              _send_message(state, token))
             state.due.add(token)
             while True:
                 state.condition.wait()
@@ -458,7 +462,7 @@
                 operations.append(
                     cygrpc.SendMessageOperation(serialized_response,
                                                 _EMPTY_FLAGS))
-            rpc_event.operation_call.start_server_batch(
+            rpc_event.call.start_server_batch(
                 operations,
                 _send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
             state.statused = True
@@ -525,7 +529,7 @@
 
 
 def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
-    request_iterator = _RequestIterator(state, rpc_event.operation_call,
+    request_iterator = _RequestIterator(state, rpc_event.call,
                                         method_handler.request_deserializer)
     return thread_pool.submit(
         _unary_response_in_pool, rpc_event, state, method_handler.stream_unary,
@@ -534,7 +538,7 @@
 
 
 def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
-    request_iterator = _RequestIterator(state, rpc_event.operation_call,
+    request_iterator = _RequestIterator(state, rpc_event.call,
                                         method_handler.request_deserializer)
     return thread_pool.submit(
         _stream_response_in_pool, rpc_event, state,
@@ -552,8 +556,8 @@
         return None
 
     handler_call_details = _HandlerCallDetails(
-        _common.decode(rpc_event.request_call_details.method),
-        rpc_event.request_metadata)
+        _common.decode(rpc_event.call_details.method),
+        rpc_event.invocation_metadata)
 
     if interceptor_pipeline is not None:
         return interceptor_pipeline.execute(query_handlers,
@@ -563,20 +567,22 @@
 
 
 def _reject_rpc(rpc_event, status, details):
-    operations = (cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
-                  cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
-                  cygrpc.SendStatusFromServerOperation(None, status, details,
-                                                       _EMPTY_FLAGS),)
+    operations = (
+        cygrpc.SendInitialMetadataOperation(None, _EMPTY_FLAGS),
+        cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
+        cygrpc.SendStatusFromServerOperation(None, status, details,
+                                             _EMPTY_FLAGS),
+    )
     rpc_state = _RPCState()
-    rpc_event.operation_call.start_server_batch(
-        operations, lambda ignored_event: (rpc_state, (),))
+    rpc_event.call.start_server_batch(operations,
+                                      lambda ignored_event: (rpc_state, (),))
     return rpc_state
 
 
 def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
     state = _RPCState()
     with state.condition:
-        rpc_event.operation_call.start_server_batch(
+        rpc_event.call.start_server_batch(
             (cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),),
             _receive_close_on_server(state))
         state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
@@ -600,7 +606,7 @@
                  concurrency_exceeded):
     if not rpc_event.success:
         return None, None
-    if rpc_event.request_call_details.method is not None:
+    if rpc_event.call_details.method is not None:
         try:
             method_handler = _find_method_handler(rpc_event, generic_handlers,
                                                   interceptor_pipeline)
@@ -799,8 +805,8 @@
         return _add_insecure_port(self._state, _common.encode(address))
 
     def add_secure_port(self, address, server_credentials):
-        return _add_secure_port(self._state,
-                                _common.encode(address), server_credentials)
+        return _add_secure_port(self._state, _common.encode(address),
+                                server_credentials)
 
     def start(self):
         _start(self._state)
diff --git a/src/python/grpcio/grpc/_utilities.py b/src/python/grpcio/grpc/_utilities.py
index 47cedcc..25bd1ce 100644
--- a/src/python/grpcio/grpc/_utilities.py
+++ b/src/python/grpcio/grpc/_utilities.py
@@ -29,9 +29,15 @@
 
 class RpcMethodHandler(
         collections.namedtuple('_RpcMethodHandler', (
-            'request_streaming', 'response_streaming', 'request_deserializer',
-            'response_serializer', 'unary_unary', 'unary_stream',
-            'stream_unary', 'stream_stream',)), grpc.RpcMethodHandler):
+            'request_streaming',
+            'response_streaming',
+            'request_deserializer',
+            'response_serializer',
+            'unary_unary',
+            'unary_stream',
+            'stream_unary',
+            'stream_stream',
+        )), grpc.RpcMethodHandler):
     pass
 
 
diff --git a/src/python/grpcio/grpc/beta/_client_adaptations.py b/src/python/grpcio/grpc/beta/_client_adaptations.py
index dcaa0ee..cf200a8 100644
--- a/src/python/grpcio/grpc/beta/_client_adaptations.py
+++ b/src/python/grpcio/grpc/beta/_client_adaptations.py
@@ -51,8 +51,7 @@
     code = rpc_error_call.code()
     pair = _STATUS_CODE_TO_ABORTION_KIND_AND_ABORTION_ERROR_CLASS.get(code)
     error_kind = face.Abortion.Kind.LOCAL_FAILURE if pair is None else pair[0]
-    return face.Abortion(error_kind,
-                         rpc_error_call.initial_metadata(),
+    return face.Abortion(error_kind, rpc_error_call.initial_metadata(),
                          rpc_error_call.trailing_metadata(), code,
                          rpc_error_call.details())
 
@@ -441,9 +440,14 @@
                              metadata=None,
                              with_call=None,
                              protocol_options=None):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _blocking_unary_unary(self._channel, group, method, timeout,
                                      with_call, protocol_options, metadata,
                                      self._metadata_transformer, request,
@@ -456,9 +460,14 @@
                            timeout,
                            metadata=None,
                            protocol_options=None):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _future_unary_unary(self._channel, group, method, timeout,
                                    protocol_options, metadata,
                                    self._metadata_transformer, request,
@@ -471,9 +480,14 @@
                             timeout,
                             metadata=None,
                             protocol_options=None):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _unary_stream(self._channel, group, method, timeout,
                              protocol_options, metadata,
                              self._metadata_transformer, request,
@@ -487,9 +501,14 @@
                               metadata=None,
                               with_call=None,
                               protocol_options=None):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _blocking_stream_unary(
             self._channel, group, method, timeout, with_call, protocol_options,
             metadata, self._metadata_transformer, request_iterator,
@@ -502,9 +521,14 @@
                             timeout,
                             metadata=None,
                             protocol_options=None):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _future_stream_unary(
             self._channel, group, method, timeout, protocol_options, metadata,
             self._metadata_transformer, request_iterator, request_serializer,
@@ -517,9 +541,14 @@
                              timeout,
                              metadata=None,
                              protocol_options=None):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _stream_stream(self._channel, group, method, timeout,
                               protocol_options, metadata,
                               self._metadata_transformer, request_iterator,
@@ -568,33 +597,53 @@
         raise NotImplementedError()
 
     def unary_unary(self, group, method):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _UnaryUnaryMultiCallable(
             self._channel, group, method, self._metadata_transformer,
             request_serializer, response_deserializer)
 
     def unary_stream(self, group, method):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _UnaryStreamMultiCallable(
             self._channel, group, method, self._metadata_transformer,
             request_serializer, response_deserializer)
 
     def stream_unary(self, group, method):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _StreamUnaryMultiCallable(
             self._channel, group, method, self._metadata_transformer,
             request_serializer, response_deserializer)
 
     def stream_stream(self, group, method):
-        request_serializer = self._request_serializers.get((group, method,))
-        response_deserializer = self._response_deserializers.get((group,
-                                                                  method,))
+        request_serializer = self._request_serializers.get((
+            group,
+            method,
+        ))
+        response_deserializer = self._response_deserializers.get((
+            group,
+            method,
+        ))
         return _StreamStreamMultiCallable(
             self._channel, group, method, self._metadata_transformer,
             request_serializer, response_deserializer)
@@ -624,8 +673,8 @@
         elif method_cardinality is cardinality.Cardinality.STREAM_STREAM:
             return self._generic_stub.stream_stream(self._group, attr)
         else:
-            raise AttributeError('_DynamicStub object has no attribute "%s"!' %
-                                 attr)
+            raise AttributeError(
+                '_DynamicStub object has no attribute "%s"!' % attr)
 
     def __enter__(self):
         return self
diff --git a/src/python/grpcio/grpc/beta/_metadata.py b/src/python/grpcio/grpc/beta/_metadata.py
index e135f4d..b7c8535 100644
--- a/src/python/grpcio/grpc/beta/_metadata.py
+++ b/src/python/grpcio/grpc/beta/_metadata.py
@@ -15,7 +15,10 @@
 
 import collections
 
-_Metadatum = collections.namedtuple('_Metadatum', ('key', 'value',))
+_Metadatum = collections.namedtuple('_Metadatum', (
+    'key',
+    'value',
+))
 
 
 def _beta_metadatum(key, value):
diff --git a/src/python/grpcio/grpc/beta/_server_adaptations.py b/src/python/grpcio/grpc/beta/_server_adaptations.py
index 1c22dbe..3c04fd7 100644
--- a/src/python/grpcio/grpc/beta/_server_adaptations.py
+++ b/src/python/grpcio/grpc/beta/_server_adaptations.py
@@ -245,9 +245,15 @@
 
 class _SimpleMethodHandler(
         collections.namedtuple('_MethodHandler', (
-            'request_streaming', 'response_streaming', 'request_deserializer',
-            'response_serializer', 'unary_unary', 'unary_stream',
-            'stream_unary', 'stream_stream',)), grpc.RpcMethodHandler):
+            'request_streaming',
+            'response_streaming',
+            'request_deserializer',
+            'response_serializer',
+            'unary_unary',
+            'unary_stream',
+            'stream_unary',
+            'stream_stream',
+        )), grpc.RpcMethodHandler):
     pass
 
 
@@ -255,15 +261,17 @@
                            response_serializer):
     if implementation.style is style.Service.INLINE:
         if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
-            return _SimpleMethodHandler(
-                False, False, request_deserializer, response_serializer,
-                _adapt_unary_request_inline(implementation.unary_unary_inline),
-                None, None, None)
+            return _SimpleMethodHandler(False, False, request_deserializer,
+                                        response_serializer,
+                                        _adapt_unary_request_inline(
+                                            implementation.unary_unary_inline),
+                                        None, None, None)
         elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
-            return _SimpleMethodHandler(
-                False, True, request_deserializer, response_serializer, None,
-                _adapt_unary_request_inline(implementation.unary_stream_inline),
-                None, None)
+            return _SimpleMethodHandler(False, True, request_deserializer,
+                                        response_serializer, None,
+                                        _adapt_unary_request_inline(
+                                            implementation.unary_stream_inline),
+                                        None, None)
         elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
             return _SimpleMethodHandler(True, False, request_deserializer,
                                         response_serializer, None, None,
@@ -278,26 +286,28 @@
                     implementation.stream_stream_inline))
     elif implementation.style is style.Service.EVENT:
         if implementation.cardinality is cardinality.Cardinality.UNARY_UNARY:
-            return _SimpleMethodHandler(
-                False, False, request_deserializer, response_serializer,
-                _adapt_unary_unary_event(implementation.unary_unary_event),
-                None, None, None)
+            return _SimpleMethodHandler(False, False, request_deserializer,
+                                        response_serializer,
+                                        _adapt_unary_unary_event(
+                                            implementation.unary_unary_event),
+                                        None, None, None)
         elif implementation.cardinality is cardinality.Cardinality.UNARY_STREAM:
-            return _SimpleMethodHandler(
-                False, True, request_deserializer, response_serializer, None,
-                _adapt_unary_stream_event(implementation.unary_stream_event),
-                None, None)
+            return _SimpleMethodHandler(False, True, request_deserializer,
+                                        response_serializer, None,
+                                        _adapt_unary_stream_event(
+                                            implementation.unary_stream_event),
+                                        None, None)
         elif implementation.cardinality is cardinality.Cardinality.STREAM_UNARY:
-            return _SimpleMethodHandler(
-                True, False, request_deserializer, response_serializer, None,
-                None,
-                _adapt_stream_unary_event(implementation.stream_unary_event),
-                None)
+            return _SimpleMethodHandler(True, False, request_deserializer,
+                                        response_serializer, None, None,
+                                        _adapt_stream_unary_event(
+                                            implementation.stream_unary_event),
+                                        None)
         elif implementation.cardinality is cardinality.Cardinality.STREAM_STREAM:
-            return _SimpleMethodHandler(
-                True, True, request_deserializer, response_serializer, None,
-                None, None,
-                _adapt_stream_stream_event(implementation.stream_stream_event))
+            return _SimpleMethodHandler(True, True, request_deserializer,
+                                        response_serializer, None, None, None,
+                                        _adapt_stream_stream_event(
+                                            implementation.stream_stream_event))
 
 
 def _flatten_method_pair_map(method_pair_map):
@@ -325,10 +335,11 @@
         method_implementation = self._method_implementations.get(
             handler_call_details.method)
         if method_implementation is not None:
-            return _simple_method_handler(
-                method_implementation,
-                self._request_deserializers.get(handler_call_details.method),
-                self._response_serializers.get(handler_call_details.method))
+            return _simple_method_handler(method_implementation,
+                                          self._request_deserializers.get(
+                                              handler_call_details.method),
+                                          self._response_serializers.get(
+                                              handler_call_details.method))
         elif self._multi_method_implementation is None:
             return None
         else:
diff --git a/src/python/grpcio/grpc/beta/implementations.py b/src/python/grpcio/grpc/beta/implementations.py
index 312daf0..44dbd61 100644
--- a/src/python/grpcio/grpc/beta/implementations.py
+++ b/src/python/grpcio/grpc/beta/implementations.py
@@ -110,8 +110,8 @@
   Returns:
     A Channel to the remote host through which RPCs may be conducted.
   """
-    channel = grpc.insecure_channel(host
-                                    if port is None else '%s:%d' % (host, port))
+    channel = grpc.insecure_channel(host if port is None else '%s:%d' % (host,
+                                                                         port))
     return Channel(channel)
 
 
diff --git a/src/python/grpcio/grpc/framework/foundation/callable_util.py b/src/python/grpcio/grpc/framework/foundation/callable_util.py
index 5bdfda5..b9b9c49 100644
--- a/src/python/grpcio/grpc/framework/foundation/callable_util.py
+++ b/src/python/grpcio/grpc/framework/foundation/callable_util.py
@@ -50,8 +50,8 @@
 
 def _call_logging_exceptions(behavior, message, *args, **kwargs):
     try:
-        return _EasyOutcome(Outcome.Kind.RETURNED,
-                            behavior(*args, **kwargs), None)
+        return _EasyOutcome(Outcome.Kind.RETURNED, behavior(*args, **kwargs),
+                            None)
     except Exception as e:  # pylint: disable=broad-except
         logging.exception(message)
         return _EasyOutcome(Outcome.Kind.RAISED, None, e)
diff --git a/src/python/grpcio/grpc/framework/interfaces/base/utilities.py b/src/python/grpcio/grpc/framework/interfaces/base/utilities.py
index a9163d8..281db62 100644
--- a/src/python/grpcio/grpc/framework/interfaces/base/utilities.py
+++ b/src/python/grpcio/grpc/framework/interfaces/base/utilities.py
@@ -19,15 +19,22 @@
 
 
 class _Completion(base.Completion,
-                  collections.namedtuple('_Completion', ('terminal_metadata',
-                                                         'code', 'message',))):
+                  collections.namedtuple('_Completion', (
+                      'terminal_metadata',
+                      'code',
+                      'message',
+                  ))):
     """A trivial implementation of base.Completion."""
 
 
 class _Subscription(base.Subscription,
                     collections.namedtuple('_Subscription', (
-                        'kind', 'termination_callback', 'allowance', 'operator',
-                        'protocol_receiver',))):
+                        'kind',
+                        'termination_callback',
+                        'allowance',
+                        'operator',
+                        'protocol_receiver',
+                    ))):
     """A trivial implementation of base.Subscription."""
 
 
diff --git a/src/python/grpcio/grpc/framework/interfaces/face/face.py b/src/python/grpcio/grpc/framework/interfaces/face/face.py
index 0b93ea0..5b47f11 100644
--- a/src/python/grpcio/grpc/framework/interfaces/face/face.py
+++ b/src/python/grpcio/grpc/framework/interfaces/face/face.py
@@ -50,13 +50,20 @@
         self.method = method
 
     def __repr__(self):
-        return 'face.NoSuchMethodError(%s, %s)' % (self.group, self.method,)
+        return 'face.NoSuchMethodError(%s, %s)' % (
+            self.group,
+            self.method,
+        )
 
 
 class Abortion(
-        collections.namedtuple('Abortion',
-                               ('kind', 'initial_metadata', 'terminal_metadata',
-                                'code', 'details',))):
+        collections.namedtuple('Abortion', (
+            'kind',
+            'initial_metadata',
+            'terminal_metadata',
+            'code',
+            'details',
+        ))):
     """A value describing RPC abortion.
 
   Attributes:
diff --git a/src/python/grpcio/grpc_core_dependencies.py b/src/python/grpcio/grpc_core_dependencies.py
index 56d6ebd..aea0786 100644
--- a/src/python/grpcio/grpc_core_dependencies.py
+++ b/src/python/grpcio/grpc_core_dependencies.py
@@ -15,668 +15,667 @@
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_core_dependencies.py.template`!!!
 
 CORE_SOURCE_FILES = [
-  'src/core/lib/profiling/basic_timers.cc',
-  'src/core/lib/profiling/stap_timers.cc',
-  'src/core/lib/support/alloc.cc',
-  'src/core/lib/support/arena.cc',
-  'src/core/lib/support/atm.cc',
-  'src/core/lib/support/avl.cc',
-  'src/core/lib/support/cmdline.cc',
-  'src/core/lib/support/cpu_iphone.cc',
-  'src/core/lib/support/cpu_linux.cc',
-  'src/core/lib/support/cpu_posix.cc',
-  'src/core/lib/support/cpu_windows.cc',
-  'src/core/lib/support/env_linux.cc',
-  'src/core/lib/support/env_posix.cc',
-  'src/core/lib/support/env_windows.cc',
-  'src/core/lib/support/fork.cc',
-  'src/core/lib/support/histogram.cc',
-  'src/core/lib/support/host_port.cc',
-  'src/core/lib/support/log.cc',
-  'src/core/lib/support/log_android.cc',
-  'src/core/lib/support/log_linux.cc',
-  'src/core/lib/support/log_posix.cc',
-  'src/core/lib/support/log_windows.cc',
-  'src/core/lib/support/mpscq.cc',
-  'src/core/lib/support/murmur_hash.cc',
-  'src/core/lib/support/string.cc',
-  'src/core/lib/support/string_posix.cc',
-  'src/core/lib/support/string_util_windows.cc',
-  'src/core/lib/support/string_windows.cc',
-  'src/core/lib/support/subprocess_posix.cc',
-  'src/core/lib/support/subprocess_windows.cc',
-  'src/core/lib/support/sync.cc',
-  'src/core/lib/support/sync_posix.cc',
-  'src/core/lib/support/sync_windows.cc',
-  'src/core/lib/support/thd.cc',
-  'src/core/lib/support/thd_posix.cc',
-  'src/core/lib/support/thd_windows.cc',
-  'src/core/lib/support/time.cc',
-  'src/core/lib/support/time_posix.cc',
-  'src/core/lib/support/time_precise.cc',
-  'src/core/lib/support/time_windows.cc',
-  'src/core/lib/support/tls_pthread.cc',
-  'src/core/lib/support/tmpfile_msys.cc',
-  'src/core/lib/support/tmpfile_posix.cc',
-  'src/core/lib/support/tmpfile_windows.cc',
-  'src/core/lib/support/wrap_memcpy.cc',
-  'src/core/lib/surface/init.cc',
-  'src/core/lib/backoff/backoff.cc',
-  'src/core/lib/channel/channel_args.cc',
-  'src/core/lib/channel/channel_stack.cc',
-  'src/core/lib/channel/channel_stack_builder.cc',
-  'src/core/lib/channel/connected_channel.cc',
-  'src/core/lib/channel/handshaker.cc',
-  'src/core/lib/channel/handshaker_factory.cc',
-  'src/core/lib/channel/handshaker_registry.cc',
-  'src/core/lib/compression/compression.cc',
-  'src/core/lib/compression/message_compress.cc',
-  'src/core/lib/compression/stream_compression.cc',
-  'src/core/lib/compression/stream_compression_gzip.cc',
-  'src/core/lib/compression/stream_compression_identity.cc',
-  'src/core/lib/debug/stats.cc',
-  'src/core/lib/debug/stats_data.cc',
-  'src/core/lib/http/format_request.cc',
-  'src/core/lib/http/httpcli.cc',
-  'src/core/lib/http/parser.cc',
-  'src/core/lib/iomgr/call_combiner.cc',
-  'src/core/lib/iomgr/combiner.cc',
-  'src/core/lib/iomgr/endpoint.cc',
-  'src/core/lib/iomgr/endpoint_pair_posix.cc',
-  'src/core/lib/iomgr/endpoint_pair_uv.cc',
-  'src/core/lib/iomgr/endpoint_pair_windows.cc',
-  'src/core/lib/iomgr/error.cc',
-  'src/core/lib/iomgr/ev_epoll1_linux.cc',
-  'src/core/lib/iomgr/ev_epollex_linux.cc',
-  'src/core/lib/iomgr/ev_epollsig_linux.cc',
-  'src/core/lib/iomgr/ev_poll_posix.cc',
-  'src/core/lib/iomgr/ev_posix.cc',
-  'src/core/lib/iomgr/ev_windows.cc',
-  'src/core/lib/iomgr/exec_ctx.cc',
-  'src/core/lib/iomgr/executor.cc',
-  'src/core/lib/iomgr/fork_posix.cc',
-  'src/core/lib/iomgr/fork_windows.cc',
-  'src/core/lib/iomgr/gethostname_fallback.cc',
-  'src/core/lib/iomgr/gethostname_host_name_max.cc',
-  'src/core/lib/iomgr/gethostname_sysconf.cc',
-  'src/core/lib/iomgr/iocp_windows.cc',
-  'src/core/lib/iomgr/iomgr.cc',
-  'src/core/lib/iomgr/iomgr_posix.cc',
-  'src/core/lib/iomgr/iomgr_uv.cc',
-  'src/core/lib/iomgr/iomgr_windows.cc',
-  'src/core/lib/iomgr/is_epollexclusive_available.cc',
-  'src/core/lib/iomgr/load_file.cc',
-  'src/core/lib/iomgr/lockfree_event.cc',
-  'src/core/lib/iomgr/network_status_tracker.cc',
-  'src/core/lib/iomgr/polling_entity.cc',
-  'src/core/lib/iomgr/pollset_set_uv.cc',
-  'src/core/lib/iomgr/pollset_set_windows.cc',
-  'src/core/lib/iomgr/pollset_uv.cc',
-  'src/core/lib/iomgr/pollset_windows.cc',
-  'src/core/lib/iomgr/resolve_address_posix.cc',
-  'src/core/lib/iomgr/resolve_address_uv.cc',
-  'src/core/lib/iomgr/resolve_address_windows.cc',
-  'src/core/lib/iomgr/resource_quota.cc',
-  'src/core/lib/iomgr/sockaddr_utils.cc',
-  'src/core/lib/iomgr/socket_factory_posix.cc',
-  'src/core/lib/iomgr/socket_mutator.cc',
-  'src/core/lib/iomgr/socket_utils_common_posix.cc',
-  'src/core/lib/iomgr/socket_utils_linux.cc',
-  'src/core/lib/iomgr/socket_utils_posix.cc',
-  'src/core/lib/iomgr/socket_utils_uv.cc',
-  'src/core/lib/iomgr/socket_utils_windows.cc',
-  'src/core/lib/iomgr/socket_windows.cc',
-  'src/core/lib/iomgr/tcp_client_posix.cc',
-  'src/core/lib/iomgr/tcp_client_uv.cc',
-  'src/core/lib/iomgr/tcp_client_windows.cc',
-  'src/core/lib/iomgr/tcp_posix.cc',
-  'src/core/lib/iomgr/tcp_server_posix.cc',
-  'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
-  'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
-  'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
-  'src/core/lib/iomgr/tcp_server_uv.cc',
-  'src/core/lib/iomgr/tcp_server_windows.cc',
-  'src/core/lib/iomgr/tcp_uv.cc',
-  'src/core/lib/iomgr/tcp_windows.cc',
-  'src/core/lib/iomgr/time_averaged_stats.cc',
-  'src/core/lib/iomgr/timer_generic.cc',
-  'src/core/lib/iomgr/timer_heap.cc',
-  'src/core/lib/iomgr/timer_manager.cc',
-  'src/core/lib/iomgr/timer_uv.cc',
-  'src/core/lib/iomgr/udp_server.cc',
-  'src/core/lib/iomgr/unix_sockets_posix.cc',
-  'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
-  'src/core/lib/iomgr/wakeup_fd_cv.cc',
-  'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
-  'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
-  'src/core/lib/iomgr/wakeup_fd_pipe.cc',
-  'src/core/lib/iomgr/wakeup_fd_posix.cc',
-  'src/core/lib/json/json.cc',
-  'src/core/lib/json/json_reader.cc',
-  'src/core/lib/json/json_string.cc',
-  'src/core/lib/json/json_writer.cc',
-  'src/core/lib/slice/b64.cc',
-  'src/core/lib/slice/percent_encoding.cc',
-  'src/core/lib/slice/slice.cc',
-  'src/core/lib/slice/slice_buffer.cc',
-  'src/core/lib/slice/slice_hash_table.cc',
-  'src/core/lib/slice/slice_intern.cc',
-  'src/core/lib/slice/slice_string_helpers.cc',
-  'src/core/lib/surface/alarm.cc',
-  'src/core/lib/surface/api_trace.cc',
-  'src/core/lib/surface/byte_buffer.cc',
-  'src/core/lib/surface/byte_buffer_reader.cc',
-  'src/core/lib/surface/call.cc',
-  'src/core/lib/surface/call_details.cc',
-  'src/core/lib/surface/call_log_batch.cc',
-  'src/core/lib/surface/channel.cc',
-  'src/core/lib/surface/channel_init.cc',
-  'src/core/lib/surface/channel_ping.cc',
-  'src/core/lib/surface/channel_stack_type.cc',
-  'src/core/lib/surface/completion_queue.cc',
-  'src/core/lib/surface/completion_queue_factory.cc',
-  'src/core/lib/surface/event_string.cc',
-  'src/core/lib/surface/lame_client.cc',
-  'src/core/lib/surface/metadata_array.cc',
-  'src/core/lib/surface/server.cc',
-  'src/core/lib/surface/validate_metadata.cc',
-  'src/core/lib/surface/version.cc',
-  'src/core/lib/transport/bdp_estimator.cc',
-  'src/core/lib/transport/byte_stream.cc',
-  'src/core/lib/transport/connectivity_state.cc',
-  'src/core/lib/transport/error_utils.cc',
-  'src/core/lib/transport/metadata.cc',
-  'src/core/lib/transport/metadata_batch.cc',
-  'src/core/lib/transport/pid_controller.cc',
-  'src/core/lib/transport/service_config.cc',
-  'src/core/lib/transport/static_metadata.cc',
-  'src/core/lib/transport/status_conversion.cc',
-  'src/core/lib/transport/timeout_encoding.cc',
-  'src/core/lib/transport/transport.cc',
-  'src/core/lib/transport/transport_op_string.cc',
-  'src/core/lib/debug/trace.cc',
-  'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc',
-  'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
-  'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
-  'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
-  'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
-  'src/core/ext/transport/chttp2/transport/flow_control.cc',
-  'src/core/ext/transport/chttp2/transport/frame_data.cc',
-  'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
-  'src/core/ext/transport/chttp2/transport/frame_ping.cc',
-  'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
-  'src/core/ext/transport/chttp2/transport/frame_settings.cc',
-  'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
-  'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
-  'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
-  'src/core/ext/transport/chttp2/transport/hpack_table.cc',
-  'src/core/ext/transport/chttp2/transport/http2_settings.cc',
-  'src/core/ext/transport/chttp2/transport/huffsyms.cc',
-  'src/core/ext/transport/chttp2/transport/incoming_metadata.cc',
-  'src/core/ext/transport/chttp2/transport/parsing.cc',
-  'src/core/ext/transport/chttp2/transport/stream_lists.cc',
-  'src/core/ext/transport/chttp2/transport/stream_map.cc',
-  'src/core/ext/transport/chttp2/transport/varint.cc',
-  'src/core/ext/transport/chttp2/transport/writing.cc',
-  'src/core/ext/transport/chttp2/alpn/alpn.cc',
-  'src/core/ext/filters/http/client/http_client_filter.cc',
-  'src/core/ext/filters/http/http_filters_plugin.cc',
-  'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
-  'src/core/ext/filters/http/server/http_server_filter.cc',
-  'src/core/lib/http/httpcli_security_connector.cc',
-  'src/core/lib/security/context/security_context.cc',
-  'src/core/lib/security/credentials/composite/composite_credentials.cc',
-  'src/core/lib/security/credentials/credentials.cc',
-  'src/core/lib/security/credentials/credentials_metadata.cc',
-  'src/core/lib/security/credentials/fake/fake_credentials.cc',
-  'src/core/lib/security/credentials/google_default/credentials_generic.cc',
-  'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
-  'src/core/lib/security/credentials/iam/iam_credentials.cc',
-  'src/core/lib/security/credentials/jwt/json_token.cc',
-  'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
-  'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
-  'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
-  'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
-  'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
-  'src/core/lib/security/transport/client_auth_filter.cc',
-  'src/core/lib/security/transport/lb_targets_info.cc',
-  'src/core/lib/security/transport/secure_endpoint.cc',
-  'src/core/lib/security/transport/security_connector.cc',
-  'src/core/lib/security/transport/security_handshaker.cc',
-  'src/core/lib/security/transport/server_auth_filter.cc',
-  'src/core/lib/security/transport/tsi_error.cc',
-  'src/core/lib/security/util/json_util.cc',
-  'src/core/lib/surface/init_secure.cc',
-  'src/core/tsi/fake_transport_security.cc',
-  'src/core/tsi/gts_transport_security.cc',
-  'src/core/tsi/ssl_transport_security.cc',
-  'src/core/tsi/transport_security_grpc.cc',
-  'src/core/tsi/transport_security.cc',
-  'src/core/tsi/transport_security_adapter.cc',
-  'src/core/ext/transport/chttp2/server/chttp2_server.cc',
-  'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
-  'src/core/ext/filters/client_channel/backup_poller.cc',
-  'src/core/ext/filters/client_channel/channel_connectivity.cc',
-  'src/core/ext/filters/client_channel/client_channel.cc',
-  'src/core/ext/filters/client_channel/client_channel_factory.cc',
-  'src/core/ext/filters/client_channel/client_channel_plugin.cc',
-  'src/core/ext/filters/client_channel/connector.cc',
-  'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
-  'src/core/ext/filters/client_channel/http_proxy.cc',
-  'src/core/ext/filters/client_channel/lb_policy.cc',
-  'src/core/ext/filters/client_channel/lb_policy_factory.cc',
-  'src/core/ext/filters/client_channel/lb_policy_registry.cc',
-  'src/core/ext/filters/client_channel/parse_address.cc',
-  'src/core/ext/filters/client_channel/proxy_mapper.cc',
-  'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
-  'src/core/ext/filters/client_channel/resolver.cc',
-  'src/core/ext/filters/client_channel/resolver_factory.cc',
-  'src/core/ext/filters/client_channel/resolver_registry.cc',
-  'src/core/ext/filters/client_channel/retry_throttle.cc',
-  'src/core/ext/filters/client_channel/subchannel.cc',
-  'src/core/ext/filters/client_channel/subchannel_index.cc',
-  'src/core/ext/filters/client_channel/uri_parser.cc',
-  'src/core/ext/filters/deadline/deadline_filter.cc',
-  'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
-  'src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc',
-  'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc',
-  'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
-  'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
-  'src/core/ext/transport/inproc/inproc_plugin.cc',
-  'src/core/ext/transport/inproc/inproc_transport.cc',
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
-  'third_party/nanopb/pb_common.c',
-  'third_party/nanopb/pb_decode.c',
-  'third_party/nanopb/pb_encode.c',
-  'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
-  'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
-  'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
-  'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
-  'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
-  'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
-  'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
-  'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc',
-  'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
-  'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
-  'src/core/ext/filters/load_reporting/server_load_reporting_filter.cc',
-  'src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc',
-  'src/core/ext/census/grpc_context.cc',
-  'src/core/ext/filters/max_age/max_age_filter.cc',
-  'src/core/ext/filters/message_size/message_size_filter.cc',
-  'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
-  'src/core/ext/filters/workarounds/workaround_utils.cc',
-  'src/core/plugin_registry/grpc_plugin_registry.cc',
-  'src/boringssl/err_data.c',
-  'third_party/boringssl/crypto/aes/aes.c',
-  'third_party/boringssl/crypto/aes/key_wrap.c',
-  'third_party/boringssl/crypto/aes/mode_wrappers.c',
-  'third_party/boringssl/crypto/asn1/a_bitstr.c',
-  'third_party/boringssl/crypto/asn1/a_bool.c',
-  'third_party/boringssl/crypto/asn1/a_d2i_fp.c',
-  'third_party/boringssl/crypto/asn1/a_dup.c',
-  'third_party/boringssl/crypto/asn1/a_enum.c',
-  'third_party/boringssl/crypto/asn1/a_gentm.c',
-  'third_party/boringssl/crypto/asn1/a_i2d_fp.c',
-  'third_party/boringssl/crypto/asn1/a_int.c',
-  'third_party/boringssl/crypto/asn1/a_mbstr.c',
-  'third_party/boringssl/crypto/asn1/a_object.c',
-  'third_party/boringssl/crypto/asn1/a_octet.c',
-  'third_party/boringssl/crypto/asn1/a_print.c',
-  'third_party/boringssl/crypto/asn1/a_strnid.c',
-  'third_party/boringssl/crypto/asn1/a_time.c',
-  'third_party/boringssl/crypto/asn1/a_type.c',
-  'third_party/boringssl/crypto/asn1/a_utctm.c',
-  'third_party/boringssl/crypto/asn1/a_utf8.c',
-  'third_party/boringssl/crypto/asn1/asn1_lib.c',
-  'third_party/boringssl/crypto/asn1/asn1_par.c',
-  'third_party/boringssl/crypto/asn1/asn_pack.c',
-  'third_party/boringssl/crypto/asn1/f_enum.c',
-  'third_party/boringssl/crypto/asn1/f_int.c',
-  'third_party/boringssl/crypto/asn1/f_string.c',
-  'third_party/boringssl/crypto/asn1/t_bitst.c',
-  'third_party/boringssl/crypto/asn1/tasn_dec.c',
-  'third_party/boringssl/crypto/asn1/tasn_enc.c',
-  'third_party/boringssl/crypto/asn1/tasn_fre.c',
-  'third_party/boringssl/crypto/asn1/tasn_new.c',
-  'third_party/boringssl/crypto/asn1/tasn_typ.c',
-  'third_party/boringssl/crypto/asn1/tasn_utl.c',
-  'third_party/boringssl/crypto/asn1/time_support.c',
-  'third_party/boringssl/crypto/asn1/x_bignum.c',
-  'third_party/boringssl/crypto/asn1/x_long.c',
-  'third_party/boringssl/crypto/base64/base64.c',
-  'third_party/boringssl/crypto/bio/bio.c',
-  'third_party/boringssl/crypto/bio/bio_mem.c',
-  'third_party/boringssl/crypto/bio/connect.c',
-  'third_party/boringssl/crypto/bio/fd.c',
-  'third_party/boringssl/crypto/bio/file.c',
-  'third_party/boringssl/crypto/bio/hexdump.c',
-  'third_party/boringssl/crypto/bio/pair.c',
-  'third_party/boringssl/crypto/bio/printf.c',
-  'third_party/boringssl/crypto/bio/socket.c',
-  'third_party/boringssl/crypto/bio/socket_helper.c',
-  'third_party/boringssl/crypto/bn/add.c',
-  'third_party/boringssl/crypto/bn/asm/x86_64-gcc.c',
-  'third_party/boringssl/crypto/bn/bn.c',
-  'third_party/boringssl/crypto/bn/bn_asn1.c',
-  'third_party/boringssl/crypto/bn/cmp.c',
-  'third_party/boringssl/crypto/bn/convert.c',
-  'third_party/boringssl/crypto/bn/ctx.c',
-  'third_party/boringssl/crypto/bn/div.c',
-  'third_party/boringssl/crypto/bn/exponentiation.c',
-  'third_party/boringssl/crypto/bn/gcd.c',
-  'third_party/boringssl/crypto/bn/generic.c',
-  'third_party/boringssl/crypto/bn/kronecker.c',
-  'third_party/boringssl/crypto/bn/montgomery.c',
-  'third_party/boringssl/crypto/bn/montgomery_inv.c',
-  'third_party/boringssl/crypto/bn/mul.c',
-  'third_party/boringssl/crypto/bn/prime.c',
-  'third_party/boringssl/crypto/bn/random.c',
-  'third_party/boringssl/crypto/bn/rsaz_exp.c',
-  'third_party/boringssl/crypto/bn/shift.c',
-  'third_party/boringssl/crypto/bn/sqrt.c',
-  'third_party/boringssl/crypto/buf/buf.c',
-  'third_party/boringssl/crypto/bytestring/asn1_compat.c',
-  'third_party/boringssl/crypto/bytestring/ber.c',
-  'third_party/boringssl/crypto/bytestring/cbb.c',
-  'third_party/boringssl/crypto/bytestring/cbs.c',
-  'third_party/boringssl/crypto/chacha/chacha.c',
-  'third_party/boringssl/crypto/cipher/aead.c',
-  'third_party/boringssl/crypto/cipher/cipher.c',
-  'third_party/boringssl/crypto/cipher/derive_key.c',
-  'third_party/boringssl/crypto/cipher/e_aes.c',
-  'third_party/boringssl/crypto/cipher/e_chacha20poly1305.c',
-  'third_party/boringssl/crypto/cipher/e_des.c',
-  'third_party/boringssl/crypto/cipher/e_null.c',
-  'third_party/boringssl/crypto/cipher/e_rc2.c',
-  'third_party/boringssl/crypto/cipher/e_rc4.c',
-  'third_party/boringssl/crypto/cipher/e_ssl3.c',
-  'third_party/boringssl/crypto/cipher/e_tls.c',
-  'third_party/boringssl/crypto/cipher/tls_cbc.c',
-  'third_party/boringssl/crypto/cmac/cmac.c',
-  'third_party/boringssl/crypto/conf/conf.c',
-  'third_party/boringssl/crypto/cpu-aarch64-linux.c',
-  'third_party/boringssl/crypto/cpu-arm-linux.c',
-  'third_party/boringssl/crypto/cpu-arm.c',
-  'third_party/boringssl/crypto/cpu-intel.c',
-  'third_party/boringssl/crypto/cpu-ppc64le.c',
-  'third_party/boringssl/crypto/crypto.c',
-  'third_party/boringssl/crypto/curve25519/curve25519.c',
-  'third_party/boringssl/crypto/curve25519/spake25519.c',
-  'third_party/boringssl/crypto/curve25519/x25519-x86_64.c',
-  'third_party/boringssl/crypto/des/des.c',
-  'third_party/boringssl/crypto/dh/check.c',
-  'third_party/boringssl/crypto/dh/dh.c',
-  'third_party/boringssl/crypto/dh/dh_asn1.c',
-  'third_party/boringssl/crypto/dh/params.c',
-  'third_party/boringssl/crypto/digest/digest.c',
-  'third_party/boringssl/crypto/digest/digests.c',
-  'third_party/boringssl/crypto/dsa/dsa.c',
-  'third_party/boringssl/crypto/dsa/dsa_asn1.c',
-  'third_party/boringssl/crypto/ec/ec.c',
-  'third_party/boringssl/crypto/ec/ec_asn1.c',
-  'third_party/boringssl/crypto/ec/ec_key.c',
-  'third_party/boringssl/crypto/ec/ec_montgomery.c',
-  'third_party/boringssl/crypto/ec/oct.c',
-  'third_party/boringssl/crypto/ec/p224-64.c',
-  'third_party/boringssl/crypto/ec/p256-64.c',
-  'third_party/boringssl/crypto/ec/p256-x86_64.c',
-  'third_party/boringssl/crypto/ec/simple.c',
-  'third_party/boringssl/crypto/ec/util-64.c',
-  'third_party/boringssl/crypto/ec/wnaf.c',
-  'third_party/boringssl/crypto/ecdh/ecdh.c',
-  'third_party/boringssl/crypto/ecdsa/ecdsa.c',
-  'third_party/boringssl/crypto/ecdsa/ecdsa_asn1.c',
-  'third_party/boringssl/crypto/engine/engine.c',
-  'third_party/boringssl/crypto/err/err.c',
-  'third_party/boringssl/crypto/evp/digestsign.c',
-  'third_party/boringssl/crypto/evp/evp.c',
-  'third_party/boringssl/crypto/evp/evp_asn1.c',
-  'third_party/boringssl/crypto/evp/evp_ctx.c',
-  'third_party/boringssl/crypto/evp/p_dsa_asn1.c',
-  'third_party/boringssl/crypto/evp/p_ec.c',
-  'third_party/boringssl/crypto/evp/p_ec_asn1.c',
-  'third_party/boringssl/crypto/evp/p_rsa.c',
-  'third_party/boringssl/crypto/evp/p_rsa_asn1.c',
-  'third_party/boringssl/crypto/evp/pbkdf.c',
-  'third_party/boringssl/crypto/evp/print.c',
-  'third_party/boringssl/crypto/evp/sign.c',
-  'third_party/boringssl/crypto/ex_data.c',
-  'third_party/boringssl/crypto/hkdf/hkdf.c',
-  'third_party/boringssl/crypto/hmac/hmac.c',
-  'third_party/boringssl/crypto/lhash/lhash.c',
-  'third_party/boringssl/crypto/md4/md4.c',
-  'third_party/boringssl/crypto/md5/md5.c',
-  'third_party/boringssl/crypto/mem.c',
-  'third_party/boringssl/crypto/modes/cbc.c',
-  'third_party/boringssl/crypto/modes/cfb.c',
-  'third_party/boringssl/crypto/modes/ctr.c',
-  'third_party/boringssl/crypto/modes/gcm.c',
-  'third_party/boringssl/crypto/modes/ofb.c',
-  'third_party/boringssl/crypto/modes/polyval.c',
-  'third_party/boringssl/crypto/obj/obj.c',
-  'third_party/boringssl/crypto/obj/obj_xref.c',
-  'third_party/boringssl/crypto/pem/pem_all.c',
-  'third_party/boringssl/crypto/pem/pem_info.c',
-  'third_party/boringssl/crypto/pem/pem_lib.c',
-  'third_party/boringssl/crypto/pem/pem_oth.c',
-  'third_party/boringssl/crypto/pem/pem_pk8.c',
-  'third_party/boringssl/crypto/pem/pem_pkey.c',
-  'third_party/boringssl/crypto/pem/pem_x509.c',
-  'third_party/boringssl/crypto/pem/pem_xaux.c',
-  'third_party/boringssl/crypto/pkcs8/p5_pbev2.c',
-  'third_party/boringssl/crypto/pkcs8/p8_pkey.c',
-  'third_party/boringssl/crypto/pkcs8/pkcs8.c',
-  'third_party/boringssl/crypto/poly1305/poly1305.c',
-  'third_party/boringssl/crypto/poly1305/poly1305_arm.c',
-  'third_party/boringssl/crypto/poly1305/poly1305_vec.c',
-  'third_party/boringssl/crypto/pool/pool.c',
-  'third_party/boringssl/crypto/rand/deterministic.c',
-  'third_party/boringssl/crypto/rand/fuchsia.c',
-  'third_party/boringssl/crypto/rand/rand.c',
-  'third_party/boringssl/crypto/rand/urandom.c',
-  'third_party/boringssl/crypto/rand/windows.c',
-  'third_party/boringssl/crypto/rc4/rc4.c',
-  'third_party/boringssl/crypto/refcount_c11.c',
-  'third_party/boringssl/crypto/refcount_lock.c',
-  'third_party/boringssl/crypto/rsa/blinding.c',
-  'third_party/boringssl/crypto/rsa/padding.c',
-  'third_party/boringssl/crypto/rsa/rsa.c',
-  'third_party/boringssl/crypto/rsa/rsa_asn1.c',
-  'third_party/boringssl/crypto/rsa/rsa_impl.c',
-  'third_party/boringssl/crypto/sha/sha1-altivec.c',
-  'third_party/boringssl/crypto/sha/sha1.c',
-  'third_party/boringssl/crypto/sha/sha256.c',
-  'third_party/boringssl/crypto/sha/sha512.c',
-  'third_party/boringssl/crypto/stack/stack.c',
-  'third_party/boringssl/crypto/thread.c',
-  'third_party/boringssl/crypto/thread_none.c',
-  'third_party/boringssl/crypto/thread_pthread.c',
-  'third_party/boringssl/crypto/thread_win.c',
-  'third_party/boringssl/crypto/x509/a_digest.c',
-  'third_party/boringssl/crypto/x509/a_sign.c',
-  'third_party/boringssl/crypto/x509/a_strex.c',
-  'third_party/boringssl/crypto/x509/a_verify.c',
-  'third_party/boringssl/crypto/x509/algorithm.c',
-  'third_party/boringssl/crypto/x509/asn1_gen.c',
-  'third_party/boringssl/crypto/x509/by_dir.c',
-  'third_party/boringssl/crypto/x509/by_file.c',
-  'third_party/boringssl/crypto/x509/i2d_pr.c',
-  'third_party/boringssl/crypto/x509/pkcs7.c',
-  'third_party/boringssl/crypto/x509/rsa_pss.c',
-  'third_party/boringssl/crypto/x509/t_crl.c',
-  'third_party/boringssl/crypto/x509/t_req.c',
-  'third_party/boringssl/crypto/x509/t_x509.c',
-  'third_party/boringssl/crypto/x509/t_x509a.c',
-  'third_party/boringssl/crypto/x509/x509.c',
-  'third_party/boringssl/crypto/x509/x509_att.c',
-  'third_party/boringssl/crypto/x509/x509_cmp.c',
-  'third_party/boringssl/crypto/x509/x509_d2.c',
-  'third_party/boringssl/crypto/x509/x509_def.c',
-  'third_party/boringssl/crypto/x509/x509_ext.c',
-  'third_party/boringssl/crypto/x509/x509_lu.c',
-  'third_party/boringssl/crypto/x509/x509_obj.c',
-  'third_party/boringssl/crypto/x509/x509_r2x.c',
-  'third_party/boringssl/crypto/x509/x509_req.c',
-  'third_party/boringssl/crypto/x509/x509_set.c',
-  'third_party/boringssl/crypto/x509/x509_trs.c',
-  'third_party/boringssl/crypto/x509/x509_txt.c',
-  'third_party/boringssl/crypto/x509/x509_v3.c',
-  'third_party/boringssl/crypto/x509/x509_vfy.c',
-  'third_party/boringssl/crypto/x509/x509_vpm.c',
-  'third_party/boringssl/crypto/x509/x509cset.c',
-  'third_party/boringssl/crypto/x509/x509name.c',
-  'third_party/boringssl/crypto/x509/x509rset.c',
-  'third_party/boringssl/crypto/x509/x509spki.c',
-  'third_party/boringssl/crypto/x509/x509type.c',
-  'third_party/boringssl/crypto/x509/x_algor.c',
-  'third_party/boringssl/crypto/x509/x_all.c',
-  'third_party/boringssl/crypto/x509/x_attrib.c',
-  'third_party/boringssl/crypto/x509/x_crl.c',
-  'third_party/boringssl/crypto/x509/x_exten.c',
-  'third_party/boringssl/crypto/x509/x_info.c',
-  'third_party/boringssl/crypto/x509/x_name.c',
-  'third_party/boringssl/crypto/x509/x_pkey.c',
-  'third_party/boringssl/crypto/x509/x_pubkey.c',
-  'third_party/boringssl/crypto/x509/x_req.c',
-  'third_party/boringssl/crypto/x509/x_sig.c',
-  'third_party/boringssl/crypto/x509/x_spki.c',
-  'third_party/boringssl/crypto/x509/x_val.c',
-  'third_party/boringssl/crypto/x509/x_x509.c',
-  'third_party/boringssl/crypto/x509/x_x509a.c',
-  'third_party/boringssl/crypto/x509v3/pcy_cache.c',
-  'third_party/boringssl/crypto/x509v3/pcy_data.c',
-  'third_party/boringssl/crypto/x509v3/pcy_lib.c',
-  'third_party/boringssl/crypto/x509v3/pcy_map.c',
-  'third_party/boringssl/crypto/x509v3/pcy_node.c',
-  'third_party/boringssl/crypto/x509v3/pcy_tree.c',
-  'third_party/boringssl/crypto/x509v3/v3_akey.c',
-  'third_party/boringssl/crypto/x509v3/v3_akeya.c',
-  'third_party/boringssl/crypto/x509v3/v3_alt.c',
-  'third_party/boringssl/crypto/x509v3/v3_bcons.c',
-  'third_party/boringssl/crypto/x509v3/v3_bitst.c',
-  'third_party/boringssl/crypto/x509v3/v3_conf.c',
-  'third_party/boringssl/crypto/x509v3/v3_cpols.c',
-  'third_party/boringssl/crypto/x509v3/v3_crld.c',
-  'third_party/boringssl/crypto/x509v3/v3_enum.c',
-  'third_party/boringssl/crypto/x509v3/v3_extku.c',
-  'third_party/boringssl/crypto/x509v3/v3_genn.c',
-  'third_party/boringssl/crypto/x509v3/v3_ia5.c',
-  'third_party/boringssl/crypto/x509v3/v3_info.c',
-  'third_party/boringssl/crypto/x509v3/v3_int.c',
-  'third_party/boringssl/crypto/x509v3/v3_lib.c',
-  'third_party/boringssl/crypto/x509v3/v3_ncons.c',
-  'third_party/boringssl/crypto/x509v3/v3_pci.c',
-  'third_party/boringssl/crypto/x509v3/v3_pcia.c',
-  'third_party/boringssl/crypto/x509v3/v3_pcons.c',
-  'third_party/boringssl/crypto/x509v3/v3_pku.c',
-  'third_party/boringssl/crypto/x509v3/v3_pmaps.c',
-  'third_party/boringssl/crypto/x509v3/v3_prn.c',
-  'third_party/boringssl/crypto/x509v3/v3_purp.c',
-  'third_party/boringssl/crypto/x509v3/v3_skey.c',
-  'third_party/boringssl/crypto/x509v3/v3_sxnet.c',
-  'third_party/boringssl/crypto/x509v3/v3_utl.c',
-  'third_party/boringssl/ssl/bio_ssl.c',
-  'third_party/boringssl/ssl/custom_extensions.c',
-  'third_party/boringssl/ssl/d1_both.c',
-  'third_party/boringssl/ssl/d1_lib.c',
-  'third_party/boringssl/ssl/d1_pkt.c',
-  'third_party/boringssl/ssl/d1_srtp.c',
-  'third_party/boringssl/ssl/dtls_method.c',
-  'third_party/boringssl/ssl/dtls_record.c',
-  'third_party/boringssl/ssl/handshake_client.c',
-  'third_party/boringssl/ssl/handshake_server.c',
-  'third_party/boringssl/ssl/s3_both.c',
-  'third_party/boringssl/ssl/s3_lib.c',
-  'third_party/boringssl/ssl/s3_pkt.c',
-  'third_party/boringssl/ssl/ssl_aead_ctx.c',
-  'third_party/boringssl/ssl/ssl_asn1.c',
-  'third_party/boringssl/ssl/ssl_buffer.c',
-  'third_party/boringssl/ssl/ssl_cert.c',
-  'third_party/boringssl/ssl/ssl_cipher.c',
-  'third_party/boringssl/ssl/ssl_ecdh.c',
-  'third_party/boringssl/ssl/ssl_file.c',
-  'third_party/boringssl/ssl/ssl_lib.c',
-  'third_party/boringssl/ssl/ssl_privkey.c',
-  'third_party/boringssl/ssl/ssl_privkey_cc.cc',
-  'third_party/boringssl/ssl/ssl_session.c',
-  'third_party/boringssl/ssl/ssl_stat.c',
-  'third_party/boringssl/ssl/ssl_transcript.c',
-  'third_party/boringssl/ssl/ssl_x509.c',
-  'third_party/boringssl/ssl/t1_enc.c',
-  'third_party/boringssl/ssl/t1_lib.c',
-  'third_party/boringssl/ssl/tls13_both.c',
-  'third_party/boringssl/ssl/tls13_client.c',
-  'third_party/boringssl/ssl/tls13_enc.c',
-  'third_party/boringssl/ssl/tls13_server.c',
-  'third_party/boringssl/ssl/tls_method.c',
-  'third_party/boringssl/ssl/tls_record.c',
-  'third_party/zlib/adler32.c',
-  'third_party/zlib/compress.c',
-  'third_party/zlib/crc32.c',
-  'third_party/zlib/deflate.c',
-  'third_party/zlib/gzclose.c',
-  'third_party/zlib/gzlib.c',
-  'third_party/zlib/gzread.c',
-  'third_party/zlib/gzwrite.c',
-  'third_party/zlib/infback.c',
-  'third_party/zlib/inffast.c',
-  'third_party/zlib/inflate.c',
-  'third_party/zlib/inftrees.c',
-  'third_party/zlib/trees.c',
-  'third_party/zlib/uncompr.c',
-  'third_party/zlib/zutil.c',
-  'third_party/cares/cares/ares__close_sockets.c',
-  'third_party/cares/cares/ares__get_hostent.c',
-  'third_party/cares/cares/ares__read_line.c',
-  'third_party/cares/cares/ares__timeval.c',
-  'third_party/cares/cares/ares_cancel.c',
-  'third_party/cares/cares/ares_create_query.c',
-  'third_party/cares/cares/ares_data.c',
-  'third_party/cares/cares/ares_destroy.c',
-  'third_party/cares/cares/ares_expand_name.c',
-  'third_party/cares/cares/ares_expand_string.c',
-  'third_party/cares/cares/ares_fds.c',
-  'third_party/cares/cares/ares_free_hostent.c',
-  'third_party/cares/cares/ares_free_string.c',
-  'third_party/cares/cares/ares_getenv.c',
-  'third_party/cares/cares/ares_gethostbyaddr.c',
-  'third_party/cares/cares/ares_gethostbyname.c',
-  'third_party/cares/cares/ares_getnameinfo.c',
-  'third_party/cares/cares/ares_getopt.c',
-  'third_party/cares/cares/ares_getsock.c',
-  'third_party/cares/cares/ares_init.c',
-  'third_party/cares/cares/ares_library_init.c',
-  'third_party/cares/cares/ares_llist.c',
-  'third_party/cares/cares/ares_mkquery.c',
-  'third_party/cares/cares/ares_nowarn.c',
-  'third_party/cares/cares/ares_options.c',
-  'third_party/cares/cares/ares_parse_a_reply.c',
-  'third_party/cares/cares/ares_parse_aaaa_reply.c',
-  'third_party/cares/cares/ares_parse_mx_reply.c',
-  'third_party/cares/cares/ares_parse_naptr_reply.c',
-  'third_party/cares/cares/ares_parse_ns_reply.c',
-  'third_party/cares/cares/ares_parse_ptr_reply.c',
-  'third_party/cares/cares/ares_parse_soa_reply.c',
-  'third_party/cares/cares/ares_parse_srv_reply.c',
-  'third_party/cares/cares/ares_parse_txt_reply.c',
-  'third_party/cares/cares/ares_platform.c',
-  'third_party/cares/cares/ares_process.c',
-  'third_party/cares/cares/ares_query.c',
-  'third_party/cares/cares/ares_search.c',
-  'third_party/cares/cares/ares_send.c',
-  'third_party/cares/cares/ares_strcasecmp.c',
-  'third_party/cares/cares/ares_strdup.c',
-  'third_party/cares/cares/ares_strerror.c',
-  'third_party/cares/cares/ares_timeout.c',
-  'third_party/cares/cares/ares_version.c',
-  'third_party/cares/cares/ares_writev.c',
-  'third_party/cares/cares/bitncmp.c',
-  'third_party/cares/cares/inet_net_pton.c',
-  'third_party/cares/cares/inet_ntop.c',
-  'third_party/cares/cares/windows_port.c',
+    'src/core/lib/profiling/basic_timers.cc',
+    'src/core/lib/profiling/stap_timers.cc',
+    'src/core/lib/support/alloc.cc',
+    'src/core/lib/support/arena.cc',
+    'src/core/lib/support/atm.cc',
+    'src/core/lib/support/avl.cc',
+    'src/core/lib/support/cmdline.cc',
+    'src/core/lib/support/cpu_iphone.cc',
+    'src/core/lib/support/cpu_linux.cc',
+    'src/core/lib/support/cpu_posix.cc',
+    'src/core/lib/support/cpu_windows.cc',
+    'src/core/lib/support/env_linux.cc',
+    'src/core/lib/support/env_posix.cc',
+    'src/core/lib/support/env_windows.cc',
+    'src/core/lib/support/fork.cc',
+    'src/core/lib/support/host_port.cc',
+    'src/core/lib/support/log.cc',
+    'src/core/lib/support/log_android.cc',
+    'src/core/lib/support/log_linux.cc',
+    'src/core/lib/support/log_posix.cc',
+    'src/core/lib/support/log_windows.cc',
+    'src/core/lib/support/mpscq.cc',
+    'src/core/lib/support/murmur_hash.cc',
+    'src/core/lib/support/string.cc',
+    'src/core/lib/support/string_posix.cc',
+    'src/core/lib/support/string_util_windows.cc',
+    'src/core/lib/support/string_windows.cc',
+    'src/core/lib/support/subprocess_posix.cc',
+    'src/core/lib/support/subprocess_windows.cc',
+    'src/core/lib/support/sync.cc',
+    'src/core/lib/support/sync_posix.cc',
+    'src/core/lib/support/sync_windows.cc',
+    'src/core/lib/support/thd.cc',
+    'src/core/lib/support/thd_posix.cc',
+    'src/core/lib/support/thd_windows.cc',
+    'src/core/lib/support/time.cc',
+    'src/core/lib/support/time_posix.cc',
+    'src/core/lib/support/time_precise.cc',
+    'src/core/lib/support/time_windows.cc',
+    'src/core/lib/support/tls_pthread.cc',
+    'src/core/lib/support/tmpfile_msys.cc',
+    'src/core/lib/support/tmpfile_posix.cc',
+    'src/core/lib/support/tmpfile_windows.cc',
+    'src/core/lib/support/wrap_memcpy.cc',
+    'src/core/lib/surface/init.cc',
+    'src/core/lib/backoff/backoff.cc',
+    'src/core/lib/channel/channel_args.cc',
+    'src/core/lib/channel/channel_stack.cc',
+    'src/core/lib/channel/channel_stack_builder.cc',
+    'src/core/lib/channel/connected_channel.cc',
+    'src/core/lib/channel/handshaker.cc',
+    'src/core/lib/channel/handshaker_factory.cc',
+    'src/core/lib/channel/handshaker_registry.cc',
+    'src/core/lib/compression/compression.cc',
+    'src/core/lib/compression/message_compress.cc',
+    'src/core/lib/compression/stream_compression.cc',
+    'src/core/lib/compression/stream_compression_gzip.cc',
+    'src/core/lib/compression/stream_compression_identity.cc',
+    'src/core/lib/debug/stats.cc',
+    'src/core/lib/debug/stats_data.cc',
+    'src/core/lib/http/format_request.cc',
+    'src/core/lib/http/httpcli.cc',
+    'src/core/lib/http/parser.cc',
+    'src/core/lib/iomgr/call_combiner.cc',
+    'src/core/lib/iomgr/combiner.cc',
+    'src/core/lib/iomgr/endpoint.cc',
+    'src/core/lib/iomgr/endpoint_pair_posix.cc',
+    'src/core/lib/iomgr/endpoint_pair_uv.cc',
+    'src/core/lib/iomgr/endpoint_pair_windows.cc',
+    'src/core/lib/iomgr/error.cc',
+    'src/core/lib/iomgr/ev_epoll1_linux.cc',
+    'src/core/lib/iomgr/ev_epollex_linux.cc',
+    'src/core/lib/iomgr/ev_epollsig_linux.cc',
+    'src/core/lib/iomgr/ev_poll_posix.cc',
+    'src/core/lib/iomgr/ev_posix.cc',
+    'src/core/lib/iomgr/ev_windows.cc',
+    'src/core/lib/iomgr/exec_ctx.cc',
+    'src/core/lib/iomgr/executor.cc',
+    'src/core/lib/iomgr/fork_posix.cc',
+    'src/core/lib/iomgr/fork_windows.cc',
+    'src/core/lib/iomgr/gethostname_fallback.cc',
+    'src/core/lib/iomgr/gethostname_host_name_max.cc',
+    'src/core/lib/iomgr/gethostname_sysconf.cc',
+    'src/core/lib/iomgr/iocp_windows.cc',
+    'src/core/lib/iomgr/iomgr.cc',
+    'src/core/lib/iomgr/iomgr_posix.cc',
+    'src/core/lib/iomgr/iomgr_uv.cc',
+    'src/core/lib/iomgr/iomgr_windows.cc',
+    'src/core/lib/iomgr/is_epollexclusive_available.cc',
+    'src/core/lib/iomgr/load_file.cc',
+    'src/core/lib/iomgr/lockfree_event.cc',
+    'src/core/lib/iomgr/network_status_tracker.cc',
+    'src/core/lib/iomgr/polling_entity.cc',
+    'src/core/lib/iomgr/pollset_set_uv.cc',
+    'src/core/lib/iomgr/pollset_set_windows.cc',
+    'src/core/lib/iomgr/pollset_uv.cc',
+    'src/core/lib/iomgr/pollset_windows.cc',
+    'src/core/lib/iomgr/resolve_address_posix.cc',
+    'src/core/lib/iomgr/resolve_address_uv.cc',
+    'src/core/lib/iomgr/resolve_address_windows.cc',
+    'src/core/lib/iomgr/resource_quota.cc',
+    'src/core/lib/iomgr/sockaddr_utils.cc',
+    'src/core/lib/iomgr/socket_factory_posix.cc',
+    'src/core/lib/iomgr/socket_mutator.cc',
+    'src/core/lib/iomgr/socket_utils_common_posix.cc',
+    'src/core/lib/iomgr/socket_utils_linux.cc',
+    'src/core/lib/iomgr/socket_utils_posix.cc',
+    'src/core/lib/iomgr/socket_utils_uv.cc',
+    'src/core/lib/iomgr/socket_utils_windows.cc',
+    'src/core/lib/iomgr/socket_windows.cc',
+    'src/core/lib/iomgr/tcp_client_posix.cc',
+    'src/core/lib/iomgr/tcp_client_uv.cc',
+    'src/core/lib/iomgr/tcp_client_windows.cc',
+    'src/core/lib/iomgr/tcp_posix.cc',
+    'src/core/lib/iomgr/tcp_server_posix.cc',
+    'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
+    'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
+    'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
+    'src/core/lib/iomgr/tcp_server_uv.cc',
+    'src/core/lib/iomgr/tcp_server_windows.cc',
+    'src/core/lib/iomgr/tcp_uv.cc',
+    'src/core/lib/iomgr/tcp_windows.cc',
+    'src/core/lib/iomgr/time_averaged_stats.cc',
+    'src/core/lib/iomgr/timer_generic.cc',
+    'src/core/lib/iomgr/timer_heap.cc',
+    'src/core/lib/iomgr/timer_manager.cc',
+    'src/core/lib/iomgr/timer_uv.cc',
+    'src/core/lib/iomgr/udp_server.cc',
+    'src/core/lib/iomgr/unix_sockets_posix.cc',
+    'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
+    'src/core/lib/iomgr/wakeup_fd_cv.cc',
+    'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
+    'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
+    'src/core/lib/iomgr/wakeup_fd_pipe.cc',
+    'src/core/lib/iomgr/wakeup_fd_posix.cc',
+    'src/core/lib/json/json.cc',
+    'src/core/lib/json/json_reader.cc',
+    'src/core/lib/json/json_string.cc',
+    'src/core/lib/json/json_writer.cc',
+    'src/core/lib/slice/b64.cc',
+    'src/core/lib/slice/percent_encoding.cc',
+    'src/core/lib/slice/slice.cc',
+    'src/core/lib/slice/slice_buffer.cc',
+    'src/core/lib/slice/slice_hash_table.cc',
+    'src/core/lib/slice/slice_intern.cc',
+    'src/core/lib/slice/slice_string_helpers.cc',
+    'src/core/lib/surface/alarm.cc',
+    'src/core/lib/surface/api_trace.cc',
+    'src/core/lib/surface/byte_buffer.cc',
+    'src/core/lib/surface/byte_buffer_reader.cc',
+    'src/core/lib/surface/call.cc',
+    'src/core/lib/surface/call_details.cc',
+    'src/core/lib/surface/call_log_batch.cc',
+    'src/core/lib/surface/channel.cc',
+    'src/core/lib/surface/channel_init.cc',
+    'src/core/lib/surface/channel_ping.cc',
+    'src/core/lib/surface/channel_stack_type.cc',
+    'src/core/lib/surface/completion_queue.cc',
+    'src/core/lib/surface/completion_queue_factory.cc',
+    'src/core/lib/surface/event_string.cc',
+    'src/core/lib/surface/lame_client.cc',
+    'src/core/lib/surface/metadata_array.cc',
+    'src/core/lib/surface/server.cc',
+    'src/core/lib/surface/validate_metadata.cc',
+    'src/core/lib/surface/version.cc',
+    'src/core/lib/transport/bdp_estimator.cc',
+    'src/core/lib/transport/byte_stream.cc',
+    'src/core/lib/transport/connectivity_state.cc',
+    'src/core/lib/transport/error_utils.cc',
+    'src/core/lib/transport/metadata.cc',
+    'src/core/lib/transport/metadata_batch.cc',
+    'src/core/lib/transport/pid_controller.cc',
+    'src/core/lib/transport/service_config.cc',
+    'src/core/lib/transport/static_metadata.cc',
+    'src/core/lib/transport/status_conversion.cc',
+    'src/core/lib/transport/timeout_encoding.cc',
+    'src/core/lib/transport/transport.cc',
+    'src/core/lib/transport/transport_op_string.cc',
+    'src/core/lib/debug/trace.cc',
+    'src/core/ext/transport/chttp2/server/secure/server_secure_chttp2.cc',
+    'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
+    'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
+    'src/core/ext/transport/chttp2/transport/chttp2_plugin.cc',
+    'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
+    'src/core/ext/transport/chttp2/transport/flow_control.cc',
+    'src/core/ext/transport/chttp2/transport/frame_data.cc',
+    'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
+    'src/core/ext/transport/chttp2/transport/frame_ping.cc',
+    'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
+    'src/core/ext/transport/chttp2/transport/frame_settings.cc',
+    'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
+    'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
+    'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
+    'src/core/ext/transport/chttp2/transport/hpack_table.cc',
+    'src/core/ext/transport/chttp2/transport/http2_settings.cc',
+    'src/core/ext/transport/chttp2/transport/huffsyms.cc',
+    'src/core/ext/transport/chttp2/transport/incoming_metadata.cc',
+    'src/core/ext/transport/chttp2/transport/parsing.cc',
+    'src/core/ext/transport/chttp2/transport/stream_lists.cc',
+    'src/core/ext/transport/chttp2/transport/stream_map.cc',
+    'src/core/ext/transport/chttp2/transport/varint.cc',
+    'src/core/ext/transport/chttp2/transport/writing.cc',
+    'src/core/ext/transport/chttp2/alpn/alpn.cc',
+    'src/core/ext/filters/http/client/http_client_filter.cc',
+    'src/core/ext/filters/http/http_filters_plugin.cc',
+    'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
+    'src/core/ext/filters/http/server/http_server_filter.cc',
+    'src/core/lib/http/httpcli_security_connector.cc',
+    'src/core/lib/security/context/security_context.cc',
+    'src/core/lib/security/credentials/composite/composite_credentials.cc',
+    'src/core/lib/security/credentials/credentials.cc',
+    'src/core/lib/security/credentials/credentials_metadata.cc',
+    'src/core/lib/security/credentials/fake/fake_credentials.cc',
+    'src/core/lib/security/credentials/google_default/credentials_generic.cc',
+    'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
+    'src/core/lib/security/credentials/iam/iam_credentials.cc',
+    'src/core/lib/security/credentials/jwt/json_token.cc',
+    'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
+    'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
+    'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
+    'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
+    'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
+    'src/core/lib/security/transport/client_auth_filter.cc',
+    'src/core/lib/security/transport/lb_targets_info.cc',
+    'src/core/lib/security/transport/secure_endpoint.cc',
+    'src/core/lib/security/transport/security_connector.cc',
+    'src/core/lib/security/transport/security_handshaker.cc',
+    'src/core/lib/security/transport/server_auth_filter.cc',
+    'src/core/lib/security/transport/tsi_error.cc',
+    'src/core/lib/security/util/json_util.cc',
+    'src/core/lib/surface/init_secure.cc',
+    'src/core/tsi/fake_transport_security.cc',
+    'src/core/tsi/gts_transport_security.cc',
+    'src/core/tsi/ssl_transport_security.cc',
+    'src/core/tsi/transport_security_grpc.cc',
+    'src/core/tsi/transport_security.cc',
+    'src/core/tsi/transport_security_adapter.cc',
+    'src/core/ext/transport/chttp2/server/chttp2_server.cc',
+    'src/core/ext/transport/chttp2/client/secure/secure_channel_create.cc',
+    'src/core/ext/filters/client_channel/backup_poller.cc',
+    'src/core/ext/filters/client_channel/channel_connectivity.cc',
+    'src/core/ext/filters/client_channel/client_channel.cc',
+    'src/core/ext/filters/client_channel/client_channel_factory.cc',
+    'src/core/ext/filters/client_channel/client_channel_plugin.cc',
+    'src/core/ext/filters/client_channel/connector.cc',
+    'src/core/ext/filters/client_channel/http_connect_handshaker.cc',
+    'src/core/ext/filters/client_channel/http_proxy.cc',
+    'src/core/ext/filters/client_channel/lb_policy.cc',
+    'src/core/ext/filters/client_channel/lb_policy_factory.cc',
+    'src/core/ext/filters/client_channel/lb_policy_registry.cc',
+    'src/core/ext/filters/client_channel/parse_address.cc',
+    'src/core/ext/filters/client_channel/proxy_mapper.cc',
+    'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
+    'src/core/ext/filters/client_channel/resolver.cc',
+    'src/core/ext/filters/client_channel/resolver_factory.cc',
+    'src/core/ext/filters/client_channel/resolver_registry.cc',
+    'src/core/ext/filters/client_channel/retry_throttle.cc',
+    'src/core/ext/filters/client_channel/subchannel.cc',
+    'src/core/ext/filters/client_channel/subchannel_index.cc',
+    'src/core/ext/filters/client_channel/uri_parser.cc',
+    'src/core/ext/filters/deadline/deadline_filter.cc',
+    'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
+    'src/core/ext/transport/chttp2/server/insecure/server_chttp2.cc',
+    'src/core/ext/transport/chttp2/server/insecure/server_chttp2_posix.cc',
+    'src/core/ext/transport/chttp2/client/insecure/channel_create.cc',
+    'src/core/ext/transport/chttp2/client/insecure/channel_create_posix.cc',
+    'src/core/ext/transport/inproc/inproc_plugin.cc',
+    'src/core/ext/transport/inproc/inproc_transport.cc',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_channel_secure.cc',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
+    'third_party/nanopb/pb_common.c',
+    'third_party/nanopb/pb_decode.c',
+    'third_party/nanopb/pb_encode.c',
+    'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
+    'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
+    'src/core/ext/filters/client_channel/lb_policy/subchannel_list.cc',
+    'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
+    'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
+    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
+    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
+    'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_fallback.cc',
+    'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
+    'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
+    'src/core/ext/filters/load_reporting/server_load_reporting_filter.cc',
+    'src/core/ext/filters/load_reporting/server_load_reporting_plugin.cc',
+    'src/core/ext/census/grpc_context.cc',
+    'src/core/ext/filters/max_age/max_age_filter.cc',
+    'src/core/ext/filters/message_size/message_size_filter.cc',
+    'src/core/ext/filters/workarounds/workaround_cronet_compression_filter.cc',
+    'src/core/ext/filters/workarounds/workaround_utils.cc',
+    'src/core/plugin_registry/grpc_plugin_registry.cc',
+    'src/boringssl/err_data.c',
+    'third_party/boringssl/crypto/aes/aes.c',
+    'third_party/boringssl/crypto/aes/key_wrap.c',
+    'third_party/boringssl/crypto/aes/mode_wrappers.c',
+    'third_party/boringssl/crypto/asn1/a_bitstr.c',
+    'third_party/boringssl/crypto/asn1/a_bool.c',
+    'third_party/boringssl/crypto/asn1/a_d2i_fp.c',
+    'third_party/boringssl/crypto/asn1/a_dup.c',
+    'third_party/boringssl/crypto/asn1/a_enum.c',
+    'third_party/boringssl/crypto/asn1/a_gentm.c',
+    'third_party/boringssl/crypto/asn1/a_i2d_fp.c',
+    'third_party/boringssl/crypto/asn1/a_int.c',
+    'third_party/boringssl/crypto/asn1/a_mbstr.c',
+    'third_party/boringssl/crypto/asn1/a_object.c',
+    'third_party/boringssl/crypto/asn1/a_octet.c',
+    'third_party/boringssl/crypto/asn1/a_print.c',
+    'third_party/boringssl/crypto/asn1/a_strnid.c',
+    'third_party/boringssl/crypto/asn1/a_time.c',
+    'third_party/boringssl/crypto/asn1/a_type.c',
+    'third_party/boringssl/crypto/asn1/a_utctm.c',
+    'third_party/boringssl/crypto/asn1/a_utf8.c',
+    'third_party/boringssl/crypto/asn1/asn1_lib.c',
+    'third_party/boringssl/crypto/asn1/asn1_par.c',
+    'third_party/boringssl/crypto/asn1/asn_pack.c',
+    'third_party/boringssl/crypto/asn1/f_enum.c',
+    'third_party/boringssl/crypto/asn1/f_int.c',
+    'third_party/boringssl/crypto/asn1/f_string.c',
+    'third_party/boringssl/crypto/asn1/t_bitst.c',
+    'third_party/boringssl/crypto/asn1/tasn_dec.c',
+    'third_party/boringssl/crypto/asn1/tasn_enc.c',
+    'third_party/boringssl/crypto/asn1/tasn_fre.c',
+    'third_party/boringssl/crypto/asn1/tasn_new.c',
+    'third_party/boringssl/crypto/asn1/tasn_typ.c',
+    'third_party/boringssl/crypto/asn1/tasn_utl.c',
+    'third_party/boringssl/crypto/asn1/time_support.c',
+    'third_party/boringssl/crypto/asn1/x_bignum.c',
+    'third_party/boringssl/crypto/asn1/x_long.c',
+    'third_party/boringssl/crypto/base64/base64.c',
+    'third_party/boringssl/crypto/bio/bio.c',
+    'third_party/boringssl/crypto/bio/bio_mem.c',
+    'third_party/boringssl/crypto/bio/connect.c',
+    'third_party/boringssl/crypto/bio/fd.c',
+    'third_party/boringssl/crypto/bio/file.c',
+    'third_party/boringssl/crypto/bio/hexdump.c',
+    'third_party/boringssl/crypto/bio/pair.c',
+    'third_party/boringssl/crypto/bio/printf.c',
+    'third_party/boringssl/crypto/bio/socket.c',
+    'third_party/boringssl/crypto/bio/socket_helper.c',
+    'third_party/boringssl/crypto/bn/add.c',
+    'third_party/boringssl/crypto/bn/asm/x86_64-gcc.c',
+    'third_party/boringssl/crypto/bn/bn.c',
+    'third_party/boringssl/crypto/bn/bn_asn1.c',
+    'third_party/boringssl/crypto/bn/cmp.c',
+    'third_party/boringssl/crypto/bn/convert.c',
+    'third_party/boringssl/crypto/bn/ctx.c',
+    'third_party/boringssl/crypto/bn/div.c',
+    'third_party/boringssl/crypto/bn/exponentiation.c',
+    'third_party/boringssl/crypto/bn/gcd.c',
+    'third_party/boringssl/crypto/bn/generic.c',
+    'third_party/boringssl/crypto/bn/kronecker.c',
+    'third_party/boringssl/crypto/bn/montgomery.c',
+    'third_party/boringssl/crypto/bn/montgomery_inv.c',
+    'third_party/boringssl/crypto/bn/mul.c',
+    'third_party/boringssl/crypto/bn/prime.c',
+    'third_party/boringssl/crypto/bn/random.c',
+    'third_party/boringssl/crypto/bn/rsaz_exp.c',
+    'third_party/boringssl/crypto/bn/shift.c',
+    'third_party/boringssl/crypto/bn/sqrt.c',
+    'third_party/boringssl/crypto/buf/buf.c',
+    'third_party/boringssl/crypto/bytestring/asn1_compat.c',
+    'third_party/boringssl/crypto/bytestring/ber.c',
+    'third_party/boringssl/crypto/bytestring/cbb.c',
+    'third_party/boringssl/crypto/bytestring/cbs.c',
+    'third_party/boringssl/crypto/chacha/chacha.c',
+    'third_party/boringssl/crypto/cipher/aead.c',
+    'third_party/boringssl/crypto/cipher/cipher.c',
+    'third_party/boringssl/crypto/cipher/derive_key.c',
+    'third_party/boringssl/crypto/cipher/e_aes.c',
+    'third_party/boringssl/crypto/cipher/e_chacha20poly1305.c',
+    'third_party/boringssl/crypto/cipher/e_des.c',
+    'third_party/boringssl/crypto/cipher/e_null.c',
+    'third_party/boringssl/crypto/cipher/e_rc2.c',
+    'third_party/boringssl/crypto/cipher/e_rc4.c',
+    'third_party/boringssl/crypto/cipher/e_ssl3.c',
+    'third_party/boringssl/crypto/cipher/e_tls.c',
+    'third_party/boringssl/crypto/cipher/tls_cbc.c',
+    'third_party/boringssl/crypto/cmac/cmac.c',
+    'third_party/boringssl/crypto/conf/conf.c',
+    'third_party/boringssl/crypto/cpu-aarch64-linux.c',
+    'third_party/boringssl/crypto/cpu-arm-linux.c',
+    'third_party/boringssl/crypto/cpu-arm.c',
+    'third_party/boringssl/crypto/cpu-intel.c',
+    'third_party/boringssl/crypto/cpu-ppc64le.c',
+    'third_party/boringssl/crypto/crypto.c',
+    'third_party/boringssl/crypto/curve25519/curve25519.c',
+    'third_party/boringssl/crypto/curve25519/spake25519.c',
+    'third_party/boringssl/crypto/curve25519/x25519-x86_64.c',
+    'third_party/boringssl/crypto/des/des.c',
+    'third_party/boringssl/crypto/dh/check.c',
+    'third_party/boringssl/crypto/dh/dh.c',
+    'third_party/boringssl/crypto/dh/dh_asn1.c',
+    'third_party/boringssl/crypto/dh/params.c',
+    'third_party/boringssl/crypto/digest/digest.c',
+    'third_party/boringssl/crypto/digest/digests.c',
+    'third_party/boringssl/crypto/dsa/dsa.c',
+    'third_party/boringssl/crypto/dsa/dsa_asn1.c',
+    'third_party/boringssl/crypto/ec/ec.c',
+    'third_party/boringssl/crypto/ec/ec_asn1.c',
+    'third_party/boringssl/crypto/ec/ec_key.c',
+    'third_party/boringssl/crypto/ec/ec_montgomery.c',
+    'third_party/boringssl/crypto/ec/oct.c',
+    'third_party/boringssl/crypto/ec/p224-64.c',
+    'third_party/boringssl/crypto/ec/p256-64.c',
+    'third_party/boringssl/crypto/ec/p256-x86_64.c',
+    'third_party/boringssl/crypto/ec/simple.c',
+    'third_party/boringssl/crypto/ec/util-64.c',
+    'third_party/boringssl/crypto/ec/wnaf.c',
+    'third_party/boringssl/crypto/ecdh/ecdh.c',
+    'third_party/boringssl/crypto/ecdsa/ecdsa.c',
+    'third_party/boringssl/crypto/ecdsa/ecdsa_asn1.c',
+    'third_party/boringssl/crypto/engine/engine.c',
+    'third_party/boringssl/crypto/err/err.c',
+    'third_party/boringssl/crypto/evp/digestsign.c',
+    'third_party/boringssl/crypto/evp/evp.c',
+    'third_party/boringssl/crypto/evp/evp_asn1.c',
+    'third_party/boringssl/crypto/evp/evp_ctx.c',
+    'third_party/boringssl/crypto/evp/p_dsa_asn1.c',
+    'third_party/boringssl/crypto/evp/p_ec.c',
+    'third_party/boringssl/crypto/evp/p_ec_asn1.c',
+    'third_party/boringssl/crypto/evp/p_rsa.c',
+    'third_party/boringssl/crypto/evp/p_rsa_asn1.c',
+    'third_party/boringssl/crypto/evp/pbkdf.c',
+    'third_party/boringssl/crypto/evp/print.c',
+    'third_party/boringssl/crypto/evp/sign.c',
+    'third_party/boringssl/crypto/ex_data.c',
+    'third_party/boringssl/crypto/hkdf/hkdf.c',
+    'third_party/boringssl/crypto/hmac/hmac.c',
+    'third_party/boringssl/crypto/lhash/lhash.c',
+    'third_party/boringssl/crypto/md4/md4.c',
+    'third_party/boringssl/crypto/md5/md5.c',
+    'third_party/boringssl/crypto/mem.c',
+    'third_party/boringssl/crypto/modes/cbc.c',
+    'third_party/boringssl/crypto/modes/cfb.c',
+    'third_party/boringssl/crypto/modes/ctr.c',
+    'third_party/boringssl/crypto/modes/gcm.c',
+    'third_party/boringssl/crypto/modes/ofb.c',
+    'third_party/boringssl/crypto/modes/polyval.c',
+    'third_party/boringssl/crypto/obj/obj.c',
+    'third_party/boringssl/crypto/obj/obj_xref.c',
+    'third_party/boringssl/crypto/pem/pem_all.c',
+    'third_party/boringssl/crypto/pem/pem_info.c',
+    'third_party/boringssl/crypto/pem/pem_lib.c',
+    'third_party/boringssl/crypto/pem/pem_oth.c',
+    'third_party/boringssl/crypto/pem/pem_pk8.c',
+    'third_party/boringssl/crypto/pem/pem_pkey.c',
+    'third_party/boringssl/crypto/pem/pem_x509.c',
+    'third_party/boringssl/crypto/pem/pem_xaux.c',
+    'third_party/boringssl/crypto/pkcs8/p5_pbev2.c',
+    'third_party/boringssl/crypto/pkcs8/p8_pkey.c',
+    'third_party/boringssl/crypto/pkcs8/pkcs8.c',
+    'third_party/boringssl/crypto/poly1305/poly1305.c',
+    'third_party/boringssl/crypto/poly1305/poly1305_arm.c',
+    'third_party/boringssl/crypto/poly1305/poly1305_vec.c',
+    'third_party/boringssl/crypto/pool/pool.c',
+    'third_party/boringssl/crypto/rand/deterministic.c',
+    'third_party/boringssl/crypto/rand/fuchsia.c',
+    'third_party/boringssl/crypto/rand/rand.c',
+    'third_party/boringssl/crypto/rand/urandom.c',
+    'third_party/boringssl/crypto/rand/windows.c',
+    'third_party/boringssl/crypto/rc4/rc4.c',
+    'third_party/boringssl/crypto/refcount_c11.c',
+    'third_party/boringssl/crypto/refcount_lock.c',
+    'third_party/boringssl/crypto/rsa/blinding.c',
+    'third_party/boringssl/crypto/rsa/padding.c',
+    'third_party/boringssl/crypto/rsa/rsa.c',
+    'third_party/boringssl/crypto/rsa/rsa_asn1.c',
+    'third_party/boringssl/crypto/rsa/rsa_impl.c',
+    'third_party/boringssl/crypto/sha/sha1-altivec.c',
+    'third_party/boringssl/crypto/sha/sha1.c',
+    'third_party/boringssl/crypto/sha/sha256.c',
+    'third_party/boringssl/crypto/sha/sha512.c',
+    'third_party/boringssl/crypto/stack/stack.c',
+    'third_party/boringssl/crypto/thread.c',
+    'third_party/boringssl/crypto/thread_none.c',
+    'third_party/boringssl/crypto/thread_pthread.c',
+    'third_party/boringssl/crypto/thread_win.c',
+    'third_party/boringssl/crypto/x509/a_digest.c',
+    'third_party/boringssl/crypto/x509/a_sign.c',
+    'third_party/boringssl/crypto/x509/a_strex.c',
+    'third_party/boringssl/crypto/x509/a_verify.c',
+    'third_party/boringssl/crypto/x509/algorithm.c',
+    'third_party/boringssl/crypto/x509/asn1_gen.c',
+    'third_party/boringssl/crypto/x509/by_dir.c',
+    'third_party/boringssl/crypto/x509/by_file.c',
+    'third_party/boringssl/crypto/x509/i2d_pr.c',
+    'third_party/boringssl/crypto/x509/pkcs7.c',
+    'third_party/boringssl/crypto/x509/rsa_pss.c',
+    'third_party/boringssl/crypto/x509/t_crl.c',
+    'third_party/boringssl/crypto/x509/t_req.c',
+    'third_party/boringssl/crypto/x509/t_x509.c',
+    'third_party/boringssl/crypto/x509/t_x509a.c',
+    'third_party/boringssl/crypto/x509/x509.c',
+    'third_party/boringssl/crypto/x509/x509_att.c',
+    'third_party/boringssl/crypto/x509/x509_cmp.c',
+    'third_party/boringssl/crypto/x509/x509_d2.c',
+    'third_party/boringssl/crypto/x509/x509_def.c',
+    'third_party/boringssl/crypto/x509/x509_ext.c',
+    'third_party/boringssl/crypto/x509/x509_lu.c',
+    'third_party/boringssl/crypto/x509/x509_obj.c',
+    'third_party/boringssl/crypto/x509/x509_r2x.c',
+    'third_party/boringssl/crypto/x509/x509_req.c',
+    'third_party/boringssl/crypto/x509/x509_set.c',
+    'third_party/boringssl/crypto/x509/x509_trs.c',
+    'third_party/boringssl/crypto/x509/x509_txt.c',
+    'third_party/boringssl/crypto/x509/x509_v3.c',
+    'third_party/boringssl/crypto/x509/x509_vfy.c',
+    'third_party/boringssl/crypto/x509/x509_vpm.c',
+    'third_party/boringssl/crypto/x509/x509cset.c',
+    'third_party/boringssl/crypto/x509/x509name.c',
+    'third_party/boringssl/crypto/x509/x509rset.c',
+    'third_party/boringssl/crypto/x509/x509spki.c',
+    'third_party/boringssl/crypto/x509/x509type.c',
+    'third_party/boringssl/crypto/x509/x_algor.c',
+    'third_party/boringssl/crypto/x509/x_all.c',
+    'third_party/boringssl/crypto/x509/x_attrib.c',
+    'third_party/boringssl/crypto/x509/x_crl.c',
+    'third_party/boringssl/crypto/x509/x_exten.c',
+    'third_party/boringssl/crypto/x509/x_info.c',
+    'third_party/boringssl/crypto/x509/x_name.c',
+    'third_party/boringssl/crypto/x509/x_pkey.c',
+    'third_party/boringssl/crypto/x509/x_pubkey.c',
+    'third_party/boringssl/crypto/x509/x_req.c',
+    'third_party/boringssl/crypto/x509/x_sig.c',
+    'third_party/boringssl/crypto/x509/x_spki.c',
+    'third_party/boringssl/crypto/x509/x_val.c',
+    'third_party/boringssl/crypto/x509/x_x509.c',
+    'third_party/boringssl/crypto/x509/x_x509a.c',
+    'third_party/boringssl/crypto/x509v3/pcy_cache.c',
+    'third_party/boringssl/crypto/x509v3/pcy_data.c',
+    'third_party/boringssl/crypto/x509v3/pcy_lib.c',
+    'third_party/boringssl/crypto/x509v3/pcy_map.c',
+    'third_party/boringssl/crypto/x509v3/pcy_node.c',
+    'third_party/boringssl/crypto/x509v3/pcy_tree.c',
+    'third_party/boringssl/crypto/x509v3/v3_akey.c',
+    'third_party/boringssl/crypto/x509v3/v3_akeya.c',
+    'third_party/boringssl/crypto/x509v3/v3_alt.c',
+    'third_party/boringssl/crypto/x509v3/v3_bcons.c',
+    'third_party/boringssl/crypto/x509v3/v3_bitst.c',
+    'third_party/boringssl/crypto/x509v3/v3_conf.c',
+    'third_party/boringssl/crypto/x509v3/v3_cpols.c',
+    'third_party/boringssl/crypto/x509v3/v3_crld.c',
+    'third_party/boringssl/crypto/x509v3/v3_enum.c',
+    'third_party/boringssl/crypto/x509v3/v3_extku.c',
+    'third_party/boringssl/crypto/x509v3/v3_genn.c',
+    'third_party/boringssl/crypto/x509v3/v3_ia5.c',
+    'third_party/boringssl/crypto/x509v3/v3_info.c',
+    'third_party/boringssl/crypto/x509v3/v3_int.c',
+    'third_party/boringssl/crypto/x509v3/v3_lib.c',
+    'third_party/boringssl/crypto/x509v3/v3_ncons.c',
+    'third_party/boringssl/crypto/x509v3/v3_pci.c',
+    'third_party/boringssl/crypto/x509v3/v3_pcia.c',
+    'third_party/boringssl/crypto/x509v3/v3_pcons.c',
+    'third_party/boringssl/crypto/x509v3/v3_pku.c',
+    'third_party/boringssl/crypto/x509v3/v3_pmaps.c',
+    'third_party/boringssl/crypto/x509v3/v3_prn.c',
+    'third_party/boringssl/crypto/x509v3/v3_purp.c',
+    'third_party/boringssl/crypto/x509v3/v3_skey.c',
+    'third_party/boringssl/crypto/x509v3/v3_sxnet.c',
+    'third_party/boringssl/crypto/x509v3/v3_utl.c',
+    'third_party/boringssl/ssl/bio_ssl.c',
+    'third_party/boringssl/ssl/custom_extensions.c',
+    'third_party/boringssl/ssl/d1_both.c',
+    'third_party/boringssl/ssl/d1_lib.c',
+    'third_party/boringssl/ssl/d1_pkt.c',
+    'third_party/boringssl/ssl/d1_srtp.c',
+    'third_party/boringssl/ssl/dtls_method.c',
+    'third_party/boringssl/ssl/dtls_record.c',
+    'third_party/boringssl/ssl/handshake_client.c',
+    'third_party/boringssl/ssl/handshake_server.c',
+    'third_party/boringssl/ssl/s3_both.c',
+    'third_party/boringssl/ssl/s3_lib.c',
+    'third_party/boringssl/ssl/s3_pkt.c',
+    'third_party/boringssl/ssl/ssl_aead_ctx.c',
+    'third_party/boringssl/ssl/ssl_asn1.c',
+    'third_party/boringssl/ssl/ssl_buffer.c',
+    'third_party/boringssl/ssl/ssl_cert.c',
+    'third_party/boringssl/ssl/ssl_cipher.c',
+    'third_party/boringssl/ssl/ssl_ecdh.c',
+    'third_party/boringssl/ssl/ssl_file.c',
+    'third_party/boringssl/ssl/ssl_lib.c',
+    'third_party/boringssl/ssl/ssl_privkey.c',
+    'third_party/boringssl/ssl/ssl_privkey_cc.cc',
+    'third_party/boringssl/ssl/ssl_session.c',
+    'third_party/boringssl/ssl/ssl_stat.c',
+    'third_party/boringssl/ssl/ssl_transcript.c',
+    'third_party/boringssl/ssl/ssl_x509.c',
+    'third_party/boringssl/ssl/t1_enc.c',
+    'third_party/boringssl/ssl/t1_lib.c',
+    'third_party/boringssl/ssl/tls13_both.c',
+    'third_party/boringssl/ssl/tls13_client.c',
+    'third_party/boringssl/ssl/tls13_enc.c',
+    'third_party/boringssl/ssl/tls13_server.c',
+    'third_party/boringssl/ssl/tls_method.c',
+    'third_party/boringssl/ssl/tls_record.c',
+    'third_party/zlib/adler32.c',
+    'third_party/zlib/compress.c',
+    'third_party/zlib/crc32.c',
+    'third_party/zlib/deflate.c',
+    'third_party/zlib/gzclose.c',
+    'third_party/zlib/gzlib.c',
+    'third_party/zlib/gzread.c',
+    'third_party/zlib/gzwrite.c',
+    'third_party/zlib/infback.c',
+    'third_party/zlib/inffast.c',
+    'third_party/zlib/inflate.c',
+    'third_party/zlib/inftrees.c',
+    'third_party/zlib/trees.c',
+    'third_party/zlib/uncompr.c',
+    'third_party/zlib/zutil.c',
+    'third_party/cares/cares/ares__close_sockets.c',
+    'third_party/cares/cares/ares__get_hostent.c',
+    'third_party/cares/cares/ares__read_line.c',
+    'third_party/cares/cares/ares__timeval.c',
+    'third_party/cares/cares/ares_cancel.c',
+    'third_party/cares/cares/ares_create_query.c',
+    'third_party/cares/cares/ares_data.c',
+    'third_party/cares/cares/ares_destroy.c',
+    'third_party/cares/cares/ares_expand_name.c',
+    'third_party/cares/cares/ares_expand_string.c',
+    'third_party/cares/cares/ares_fds.c',
+    'third_party/cares/cares/ares_free_hostent.c',
+    'third_party/cares/cares/ares_free_string.c',
+    'third_party/cares/cares/ares_getenv.c',
+    'third_party/cares/cares/ares_gethostbyaddr.c',
+    'third_party/cares/cares/ares_gethostbyname.c',
+    'third_party/cares/cares/ares_getnameinfo.c',
+    'third_party/cares/cares/ares_getopt.c',
+    'third_party/cares/cares/ares_getsock.c',
+    'third_party/cares/cares/ares_init.c',
+    'third_party/cares/cares/ares_library_init.c',
+    'third_party/cares/cares/ares_llist.c',
+    'third_party/cares/cares/ares_mkquery.c',
+    'third_party/cares/cares/ares_nowarn.c',
+    'third_party/cares/cares/ares_options.c',
+    'third_party/cares/cares/ares_parse_a_reply.c',
+    'third_party/cares/cares/ares_parse_aaaa_reply.c',
+    'third_party/cares/cares/ares_parse_mx_reply.c',
+    'third_party/cares/cares/ares_parse_naptr_reply.c',
+    'third_party/cares/cares/ares_parse_ns_reply.c',
+    'third_party/cares/cares/ares_parse_ptr_reply.c',
+    'third_party/cares/cares/ares_parse_soa_reply.c',
+    'third_party/cares/cares/ares_parse_srv_reply.c',
+    'third_party/cares/cares/ares_parse_txt_reply.c',
+    'third_party/cares/cares/ares_platform.c',
+    'third_party/cares/cares/ares_process.c',
+    'third_party/cares/cares/ares_query.c',
+    'third_party/cares/cares/ares_search.c',
+    'third_party/cares/cares/ares_send.c',
+    'third_party/cares/cares/ares_strcasecmp.c',
+    'third_party/cares/cares/ares_strdup.c',
+    'third_party/cares/cares/ares_strerror.c',
+    'third_party/cares/cares/ares_timeout.c',
+    'third_party/cares/cares/ares_version.c',
+    'third_party/cares/cares/ares_writev.c',
+    'third_party/cares/cares/bitncmp.c',
+    'third_party/cares/cares/inet_net_pton.c',
+    'third_party/cares/cares/inet_ntop.c',
+    'third_party/cares/cares/windows_port.c',
 ]
diff --git a/src/python/grpcio/grpc_version.py b/src/python/grpcio/grpc_version.py
index b6da32e..1fac57b 100644
--- a/src/python/grpcio/grpc_version.py
+++ b/src/python/grpcio/grpc_version.py
@@ -14,4 +14,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!!
 
-VERSION='1.8.3'
+VERSION = '1.9.0.dev0'
diff --git a/src/python/grpcio_health_checking/grpc_version.py b/src/python/grpcio_health_checking/grpc_version.py
index f38db22..5b7e585 100644
--- a/src/python/grpcio_health_checking/grpc_version.py
+++ b/src/python/grpcio_health_checking/grpc_version.py
@@ -14,4 +14,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_health_checking/grpc_version.py.template`!!!
 
-VERSION='1.8.3'
+VERSION = '1.9.0.dev0'
diff --git a/src/python/grpcio_health_checking/health_commands.py b/src/python/grpcio_health_checking/health_commands.py
index 1954735..933f965 100644
--- a/src/python/grpcio_health_checking/health_commands.py
+++ b/src/python/grpcio_health_checking/health_commands.py
@@ -36,9 +36,9 @@
 
     def run(self):
         if os.path.isfile(HEALTH_PROTO):
-            shutil.copyfile(
-                HEALTH_PROTO,
-                os.path.join(ROOT_DIR, 'grpc_health/v1/health.proto'))
+            shutil.copyfile(HEALTH_PROTO,
+                            os.path.join(ROOT_DIR,
+                                         'grpc_health/v1/health.proto'))
 
 
 class BuildPackageProtos(setuptools.Command):
diff --git a/src/python/grpcio_health_checking/setup.py b/src/python/grpcio_health_checking/setup.py
index c105f57..60d309e 100644
--- a/src/python/grpcio_health_checking/setup.py
+++ b/src/python/grpcio_health_checking/setup.py
@@ -56,8 +56,10 @@
     '': '.',
 }
 
-INSTALL_REQUIRES = ('protobuf>=3.5.0.post1',
-                    'grpcio>={version}'.format(version=grpc_version.VERSION),)
+INSTALL_REQUIRES = (
+    'protobuf>=3.5.0.post1',
+    'grpcio>={version}'.format(version=grpc_version.VERSION),
+)
 
 try:
     import health_commands as _health_commands
diff --git a/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
index c598ea9..0c564f1 100644
--- a/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
+++ b/src/python/grpcio_reflection/grpc_reflection/v1alpha/reflection.py
@@ -27,7 +27,8 @@
     return reflection_pb2.ServerReflectionResponse(
         error_response=reflection_pb2.ErrorResponse(
             error_code=grpc.StatusCode.NOT_FOUND.value[0],
-            error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),))
+            error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
+        ))
 
 
 def _file_descriptor_response(descriptor):
@@ -70,7 +71,8 @@
 
     def _file_containing_extension(self, containing_type, extension_number):
         try:
-            message_descriptor = self._pool.FindMessageTypeByName(containing_type)
+            message_descriptor = self._pool.FindMessageTypeByName(
+                containing_type)
             extension_descriptor = self._pool.FindExtensionByNumber(
                 message_descriptor, extension_number)
             descriptor = self._pool.FindFileContainingSymbol(
@@ -82,10 +84,13 @@
 
     def _all_extension_numbers_of_type(self, containing_type):
         try:
-            message_descriptor = self._pool.FindMessageTypeByName(containing_type)
-            extension_numbers = tuple(sorted(
-                extension.number
-                for extension in self._pool.FindAllExtensions(message_descriptor)))
+            message_descriptor = self._pool.FindMessageTypeByName(
+                containing_type)
+            extension_numbers = tuple(
+                sorted(
+                    extension.number
+                    for extension in self._pool.FindAllExtensions(
+                        message_descriptor)))
         except KeyError:
             return _not_found_error()
         else:
@@ -97,10 +102,11 @@
 
     def _list_services(self):
         return reflection_pb2.ServerReflectionResponse(
-            list_services_response=reflection_pb2.ListServiceResponse(service=[
-                reflection_pb2.ServiceResponse(name=service_name)
-                for service_name in self._service_names
-            ]))
+            list_services_response=reflection_pb2.ListServiceResponse(
+                service=[
+                    reflection_pb2.ServiceResponse(name=service_name)
+                    for service_name in self._service_names
+                ]))
 
     def ServerReflectionInfo(self, request_iterator, context):
         # pylint: disable=unused-argument
@@ -124,7 +130,8 @@
                     error_response=reflection_pb2.ErrorResponse(
                         error_code=grpc.StatusCode.INVALID_ARGUMENT.value[0],
                         error_message=grpc.StatusCode.INVALID_ARGUMENT.value[1]
-                        .encode(),))
+                        .encode(),
+                    ))
 
 
 def enable_server_reflection(service_names, server, pool=None):
diff --git a/src/python/grpcio_reflection/grpc_version.py b/src/python/grpcio_reflection/grpc_version.py
index aa51f09..0ad9621 100644
--- a/src/python/grpcio_reflection/grpc_version.py
+++ b/src/python/grpcio_reflection/grpc_version.py
@@ -14,4 +14,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
 
-VERSION='1.8.3'
+VERSION = '1.9.0.dev0'
diff --git a/src/python/grpcio_reflection/setup.py b/src/python/grpcio_reflection/setup.py
index 760b893..10c4c38 100644
--- a/src/python/grpcio_reflection/setup.py
+++ b/src/python/grpcio_reflection/setup.py
@@ -57,8 +57,10 @@
     '': '.',
 }
 
-INSTALL_REQUIRES = ('protobuf>=3.5.0.post1',
-                    'grpcio>={version}'.format(version=grpc_version.VERSION),)
+INSTALL_REQUIRES = (
+    'protobuf>=3.5.0.post1',
+    'grpcio>={version}'.format(version=grpc_version.VERSION),
+)
 
 try:
     import reflection_commands as _reflection_commands
diff --git a/src/python/grpcio_testing/grpc_testing/__init__.py b/src/python/grpcio_testing/grpc_testing/__init__.py
index 9942745..e87d0ff 100644
--- a/src/python/grpcio_testing/grpc_testing/__init__.py
+++ b/src/python/grpcio_testing/grpc_testing/__init__.py
@@ -495,8 +495,8 @@
     """A server with which to test a system that services RPCs."""
 
     @abc.abstractmethod
-    def invoke_unary_unary(
-            self, method_descriptor, invocation_metadata, request, timeout):
+    def invoke_unary_unary(self, method_descriptor, invocation_metadata,
+                           request, timeout):
         """Invokes an RPC to be serviced by the system under test.
 
         Args:
@@ -513,8 +513,8 @@
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def invoke_unary_stream(
-            self, method_descriptor, invocation_metadata, request, timeout):
+    def invoke_unary_stream(self, method_descriptor, invocation_metadata,
+                            request, timeout):
         """Invokes an RPC to be serviced by the system under test.
 
         Args:
@@ -531,8 +531,8 @@
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def invoke_stream_unary(
-            self, method_descriptor, invocation_metadata, timeout):
+    def invoke_stream_unary(self, method_descriptor, invocation_metadata,
+                            timeout):
         """Invokes an RPC to be serviced by the system under test.
 
         Args:
@@ -548,8 +548,8 @@
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def invoke_stream_stream(
-            self, method_descriptor, invocation_metadata, timeout):
+    def invoke_stream_stream(self, method_descriptor, invocation_metadata,
+                             timeout):
         """Invokes an RPC to be serviced by the system under test.
 
         Args:
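
The invoke_* signatures being rewrapped above are the entry points of grpc_testing's in-process test server. The following sketch shows one of them in use, assuming a hypothetical my_service_pb2 module and MyServicer class; server_from_dictionary, strict_real_time and termination follow the grpc_testing surface that appears elsewhere in this diff.

import grpc
import grpc_testing

import my_service_pb2            # hypothetical generated module
from my_package import MyServicer  # hypothetical servicer under test

_METHOD = my_service_pb2.DESCRIPTOR.services_by_name[
    'MyService'].methods_by_name['MyMethod']

test_server = grpc_testing.server_from_dictionary(
    {my_service_pb2.DESCRIPTOR.services_by_name['MyService']: MyServicer()},
    grpc_testing.strict_real_time())

# Drive a unary-unary RPC against the system under test: empty invocation
# metadata, a request message, and no timeout.
rpc = test_server.invoke_unary_unary(
    _METHOD, (), my_service_pb2.MyRequest(), None)

# termination() blocks until the servicer finishes; a servicer that returns
# normally is reported with StatusCode.OK.
response, trailing_metadata, code, details = rpc.termination()
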
diff --git a/src/python/grpcio_testing/grpc_testing/_channel/__init__.py b/src/python/grpcio_testing/grpc_testing/_channel/__init__.py
index 8011975..7a64cda 100644
--- a/src/python/grpcio_testing/grpc_testing/_channel/__init__.py
+++ b/src/python/grpcio_testing/grpc_testing/_channel/__init__.py
@@ -20,4 +20,6 @@
 # pylint: disable=unused-argument
 def testing_channel(descriptors, time):
     return _channel.TestingChannel(time, _channel_state.State())
+
+
 # pylint: enable=unused-argument
diff --git a/src/python/grpcio_testing/grpc_testing/_channel/_channel.py b/src/python/grpcio_testing/grpc_testing/_channel/_channel.py
index fbd064d..b015b8d 100644
--- a/src/python/grpcio_testing/grpc_testing/_channel/_channel.py
+++ b/src/python/grpcio_testing/grpc_testing/_channel/_channel.py
@@ -32,20 +32,28 @@
     def unsubscribe(self, callback):
         raise NotImplementedError()
 
-    def unary_unary(
-            self, method, request_serializer=None, response_deserializer=None):
+    def unary_unary(self,
+                    method,
+                    request_serializer=None,
+                    response_deserializer=None):
         return _multi_callable.UnaryUnary(method, self._state)
 
-    def unary_stream(
-            self, method, request_serializer=None, response_deserializer=None):
+    def unary_stream(self,
+                     method,
+                     request_serializer=None,
+                     response_deserializer=None):
         return _multi_callable.UnaryStream(method, self._state)
 
-    def stream_unary(
-            self, method, request_serializer=None, response_deserializer=None):
+    def stream_unary(self,
+                     method,
+                     request_serializer=None,
+                     response_deserializer=None):
         return _multi_callable.StreamUnary(method, self._state)
 
-    def stream_stream(
-            self, method, request_serializer=None, response_deserializer=None):
+    def stream_stream(self,
+                      method,
+                      request_serializer=None,
+                      response_deserializer=None):
         return _multi_callable.StreamStream(method, self._state)
 
     def take_unary_unary(self, method_descriptor):
@@ -59,4 +67,6 @@
 
     def take_stream_stream(self, method_descriptor):
         return _channel_rpc.stream_stream(self._state, method_descriptor)
+
+
 # pylint: enable=unused-argument
diff --git a/src/python/grpcio_testing/grpc_testing/_channel/_channel_rpc.py b/src/python/grpcio_testing/grpc_testing/_channel/_channel_rpc.py
index 762b6a0..54499b3 100644
--- a/src/python/grpcio_testing/grpc_testing/_channel/_channel_rpc.py
+++ b/src/python/grpcio_testing/grpc_testing/_channel/_channel_rpc.py
@@ -27,8 +27,8 @@
         self._rpc_state.cancelled()
 
     def terminate(self, response, trailing_metadata, code, details):
-        self._rpc_state.terminate_with_response(
-            response, trailing_metadata, code, details)
+        self._rpc_state.terminate_with_response(response, trailing_metadata,
+                                                code, details)
 
 
 class _UnaryStream(grpc_testing.UnaryStreamChannelRpc):
@@ -67,8 +67,8 @@
         self._rpc_state.cancelled()
 
     def terminate(self, response, trailing_metadata, code, details):
-        self._rpc_state.terminate_with_response(
-            response, trailing_metadata, code, details)
+        self._rpc_state.terminate_with_response(response, trailing_metadata,
+                                                code, details)
 
 
 class _StreamStream(grpc_testing.StreamStreamChannelRpc):
diff --git a/src/python/grpcio_testing/grpc_testing/_channel/_channel_state.py b/src/python/grpcio_testing/grpc_testing/_channel/_channel_state.py
index 569c41d..779d59e 100644
--- a/src/python/grpcio_testing/grpc_testing/_channel/_channel_state.py
+++ b/src/python/grpcio_testing/grpc_testing/_channel/_channel_state.py
@@ -25,11 +25,10 @@
         self._condition = threading.Condition()
         self._rpc_states = collections.defaultdict(list)
 
-    def invoke_rpc(
-            self, method_full_rpc_name, invocation_metadata, requests,
-            requests_closed, timeout):
-        rpc_state = _rpc_state.State(
-            invocation_metadata, requests, requests_closed)
+    def invoke_rpc(self, method_full_rpc_name, invocation_metadata, requests,
+                   requests_closed, timeout):
+        rpc_state = _rpc_state.State(invocation_metadata, requests,
+                                     requests_closed)
         with self._condition:
             self._rpc_states[method_full_rpc_name].append(rpc_state)
             self._condition.notify_all()
diff --git a/src/python/grpcio_testing/grpc_testing/_channel/_multi_callable.py b/src/python/grpcio_testing/grpc_testing/_channel/_multi_callable.py
index fe69257..2b2f576 100644
--- a/src/python/grpcio_testing/grpc_testing/_channel/_multi_callable.py
+++ b/src/python/grpcio_testing/grpc_testing/_channel/_multi_callable.py
@@ -16,6 +16,7 @@
 from grpc_testing import _common
 from grpc_testing._channel import _invocation
 
+
 # All per-call credentials parameters are unused by this test infrastructure.
 # pylint: disable=unused-argument
 class UnaryUnary(grpc.UnaryUnaryMultiCallable):
@@ -51,8 +52,8 @@
 
     def __call__(self, request, timeout=None, metadata=None, credentials=None):
         rpc_handler = self._channel_handler.invoke_rpc(
-            self._method_full_rpc_name,
-            _common.fuss_with_metadata(metadata), [request], True, timeout)
+            self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
+            [request], True, timeout)
         return _invocation.ResponseIteratorCall(rpc_handler)
 
 
@@ -68,8 +69,8 @@
                  metadata=None,
                  credentials=None):
         rpc_handler = self._channel_handler.invoke_rpc(
-            self._method_full_rpc_name,
-            _common.fuss_with_metadata(metadata), [], False, timeout)
+            self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
+            [], False, timeout)
         _invocation.consume_requests(request_iterator, rpc_handler)
         return _invocation.blocking_unary_response(rpc_handler)
 
@@ -79,8 +80,8 @@
                   metadata=None,
                   credentials=None):
         rpc_handler = self._channel_handler.invoke_rpc(
-            self._method_full_rpc_name,
-            _common.fuss_with_metadata(metadata), [], False, timeout)
+            self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
+            [], False, timeout)
         _invocation.consume_requests(request_iterator, rpc_handler)
         return _invocation.blocking_unary_response_with_call(rpc_handler)
 
@@ -90,8 +91,8 @@
                metadata=None,
                credentials=None):
         rpc_handler = self._channel_handler.invoke_rpc(
-            self._method_full_rpc_name,
-            _common.fuss_with_metadata(metadata), [], False, timeout)
+            self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
+            [], False, timeout)
         _invocation.consume_requests(request_iterator, rpc_handler)
         return _invocation.future_call(rpc_handler)
 
@@ -108,8 +109,10 @@
                  metadata=None,
                  credentials=None):
         rpc_handler = self._channel_handler.invoke_rpc(
-            self._method_full_rpc_name,
-            _common.fuss_with_metadata(metadata), [], False, timeout)
+            self._method_full_rpc_name, _common.fuss_with_metadata(metadata),
+            [], False, timeout)
         _invocation.consume_requests(request_iterator, rpc_handler)
         return _invocation.ResponseIteratorCall(rpc_handler)
+
+
 # pylint: enable=unused-argument
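
On the channel side, the multi-callables reformatted above are exercised through the take_* methods and terminate. Here is a sketch under the same hypothetical my_service_pb2 assumption, and assuming the public constructor is grpc_testing.channel with this shape; take_unary_unary blocks until application code actually issues the RPC against the test channel, so that code is expected to be running on another thread.

import grpc
import grpc_testing

import my_service_pb2  # hypothetical generated module

_METHOD = my_service_pb2.DESCRIPTOR.services_by_name[
    'MyService'].methods_by_name['MyMethod']

test_channel = grpc_testing.channel(
    my_service_pb2.DESCRIPTOR.services_by_name.values(),
    grpc_testing.strict_real_time())

# Application code under test issues the RPC against test_channel on another
# thread; the test then takes it, inspects the request, and terminates it.
invocation_metadata, request, rpc = test_channel.take_unary_unary(_METHOD)
rpc.terminate(my_service_pb2.MyResponse(), (), grpc.StatusCode.OK, '')
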
diff --git a/src/python/grpcio_testing/grpc_testing/_channel/_rpc_state.py b/src/python/grpcio_testing/grpc_testing/_channel/_rpc_state.py
index e1fa49a..009f675 100644
--- a/src/python/grpcio_testing/grpc_testing/_channel/_rpc_state.py
+++ b/src/python/grpcio_testing/grpc_testing/_channel/_rpc_state.py
@@ -63,23 +63,22 @@
                 if self._code is grpc.StatusCode.OK:
                     if self._responses:
                         response = self._responses.pop(0)
-                        return _common.ChannelRpcRead(
-                            response, None, None, None)
+                        return _common.ChannelRpcRead(response, None, None,
+                                                      None)
                     else:
                         return _common.ChannelRpcRead(
-                            None, self._trailing_metadata,
-                            grpc.StatusCode.OK, self._details)
+                            None, self._trailing_metadata, grpc.StatusCode.OK,
+                            self._details)
                 elif self._code is None:
                     if self._responses:
                         response = self._responses.pop(0)
-                        return _common.ChannelRpcRead(
-                            response, None, None, None)
+                        return _common.ChannelRpcRead(response, None, None,
+                                                      None)
                     else:
                         self._condition.wait()
                 else:
-                    return _common.ChannelRpcRead(
-                        None, self._trailing_metadata, self._code,
-                        self._details)
+                    return _common.ChannelRpcRead(None, self._trailing_metadata,
+                                                  self._code, self._details)
 
     def termination(self):
         with self._condition:
@@ -150,8 +149,8 @@
                 self._responses.append(response)
                 self._condition.notify_all()
 
-    def terminate_with_response(
-            self, response, trailing_metadata, code, details):
+    def terminate_with_response(self, response, trailing_metadata, code,
+                                details):
         with self._condition:
             if self._initial_metadata is None:
                 self._initial_metadata = _common.FUSSED_EMPTY_METADATA
@@ -180,8 +179,8 @@
                 elif self._code is None:
                     self._condition.wait()
                 else:
-                    raise ValueError(
-                        'Status code unexpectedly {}!'.format(self._code))
+                    raise ValueError('Status code unexpectedly {}!'.format(
+                        self._code))
 
     def is_active(self):
         raise NotImplementedError()
diff --git a/src/python/grpcio_testing/grpc_testing/_common.py b/src/python/grpcio_testing/grpc_testing/_common.py
index 1517434..cebad31 100644
--- a/src/python/grpcio_testing/grpc_testing/_common.py
+++ b/src/python/grpcio_testing/grpc_testing/_common.py
@@ -20,12 +20,11 @@
 
 
 def _fuss(tuplified_metadata):
-    return tuplified_metadata + (
-        (
-            'grpc.metadata_added_by_runtime',
-            'gRPC is allowed to add metadata in transmission and does so.',
-        ),
-    )
+    return tuplified_metadata + ((
+        'grpc.metadata_added_by_runtime',
+        'gRPC is allowed to add metadata in transmission and does so.',
+    ),)
+
 
 FUSSED_EMPTY_METADATA = _fuss(())
 
@@ -41,16 +40,19 @@
     rpc_names_to_descriptors = {}
     for service_descriptor in service_descriptors:
         for method_descriptor in service_descriptor.methods_by_name.values():
-            rpc_name = '/{}/{}'.format(
-                service_descriptor.full_name, method_descriptor.name)
+            rpc_name = '/{}/{}'.format(service_descriptor.full_name,
+                                       method_descriptor.name)
             rpc_names_to_descriptors[rpc_name] = method_descriptor
     return rpc_names_to_descriptors
 
 
 class ChannelRpcRead(
-        collections.namedtuple(
-            'ChannelRpcRead',
-            ('response', 'trailing_metadata', 'code', 'details',))):
+        collections.namedtuple('ChannelRpcRead', (
+            'response',
+            'trailing_metadata',
+            'code',
+            'details',
+        ))):
     pass
 
 
@@ -96,15 +98,17 @@
 class ChannelHandler(six.with_metaclass(abc.ABCMeta)):
 
     @abc.abstractmethod
-    def invoke_rpc(
-            self, method_full_rpc_name, invocation_metadata, requests,
-            requests_closed, timeout):
+    def invoke_rpc(self, method_full_rpc_name, invocation_metadata, requests,
+                   requests_closed, timeout):
         raise NotImplementedError()
 
 
 class ServerRpcRead(
-        collections.namedtuple('ServerRpcRead',
-                               ('request', 'requests_closed', 'terminated',))):
+        collections.namedtuple('ServerRpcRead', (
+            'request',
+            'requests_closed',
+            'terminated',
+        ))):
     pass
 
 
@@ -138,23 +142,21 @@
 class Serverish(six.with_metaclass(abc.ABCMeta)):
 
     @abc.abstractmethod
-    def invoke_unary_unary(
-            self, method_descriptor, handler, invocation_metadata, request,
-            deadline):
+    def invoke_unary_unary(self, method_descriptor, handler,
+                           invocation_metadata, request, deadline):
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def invoke_unary_stream(
-            self, method_descriptor, handler, invocation_metadata, request,
-            deadline):
+    def invoke_unary_stream(self, method_descriptor, handler,
+                            invocation_metadata, request, deadline):
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def invoke_stream_unary(
-            self, method_descriptor, handler, invocation_metadata, deadline):
+    def invoke_stream_unary(self, method_descriptor, handler,
+                            invocation_metadata, deadline):
         raise NotImplementedError()
 
     @abc.abstractmethod
-    def invoke_stream_stream(
-            self, method_descriptor, handler, invocation_metadata, deadline):
+    def invoke_stream_stream(self, method_descriptor, handler,
+                             invocation_metadata, deadline):
         raise NotImplementedError()
diff --git a/src/python/grpcio_testing/grpc_testing/_server/__init__.py b/src/python/grpcio_testing/grpc_testing/_server/__init__.py
index 7595129..5f035a9 100644
--- a/src/python/grpcio_testing/grpc_testing/_server/__init__.py
+++ b/src/python/grpcio_testing/grpc_testing/_server/__init__.py
@@ -16,5 +16,5 @@
 
 
 def server_from_dictionary(descriptors_to_servicers, time):
-    return _server.server_from_descriptor_to_servicers(
-        descriptors_to_servicers, time)
+    return _server.server_from_descriptor_to_servicers(descriptors_to_servicers,
+                                                       time)
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_handler.py b/src/python/grpcio_testing/grpc_testing/_server/_handler.py
index b47e04c..d4f50f6 100644
--- a/src/python/grpcio_testing/grpc_testing/_server/_handler.py
+++ b/src/python/grpcio_testing/grpc_testing/_server/_handler.py
@@ -171,9 +171,11 @@
                         if self._responses:
                             self._unary_response = self._responses.pop(0)
                     return (
-                        self._unary_response, self._trailing_metadata,
-                        self._code, self._details,)
-
+                        self._unary_response,
+                        self._trailing_metadata,
+                        self._code,
+                        self._details,
+                    )
 
     def stream_response_termination(self):
         with self._condition:
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_rpc.py b/src/python/grpcio_testing/grpc_testing/_server/_rpc.py
index f81876f..2060e8d 100644
--- a/src/python/grpcio_testing/grpc_testing/_server/_rpc.py
+++ b/src/python/grpcio_testing/grpc_testing/_server/_rpc.py
@@ -80,9 +80,8 @@
 
     def application_cancel(self):
         with self._condition:
-            self._abort(
-                grpc.StatusCode.CANCELLED,
-                'Cancelled by server-side application!')
+            self._abort(grpc.StatusCode.CANCELLED,
+                        'Cancelled by server-side application!')
 
     def application_exception_abort(self, exception):
         with self._condition:
diff --git a/src/python/grpcio_testing/grpc_testing/_server/_server.py b/src/python/grpcio_testing/grpc_testing/_server/_server.py
index 66bcfc1..c7effb6 100644
--- a/src/python/grpcio_testing/grpc_testing/_server/_server.py
+++ b/src/python/grpcio_testing/grpc_testing/_server/_server.py
@@ -29,28 +29,34 @@
 
 
 def _unary_unary_service(request):
+
     def service(implementation, rpc, servicer_context):
-        _service.unary_unary(
-            implementation, rpc, request, servicer_context)
+        _service.unary_unary(implementation, rpc, request, servicer_context)
+
     return service
 
 
 def _unary_stream_service(request):
+
     def service(implementation, rpc, servicer_context):
-        _service.unary_stream(
-            implementation, rpc, request, servicer_context)
+        _service.unary_stream(implementation, rpc, request, servicer_context)
+
     return service
 
 
 def _stream_unary_service(handler):
+
     def service(implementation, rpc, servicer_context):
         _service.stream_unary(implementation, rpc, handler, servicer_context)
+
     return service
 
 
 def _stream_stream_service(handler):
+
     def service(implementation, rpc, servicer_context):
         _service.stream_stream(implementation, rpc, handler, servicer_context)
+
     return service
 
 
@@ -60,42 +66,43 @@
         self._descriptors_to_servicers = descriptors_to_servicers
         self._time = time
 
-    def _invoke(
-            self, service_behavior, method_descriptor, handler,
-            invocation_metadata, deadline):
-        implementation = _implementation(
-            self._descriptors_to_servicers, method_descriptor)
+    def _invoke(self, service_behavior, method_descriptor, handler,
+                invocation_metadata, deadline):
+        implementation = _implementation(self._descriptors_to_servicers,
+                                         method_descriptor)
         rpc = _rpc.Rpc(handler, invocation_metadata)
         if handler.add_termination_callback(rpc.extrinsic_abort):
             servicer_context = _servicer_context.ServicerContext(
                 rpc, self._time, deadline)
             service_thread = threading.Thread(
                 target=service_behavior,
-                args=(implementation, rpc, servicer_context,))
+                args=(
+                    implementation,
+                    rpc,
+                    servicer_context,
+                ))
             service_thread.start()
 
-    def invoke_unary_unary(
-            self, method_descriptor, handler, invocation_metadata, request,
-            deadline):
+    def invoke_unary_unary(self, method_descriptor, handler,
+                           invocation_metadata, request, deadline):
         self._invoke(
             _unary_unary_service(request), method_descriptor, handler,
             invocation_metadata, deadline)
 
-    def invoke_unary_stream(
-            self, method_descriptor, handler, invocation_metadata, request,
-            deadline):
+    def invoke_unary_stream(self, method_descriptor, handler,
+                            invocation_metadata, request, deadline):
         self._invoke(
             _unary_stream_service(request), method_descriptor, handler,
             invocation_metadata, deadline)
 
-    def invoke_stream_unary(
-            self, method_descriptor, handler, invocation_metadata, deadline):
+    def invoke_stream_unary(self, method_descriptor, handler,
+                            invocation_metadata, deadline):
         self._invoke(
             _stream_unary_service(handler), method_descriptor, handler,
             invocation_metadata, deadline)
 
-    def invoke_stream_stream(
-            self, method_descriptor, handler, invocation_metadata, deadline):
+    def invoke_stream_stream(self, method_descriptor, handler,
+                             invocation_metadata, deadline):
         self._invoke(
             _stream_stream_service(handler), method_descriptor, handler,
             invocation_metadata, deadline)
@@ -106,7 +113,8 @@
         return None, _handler.handler_without_deadline(requests_closed)
     else:
         deadline = time.time() + timeout
-        handler = _handler.handler_with_deadline(requests_closed, time, deadline)
+        handler = _handler.handler_with_deadline(requests_closed, time,
+                                                 deadline)
         return deadline, handler
 
 
@@ -116,32 +124,32 @@
         self._serverish = serverish
         self._time = time
 
-    def invoke_unary_unary(
-            self, method_descriptor, invocation_metadata, request, timeout):
+    def invoke_unary_unary(self, method_descriptor, invocation_metadata,
+                           request, timeout):
         deadline, handler = _deadline_and_handler(True, self._time, timeout)
         self._serverish.invoke_unary_unary(
             method_descriptor, handler, invocation_metadata, request, deadline)
         return _server_rpc.UnaryUnaryServerRpc(handler)
 
-    def invoke_unary_stream(
-            self, method_descriptor, invocation_metadata, request, timeout):
+    def invoke_unary_stream(self, method_descriptor, invocation_metadata,
+                            request, timeout):
         deadline, handler = _deadline_and_handler(True, self._time, timeout)
         self._serverish.invoke_unary_stream(
             method_descriptor, handler, invocation_metadata, request, deadline)
         return _server_rpc.UnaryStreamServerRpc(handler)
 
-    def invoke_stream_unary(
-            self, method_descriptor, invocation_metadata, timeout):
+    def invoke_stream_unary(self, method_descriptor, invocation_metadata,
+                            timeout):
         deadline, handler = _deadline_and_handler(False, self._time, timeout)
-        self._serverish.invoke_stream_unary(
-            method_descriptor, handler, invocation_metadata, deadline)
+        self._serverish.invoke_stream_unary(method_descriptor, handler,
+                                            invocation_metadata, deadline)
         return _server_rpc.StreamUnaryServerRpc(handler)
 
-    def invoke_stream_stream(
-            self, method_descriptor, invocation_metadata, timeout):
+    def invoke_stream_stream(self, method_descriptor, invocation_metadata,
+                             timeout):
         deadline, handler = _deadline_and_handler(False, self._time, timeout)
-        self._serverish.invoke_stream_stream(
-            method_descriptor, handler, invocation_metadata, deadline)
+        self._serverish.invoke_stream_stream(method_descriptor, handler,
+                                             invocation_metadata, deadline)
         return _server_rpc.StreamStreamServerRpc(handler)
 
 
diff --git a/src/python/grpcio_testing/grpc_testing/_time.py b/src/python/grpcio_testing/grpc_testing/_time.py
index 3b1ab4b..afbdad3 100644
--- a/src/python/grpcio_testing/grpc_testing/_time.py
+++ b/src/python/grpcio_testing/grpc_testing/_time.py
@@ -46,9 +46,11 @@
 
 
 class _Delta(
-        collections.namedtuple('_Delta',
-                               ('mature_behaviors', 'earliest_mature_time',
-                                'earliest_immature_time',))):
+        collections.namedtuple('_Delta', (
+            'mature_behaviors',
+            'earliest_mature_time',
+            'earliest_immature_time',
+        ))):
     pass
 
 
diff --git a/src/python/grpcio_testing/grpc_version.py b/src/python/grpcio_testing/grpc_version.py
index 560630b..0eb5fbf 100644
--- a/src/python/grpcio_testing/grpc_version.py
+++ b/src/python/grpcio_testing/grpc_version.py
@@ -14,4 +14,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!!
 
-VERSION='1.8.3'
+VERSION = '1.9.0.dev0'
diff --git a/src/python/grpcio_testing/setup.py b/src/python/grpcio_testing/setup.py
index fa40424..5a9d593 100644
--- a/src/python/grpcio_testing/setup.py
+++ b/src/python/grpcio_testing/setup.py
@@ -28,8 +28,10 @@
     '': '.',
 }
 
-INSTALL_REQUIRES = ('protobuf>=3.5.0.post1',
-                    'grpcio>={version}'.format(version=grpc_version.VERSION),)
+INSTALL_REQUIRES = (
+    'protobuf>=3.5.0.post1',
+    'grpcio>={version}'.format(version=grpc_version.VERSION),
+)
 
 setuptools.setup(
     name='grpcio-testing',
diff --git a/src/python/grpcio_tests/grpc_version.py b/src/python/grpcio_tests/grpc_version.py
index 8f99854..b1b4d7e 100644
--- a/src/python/grpcio_tests/grpc_version.py
+++ b/src/python/grpcio_tests/grpc_version.py
@@ -14,4 +14,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!!
 
-VERSION='1.8.3'
+VERSION = '1.9.0.dev0'
diff --git a/src/python/grpcio_tests/setup.py b/src/python/grpcio_tests/setup.py
index aeb4ea9..250df65 100644
--- a/src/python/grpcio_tests/setup.py
+++ b/src/python/grpcio_tests/setup.py
@@ -99,4 +99,5 @@
     tests_require=TESTS_REQUIRE,
     test_suite=TEST_SUITE,
     test_loader=TEST_LOADER,
-    test_runner=TEST_RUNNER,)
+    test_runner=TEST_RUNNER,
+)
diff --git a/src/python/grpcio_tests/tests/_loader.py b/src/python/grpcio_tests/tests/_loader.py
index 281a23c..3168091 100644
--- a/src/python/grpcio_tests/tests/_loader.py
+++ b/src/python/grpcio_tests/tests/_loader.py
@@ -101,5 +101,5 @@
         elif isinstance(item, unittest.TestCase):
             yield item
         else:
-            raise ValueError(
-                'unexpected suite item of type {}'.format(type(item)))
+            raise ValueError('unexpected suite item of type {}'.format(
+                type(item)))
diff --git a/src/python/grpcio_tests/tests/_result.py b/src/python/grpcio_tests/tests/_result.py
index f26fdef..9907c4e 100644
--- a/src/python/grpcio_tests/tests/_result.py
+++ b/src/python/grpcio_tests/tests/_result.py
@@ -215,7 +215,8 @@
     Args:
       filter (callable): A unary predicate to filter over CaseResult objects.
     """
-        return (self.cases[case_id] for case_id in self.cases
+        return (self.cases[case_id]
+                for case_id in self.cases
                 if filter(self.cases[case_id]))
 
 
@@ -285,8 +286,8 @@
     def startTestRun(self):
         """See unittest.TestResult.startTestRun."""
         super(TerminalResult, self).startTestRun()
-        self.out.write(_Colors.HEADER + 'Testing gRPC Python...\n' +
-                       _Colors.END)
+        self.out.write(
+            _Colors.HEADER + 'Testing gRPC Python...\n' + _Colors.END)
 
     def stopTestRun(self):
         """See unittest.TestResult.stopTestRun."""
@@ -297,43 +298,43 @@
     def addError(self, test, error):
         """See unittest.TestResult.addError."""
         super(TerminalResult, self).addError(test, error)
-        self.out.write(_Colors.FAIL + 'ERROR         {}\n'.format(test.id()) +
-                       _Colors.END)
+        self.out.write(
+            _Colors.FAIL + 'ERROR         {}\n'.format(test.id()) + _Colors.END)
         self.out.flush()
 
     def addFailure(self, test, error):
         """See unittest.TestResult.addFailure."""
         super(TerminalResult, self).addFailure(test, error)
-        self.out.write(_Colors.FAIL + 'FAILURE       {}\n'.format(test.id()) +
-                       _Colors.END)
+        self.out.write(
+            _Colors.FAIL + 'FAILURE       {}\n'.format(test.id()) + _Colors.END)
         self.out.flush()
 
     def addSuccess(self, test):
         """See unittest.TestResult.addSuccess."""
         super(TerminalResult, self).addSuccess(test)
-        self.out.write(_Colors.OK + 'SUCCESS       {}\n'.format(test.id()) +
-                       _Colors.END)
+        self.out.write(
+            _Colors.OK + 'SUCCESS       {}\n'.format(test.id()) + _Colors.END)
         self.out.flush()
 
     def addSkip(self, test, reason):
         """See unittest.TestResult.addSkip."""
         super(TerminalResult, self).addSkip(test, reason)
-        self.out.write(_Colors.INFO + 'SKIP          {}\n'.format(test.id()) +
-                       _Colors.END)
+        self.out.write(
+            _Colors.INFO + 'SKIP          {}\n'.format(test.id()) + _Colors.END)
         self.out.flush()
 
     def addExpectedFailure(self, test, error):
         """See unittest.TestResult.addExpectedFailure."""
         super(TerminalResult, self).addExpectedFailure(test, error)
-        self.out.write(_Colors.INFO + 'FAILURE_OK    {}\n'.format(test.id()) +
-                       _Colors.END)
+        self.out.write(
+            _Colors.INFO + 'FAILURE_OK    {}\n'.format(test.id()) + _Colors.END)
         self.out.flush()
 
     def addUnexpectedSuccess(self, test):
         """See unittest.TestResult.addUnexpectedSuccess."""
         super(TerminalResult, self).addUnexpectedSuccess(test)
-        self.out.write(_Colors.INFO + 'UNEXPECTED_OK {}\n'.format(test.id()) +
-                       _Colors.END)
+        self.out.write(
+            _Colors.INFO + 'UNEXPECTED_OK {}\n'.format(test.id()) + _Colors.END)
         self.out.flush()
 
 
diff --git a/src/python/grpcio_tests/tests/_runner.py b/src/python/grpcio_tests/tests/_runner.py
index 8fb4a0e..8e27dc6 100644
--- a/src/python/grpcio_tests/tests/_runner.py
+++ b/src/python/grpcio_tests/tests/_runner.py
@@ -181,8 +181,8 @@
         # Run the tests
         result.startTestRun()
         for augmented_case in augmented_cases:
-            sys.stdout.write(
-                'Running       {}\n'.format(augmented_case.case.id()))
+            sys.stdout.write('Running       {}\n'.format(
+                augmented_case.case.id()))
             sys.stdout.flush()
             case_thread = threading.Thread(
                 target=augmented_case.case.run, args=(result,))
@@ -196,8 +196,8 @@
             except:
                 # re-raise the exception after forcing the with-block to end
                 raise
-            result.set_output(augmented_case.case,
-                              stdout_pipe.output(), stderr_pipe.output())
+            result.set_output(augmented_case.case, stdout_pipe.output(),
+                              stderr_pipe.output())
             sys.stdout.write(result_out.getvalue())
             sys.stdout.flush()
             result_out.truncate(0)
diff --git a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py
index ac31e72..3cbbb8d 100644
--- a/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py
+++ b/src/python/grpcio_tests/tests/health_check/_health_servicer_test.py
@@ -16,12 +16,11 @@
 import unittest
 
 import grpc
-from grpc.framework.foundation import logging_pool
 from grpc_health.v1 import health
 from grpc_health.v1 import health_pb2
 from grpc_health.v1 import health_pb2_grpc
 
-from tests.unit.framework.common import test_constants
+from tests.unit import test_common
 
 
 class HealthServicerTest(unittest.TestCase):
@@ -35,8 +34,7 @@
                      health_pb2.HealthCheckResponse.UNKNOWN)
         servicer.set('grpc.test.TestServiceNotServing',
                      health_pb2.HealthCheckResponse.NOT_SERVING)
-        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        self._server = grpc.server(server_pool)
+        self._server = test_common.test_server()
         port = self._server.add_insecure_port('[::]:0')
         health_pb2_grpc.add_HealthServicer_to_server(servicer, self._server)
         self._server.start()
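
The health-check test above only swaps its server construction over to test_common.test_server(). For reference, a minimal sketch of publishing and querying the standard health service with the public grpc_health.v1 API; the service name used here is just an example.

from concurrent import futures

import grpc
from grpc_health.v1 import health
from grpc_health.v1 import health_pb2
from grpc_health.v1 import health_pb2_grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
servicer = health.HealthServicer()
# Report an example service as serving.
servicer.set('grpc.test.TestServiceServing',
             health_pb2.HealthCheckResponse.SERVING)
health_pb2_grpc.add_HealthServicer_to_server(servicer, server)
port = server.add_insecure_port('[::]:0')
server.start()

# A client checks status through the generated Health stub.
channel = grpc.insecure_channel('localhost:{}'.format(port))
stub = health_pb2_grpc.HealthStub(channel)
response = stub.Check(
    health_pb2.HealthCheckRequest(service='grpc.test.TestServiceServing'))
# response.status is health_pb2.HealthCheckResponse.SERVING here.
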
diff --git a/src/python/grpcio_tests/tests/http2/negative_http2_client.py b/src/python/grpcio_tests/tests/http2/negative_http2_client.py
index 8dab5b6..e407682 100644
--- a/src/python/grpcio_tests/tests/http2/negative_http2_client.py
+++ b/src/python/grpcio_tests/tests/http2/negative_http2_client.py
@@ -32,14 +32,14 @@
 
 def _expect_status_code(call, expected_code):
     if call.code() != expected_code:
-        raise ValueError('expected code %s, got %s' %
-                         (expected_code, call.code()))
+        raise ValueError('expected code %s, got %s' % (expected_code,
+                                                       call.code()))
 
 
 def _expect_status_details(call, expected_details):
     if call.details() != expected_details:
-        raise ValueError('expected message %s, got %s' %
-                         (expected_details, call.details()))
+        raise ValueError('expected message %s, got %s' % (expected_details,
+                                                          call.details()))
 
 
 def _validate_status_code_and_details(call, expected_code, expected_details):
diff --git a/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
index 4136739..8d464b2 100644
--- a/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Insecure client-server interoperability as a unit test."""
 
-from concurrent import futures
 import unittest
 
 import grpc
@@ -22,13 +21,14 @@
 from tests.interop import _intraop_test_case
 from tests.interop import methods
 from tests.interop import server
+from tests.unit import test_common
 
 
 class InsecureIntraopTest(_intraop_test_case.IntraopTestCase,
                           unittest.TestCase):
 
     def setUp(self):
-        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+        self.server = test_common.test_server()
         test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
                                                         self.server)
         port = self.server.add_insecure_port('[::]:0')
diff --git a/src/python/grpcio_tests/tests/interop/_intraop_test_case.py b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py
index ce456a6..007db7a 100644
--- a/src/python/grpcio_tests/tests/interop/_intraop_test_case.py
+++ b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py
@@ -39,8 +39,8 @@
         methods.TestCase.PING_PONG.test_interoperability(self.stub, None)
 
     def testCancelAfterBegin(self):
-        methods.TestCase.CANCEL_AFTER_BEGIN.test_interoperability(self.stub,
-                                                                  None)
+        methods.TestCase.CANCEL_AFTER_BEGIN.test_interoperability(
+            self.stub, None)
 
     def testCancelAfterFirstResponse(self):
         methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE.test_interoperability(
diff --git a/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
index 6514d77..c891359 100644
--- a/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 """Secure client-server interoperability as a unit test."""
 
-from concurrent import futures
 import unittest
 
 import grpc
@@ -22,6 +21,7 @@
 from tests.interop import _intraop_test_case
 from tests.interop import methods
 from tests.interop import resources
+from tests.unit import test_common
 
 _SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
 
@@ -29,20 +29,21 @@
 class SecureIntraopTest(_intraop_test_case.IntraopTestCase, unittest.TestCase):
 
     def setUp(self):
-        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+        self.server = test_common.test_server()
         test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
                                                         self.server)
         port = self.server.add_secure_port(
             '[::]:0',
-            grpc.ssl_server_credentials(
-                [(resources.private_key(), resources.certificate_chain())]))
+            grpc.ssl_server_credentials([(resources.private_key(),
+                                          resources.certificate_chain())]))
         self.server.start()
         self.stub = test_pb2_grpc.TestServiceStub(
             grpc.secure_channel('localhost:{}'.format(port),
                                 grpc.ssl_channel_credentials(
-                                    resources.test_root_certificates()), (
-                                        ('grpc.ssl_target_name_override',
-                                         _SERVER_HOST_OVERRIDE,),)))
+                                    resources.test_root_certificates()), ((
+                                        'grpc.ssl_target_name_override',
+                                        _SERVER_HOST_OVERRIDE,
+                                    ),)))
 
 
 if __name__ == '__main__':
diff --git a/src/python/grpcio_tests/tests/interop/client.py b/src/python/grpcio_tests/tests/interop/client.py
index 383b5f0..3780ed9 100644
--- a/src/python/grpcio_tests/tests/interop/client.py
+++ b/src/python/grpcio_tests/tests/interop/client.py
@@ -104,8 +104,10 @@
             channel_credentials = grpc.composite_channel_credentials(
                 channel_credentials, call_credentials)
 
-        channel = grpc.secure_channel(target, channel_credentials, (
-            ('grpc.ssl_target_name_override', args.server_host_override,),))
+        channel = grpc.secure_channel(target, channel_credentials, ((
+            'grpc.ssl_target_name_override',
+            args.server_host_override,
+        ),))
     else:
         channel = grpc.insecure_channel(target)
     if args.test_case == "unimplemented_service":
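
The channel-argument tuple being reflowed here is the usual way the interop client points TLS host-name validation at the test override. The same construction in isolation, with the target, the override value, and the roots.pem path as placeholders:

import grpc

target = 'localhost:8080'                    # placeholder endpoint
server_host_override = 'foo.test.google.fr'  # placeholder override host

# Placeholder root-certificate file used to validate the server's chain.
with open('roots.pem', 'rb') as root_certificates_file:
    channel_credentials = grpc.ssl_channel_credentials(
        root_certificates_file.read())

channel = grpc.secure_channel(target, channel_credentials, ((
    'grpc.ssl_target_name_override',
    server_host_override,
),))
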
diff --git a/src/python/grpcio_tests/tests/interop/methods.py b/src/python/grpcio_tests/tests/interop/methods.py
index ae9a50d..b728ffd 100644
--- a/src/python/grpcio_tests/tests/interop/methods.py
+++ b/src/python/grpcio_tests/tests/interop/methods.py
@@ -62,9 +62,10 @@
     def UnaryCall(self, request, context):
         _maybe_echo_metadata(context)
         _maybe_echo_status_and_message(request, context)
-        return messages_pb2.SimpleResponse(payload=messages_pb2.Payload(
-            type=messages_pb2.COMPRESSABLE,
-            body=b'\x00' * request.response_size))
+        return messages_pb2.SimpleResponse(
+            payload=messages_pb2.Payload(
+                type=messages_pb2.COMPRESSABLE,
+                body=b'\x00' * request.response_size))
 
     def StreamingOutputCall(self, request, context):
         _maybe_echo_status_and_message(request, context)
@@ -100,14 +101,14 @@
 
 def _expect_status_code(call, expected_code):
     if call.code() != expected_code:
-        raise ValueError('expected code %s, got %s' %
-                         (expected_code, call.code()))
+        raise ValueError('expected code %s, got %s' % (expected_code,
+                                                       call.code()))
 
 
 def _expect_status_details(call, expected_details):
     if call.details() != expected_details:
-        raise ValueError('expected message %s, got %s' %
-                         (expected_details, call.details()))
+        raise ValueError('expected message %s, got %s' % (expected_details,
+                                                          call.details()))
 
 
 def _validate_status_code_and_details(call, expected_code, expected_details):
@@ -152,26 +153,38 @@
 
 
 def _client_streaming(stub):
-    payload_body_sizes = (27182, 8, 1828, 45904,)
+    payload_body_sizes = (
+        27182,
+        8,
+        1828,
+        45904,
+    )
     payloads = (messages_pb2.Payload(body=b'\x00' * size)
                 for size in payload_body_sizes)
     requests = (messages_pb2.StreamingInputCallRequest(payload=payload)
                 for payload in payloads)
     response = stub.StreamingInputCall(requests)
     if response.aggregated_payload_size != 74922:
-        raise ValueError('incorrect size %d!' %
-                         response.aggregated_payload_size)
+        raise ValueError(
+            'incorrect size %d!' % response.aggregated_payload_size)
 
 
 def _server_streaming(stub):
-    sizes = (31415, 9, 2653, 58979,)
+    sizes = (
+        31415,
+        9,
+        2653,
+        58979,
+    )
 
     request = messages_pb2.StreamingOutputCallRequest(
         response_type=messages_pb2.COMPRESSABLE,
-        response_parameters=(messages_pb2.ResponseParameters(size=sizes[0]),
-                             messages_pb2.ResponseParameters(size=sizes[1]),
-                             messages_pb2.ResponseParameters(size=sizes[2]),
-                             messages_pb2.ResponseParameters(size=sizes[3]),))
+        response_parameters=(
+            messages_pb2.ResponseParameters(size=sizes[0]),
+            messages_pb2.ResponseParameters(size=sizes[1]),
+            messages_pb2.ResponseParameters(size=sizes[2]),
+            messages_pb2.ResponseParameters(size=sizes[3]),
+        ))
     response_iterator = stub.StreamingOutputCall(request)
     for index, response in enumerate(response_iterator):
         _validate_payload_type_and_length(response, messages_pb2.COMPRESSABLE,
@@ -218,8 +231,18 @@
 
 
 def _ping_pong(stub):
-    request_response_sizes = (31415, 9, 2653, 58979,)
-    request_payload_sizes = (27182, 8, 1828, 45904,)
+    request_response_sizes = (
+        31415,
+        9,
+        2653,
+        58979,
+    )
+    request_payload_sizes = (
+        27182,
+        8,
+        1828,
+        45904,
+    )
 
     with _Pipe() as pipe:
         response_iterator = stub.FullDuplexCall(pipe)
@@ -247,8 +270,18 @@
 
 
 def _cancel_after_first_response(stub):
-    request_response_sizes = (31415, 9, 2653, 58979,)
-    request_payload_sizes = (27182, 8, 1828, 45904,)
+    request_response_sizes = (
+        31415,
+        9,
+        2653,
+        58979,
+    )
+    request_payload_sizes = (
+        27182,
+        8,
+        1828,
+        45904,
+    )
     with _Pipe() as pipe:
         response_iterator = stub.FullDuplexCall(pipe)
 
@@ -331,14 +364,14 @@
 
 
 def _unimplemented_method(test_service_stub):
-    response_future = (
-        test_service_stub.UnimplementedCall.future(empty_pb2.Empty()))
+    response_future = (test_service_stub.UnimplementedCall.future(
+        empty_pb2.Empty()))
     _expect_status_code(response_future, grpc.StatusCode.UNIMPLEMENTED)
 
 
 def _unimplemented_service(unimplemented_service_stub):
-    response_future = (
-        unimplemented_service_stub.UnimplementedCall.future(empty_pb2.Empty()))
+    response_future = (unimplemented_service_stub.UnimplementedCall.future(
+        empty_pb2.Empty()))
     _expect_status_code(response_future, grpc.StatusCode.UNIMPLEMENTED)
 
 
@@ -392,11 +425,12 @@
     wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
     response = _large_unary_common_behavior(stub, True, True, None)
     if wanted_email != response.username:
-        raise ValueError('expected username %s, got %s' %
-                         (wanted_email, response.username))
+        raise ValueError('expected username %s, got %s' % (wanted_email,
+                                                           response.username))
     if args.oauth_scope.find(response.oauth_scope) == -1:
-        raise ValueError('expected to find oauth scope "{}" in received "{}"'.
-                         format(response.oauth_scope, args.oauth_scope))
+        raise ValueError(
+            'expected to find oauth scope "{}" in received "{}"'.format(
+                response.oauth_scope, args.oauth_scope))
 
 
 def _jwt_token_creds(stub, args):
@@ -404,8 +438,8 @@
     wanted_email = json.load(open(json_key_filename, 'rb'))['client_email']
     response = _large_unary_common_behavior(stub, True, False, None)
     if wanted_email != response.username:
-        raise ValueError('expected username %s, got %s' %
-                         (wanted_email, response.username))
+        raise ValueError('expected username %s, got %s' % (wanted_email,
+                                                           response.username))
 
 
 def _per_rpc_creds(stub, args):
@@ -419,8 +453,8 @@
             request=google_auth_transport_requests.Request()))
     response = _large_unary_common_behavior(stub, True, False, call_credentials)
     if wanted_email != response.username:
-        raise ValueError('expected username %s, got %s' %
-                         (wanted_email, response.username))
+        raise ValueError('expected username %s, got %s' % (wanted_email,
+                                                           response.username))
 
 
 @enum.unique
@@ -479,5 +513,5 @@
         elif self is TestCase.PER_RPC_CREDS:
             _per_rpc_creds(stub, args)
         else:
-            raise NotImplementedError('Test case "%s" not implemented!' %
-                                      self.name)
+            raise NotImplementedError(
+                'Test case "%s" not implemented!' % self.name)
diff --git a/src/python/grpcio_tests/tests/interop/server.py b/src/python/grpcio_tests/tests/interop/server.py
index eeb41a2..0810de2 100644
--- a/src/python/grpcio_tests/tests/interop/server.py
+++ b/src/python/grpcio_tests/tests/interop/server.py
@@ -23,6 +23,7 @@
 
 from tests.interop import methods
 from tests.interop import resources
+from tests.unit import test_common
 
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
 
@@ -38,14 +39,14 @@
         help='require a secure connection')
     args = parser.parse_args()
 
-    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    server = test_common.test_server()
     test_pb2_grpc.add_TestServiceServicer_to_server(methods.TestService(),
                                                     server)
     if args.use_tls:
         private_key = resources.private_key()
         certificate_chain = resources.certificate_chain()
-        credentials = grpc.ssl_server_credentials((
-            (private_key, certificate_chain),))
+        credentials = grpc.ssl_server_credentials(((private_key,
+                                                    certificate_chain),))
         server.add_secure_port('[::]:{}'.format(args.port), credentials)
     else:
         server.add_insecure_port('[::]:{}'.format(args.port))
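
The server-side counterpart reflowed above builds TLS credentials from a single (private key, certificate chain) pair. A standalone sketch, with the PEM file paths and port as placeholders:

from concurrent import futures

import grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

# Placeholder PEM files holding the server's private key and certificate chain.
with open('server.key', 'rb') as key_file:
    private_key = key_file.read()
with open('server.crt', 'rb') as chain_file:
    certificate_chain = chain_file.read()

credentials = grpc.ssl_server_credentials(((private_key, certificate_chain),))
server.add_secure_port('[::]:8080', credentials)
server.start()
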
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
index 5b84001..6d85f43 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/_python_plugin_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 import collections
-from concurrent import futures
 import contextlib
 import distutils.spawn
 import errno
@@ -28,6 +27,7 @@
 from six import moves
 
 import grpc
+from tests.unit import test_common
 from tests.unit.framework.common import test_constants
 
 import tests.protoc_plugin.protos.payload.test_payload_pb2 as payload_pb2
@@ -119,8 +119,11 @@
 
 
 class _Service(
-        collections.namedtuple('_Service', ('servicer_methods', 'server',
-                                            'stub',))):
+        collections.namedtuple('_Service', (
+            'servicer_methods',
+            'server',
+            'stub',
+        ))):
     """A live and running service.
 
   Attributes:
@@ -155,8 +158,7 @@
         def HalfDuplexCall(self, request_iter, context):
             return servicer_methods.HalfDuplexCall(request_iter, context)
 
-    server = grpc.server(
-        futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+    server = test_common.test_server()
     getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
                                                                  server)
     port = server.add_insecure_port('[::]:0')
@@ -177,8 +179,7 @@
     class Servicer(getattr(service_pb2_grpc, SERVICER_IDENTIFIER)):
         pass
 
-    server = grpc.server(
-        futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
+    server = test_common.test_server()
     getattr(service_pb2_grpc, ADD_SERVICER_TO_SERVER_IDENTIFIER)(Servicer(),
                                                                  server)
     port = server.add_insecure_port('[::]:0')
@@ -299,8 +300,8 @@
         responses = service.stub.StreamingOutputCall(request)
         expected_responses = service.servicer_methods.StreamingOutputCall(
             request, 'not a real RpcContext!')
-        for expected_response, response in moves.zip_longest(expected_responses,
-                                                             responses):
+        for expected_response, response in moves.zip_longest(
+                expected_responses, responses):
             self.assertEqual(expected_response, response)
 
     def testStreamingOutputCallExpired(self):
@@ -390,8 +391,8 @@
         responses = service.stub.FullDuplexCall(_full_duplex_request_iterator())
         expected_responses = service.servicer_methods.FullDuplexCall(
             _full_duplex_request_iterator(), 'not a real RpcContext!')
-        for expected_response, response in moves.zip_longest(expected_responses,
-                                                             responses):
+        for expected_response, response in moves.zip_longest(
+                expected_responses, responses):
             self.assertEqual(expected_response, response)
 
     def testFullDuplexCallExpired(self):
@@ -441,8 +442,8 @@
         responses = service.stub.HalfDuplexCall(half_duplex_request_iterator())
         expected_responses = service.servicer_methods.HalfDuplexCall(
             half_duplex_request_iterator(), 'not a real RpcContext!')
-        for expected_response, response in moves.zip_longest(expected_responses,
-                                                             responses):
+        for expected_response, response in moves.zip_longest(
+                expected_responses, responses):
             self.assertEqual(expected_response, response)
 
     def testHalfDuplexCallWedged(self):
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
index 7868cdb..ab33775 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
@@ -13,7 +13,6 @@
 # limitations under the License.
 
 import abc
-from concurrent import futures
 import contextlib
 import importlib
 import os
@@ -29,7 +28,7 @@
 
 import grpc
 from grpc_tools import protoc
-from tests.unit.framework.common import test_constants
+from tests.unit import test_common
 
 _MESSAGES_IMPORT = b'import "messages.proto";'
 _SPLIT_NAMESPACE = b'package grpc_protoc_plugin.invocation_testing.split;'
@@ -65,8 +64,8 @@
                            messages_proto_relative_file_name_bytes):
     package_substitution = (b'package grpc_protoc_plugin.invocation_testing.' +
                             test_name_bytes + b';')
-    common_namespace_substituted = proto_content.replace(_COMMON_NAMESPACE,
-                                                         package_substitution)
+    common_namespace_substituted = proto_content.replace(
+        _COMMON_NAMESPACE, package_substitution)
     split_namespace_substituted = common_namespace_substituted.replace(
         _SPLIT_NAMESPACE, package_substitution)
     message_import_replaced = split_namespace_substituted.replace(
@@ -164,8 +163,12 @@
         return pb2_grpc_protoc_exit_code, pb2_protoc_exit_code,
 
 
-_PROTOC_STYLES = (_Mid2016ProtocStyle(), _SingleProtocExecutionProtocStyle(),
-                  _ProtoBeforeGrpcProtocStyle(), _GrpcBeforeProtoProtocStyle(),)
+_PROTOC_STYLES = (
+    _Mid2016ProtocStyle(),
+    _SingleProtocExecutionProtocStyle(),
+    _ProtoBeforeGrpcProtocStyle(),
+    _GrpcBeforeProtoProtocStyle(),
+)
 
 
 @unittest.skipIf(platform.python_implementation() == 'PyPy',
@@ -181,18 +184,22 @@
         os.makedirs(self._python_out)
 
         proto_directories_and_names = {
-            (self.MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES,
-             self.MESSAGES_PROTO_FILE_NAME,),
-            (self.SERVICES_PROTO_RELATIVE_DIRECTORY_NAMES,
-             self.SERVICES_PROTO_FILE_NAME,),
+            (
+                self.MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES,
+                self.MESSAGES_PROTO_FILE_NAME,
+            ),
+            (
+                self.SERVICES_PROTO_RELATIVE_DIRECTORY_NAMES,
+                self.SERVICES_PROTO_FILE_NAME,
+            ),
         }
         messages_proto_relative_file_name_forward_slashes = '/'.join(
-            self.MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES + (
-                self.MESSAGES_PROTO_FILE_NAME,))
-        _create_directory_tree(self._proto_path, (
-            relative_proto_directory_names
-            for relative_proto_directory_names, _ in proto_directories_and_names
-        ))
+            self.MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES +
+            (self.MESSAGES_PROTO_FILE_NAME,))
+        _create_directory_tree(self._proto_path,
+                               (relative_proto_directory_names
+                                for relative_proto_directory_names, _ in
+                                proto_directories_and_names))
         self._absolute_proto_file_names = set()
         for relative_directory_names, file_name in proto_directories_and_names:
             absolute_proto_file_name = path.join(
@@ -201,8 +208,7 @@
                 'tests.protoc_plugin.protos.invocation_testing',
                 path.join(*relative_directory_names + (file_name,)))
             massaged_proto_content = _massage_proto_content(
-                raw_proto_content,
-                self.NAME.encode(),
+                raw_proto_content, self.NAME.encode(),
                 messages_proto_relative_file_name_forward_slashes.encode())
             with open(absolute_proto_file_name, 'wb') as proto_file:
                 proto_file.write(massaged_proto_content)
@@ -256,9 +262,7 @@
         self._protoc()
 
         for services_module in self._services_modules():
-            server = grpc.server(
-                futures.ThreadPoolExecutor(
-                    max_workers=test_constants.POOL_SIZE))
+            server = test_common.test_server()
             services_module.add_TestServiceServicer_to_server(
                 _Servicer(self._messages_pb2.Response), server)
             port = server.add_insecure_port('[::]:0')
@@ -278,7 +282,9 @@
 
     if split_proto:
         attributes['MESSAGES_PROTO_RELATIVE_DIRECTORY_NAMES'] = (
-            'split_messages', 'sub',)
+            'split_messages',
+            'sub',
+        )
         attributes['MESSAGES_PROTO_FILE_NAME'] = 'messages.proto'
         attributes['SERVICES_PROTO_RELATIVE_DIRECTORY_NAMES'] = (
             'split_services',)
@@ -304,7 +310,10 @@
 
 
 def _create_test_case_classes():
-    for split_proto in (False, True,):
+    for split_proto in (
+            False,
+            True,
+    ):
         for protoc_style in _PROTOC_STYLES:
             yield _create_test_case_class(split_proto, protoc_style)
 
diff --git a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
index 424b153..ad0ecf0 100644
--- a/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
+++ b/src/python/grpcio_tests/tests/protoc_plugin/beta_python_plugin_test.py
@@ -36,10 +36,28 @@
 _RELATIVE_PYTHON_OUT = 'relative_python_out'
 
 _PROTO_FILES_PATH_COMPONENTS = (
-    ('beta_grpc_plugin_test', 'payload', 'test_payload.proto',),
-    ('beta_grpc_plugin_test', 'requests', 'r', 'test_requests.proto',),
-    ('beta_grpc_plugin_test', 'responses', 'test_responses.proto',),
-    ('beta_grpc_plugin_test', 'service', 'test_service.proto',),)
+    (
+        'beta_grpc_plugin_test',
+        'payload',
+        'test_payload.proto',
+    ),
+    (
+        'beta_grpc_plugin_test',
+        'requests',
+        'r',
+        'test_requests.proto',
+    ),
+    (
+        'beta_grpc_plugin_test',
+        'responses',
+        'test_responses.proto',
+    ),
+    (
+        'beta_grpc_plugin_test',
+        'service',
+        'test_service.proto',
+    ),
+)
 
 _PAYLOAD_PB2 = 'beta_grpc_plugin_test.payload.test_payload_pb2'
 _REQUESTS_PB2 = 'beta_grpc_plugin_test.requests.r.test_requests_pb2'
diff --git a/src/python/grpcio_tests/tests/qps/benchmark_client.py b/src/python/grpcio_tests/tests/qps/benchmark_client.py
index 17fa61e..e6392a8 100644
--- a/src/python/grpcio_tests/tests/qps/benchmark_client.py
+++ b/src/python/grpcio_tests/tests/qps/benchmark_client.py
@@ -155,7 +155,8 @@
                                                    _TIMEOUT)
         for _ in response_stream:
             self._handle_response(
-                self, time.time() - self._send_time_queue.get_nowait())
+                self,
+                time.time() - self._send_time_queue.get_nowait())
 
     def stop(self):
         self._is_streaming = False
diff --git a/src/python/grpcio_tests/tests/qps/qps_worker.py b/src/python/grpcio_tests/tests/qps/qps_worker.py
index 3e46c0b..54f69db 100644
--- a/src/python/grpcio_tests/tests/qps/qps_worker.py
+++ b/src/python/grpcio_tests/tests/qps/qps_worker.py
@@ -16,15 +16,15 @@
 import argparse
 import time
 
-from concurrent import futures
 import grpc
 from src.proto.grpc.testing import services_pb2_grpc
 
 from tests.qps import worker_server
+from tests.unit import test_common
 
 
 def run_worker_server(port):
-    server = grpc.server(futures.ThreadPoolExecutor(max_workers=5))
+    server = test_common.test_server()
     servicer = worker_server.WorkerServer()
     services_pb2_grpc.add_WorkerServiceServicer_to_server(servicer, server)
     server.add_insecure_port('[::]:{}'.format(port))
diff --git a/src/python/grpcio_tests/tests/qps/worker_server.py b/src/python/grpcio_tests/tests/qps/worker_server.py
index adb10cb..41e2403 100644
--- a/src/python/grpcio_tests/tests/qps/worker_server.py
+++ b/src/python/grpcio_tests/tests/qps/worker_server.py
@@ -28,6 +28,7 @@
 from tests.qps import client_runner
 from tests.qps import histogram
 from tests.unit import resources
+from tests.unit import test_common
 
 
 class WorkerServer(services_pb2_grpc.WorkerServiceServicer):
@@ -68,12 +69,11 @@
             server_threads = multiprocessing.cpu_count() * 5
         else:
             server_threads = config.async_server_threads
-        server = grpc.server(
-            futures.ThreadPoolExecutor(max_workers=server_threads))
+        server = test_common.test_server(max_workers=server_threads)
         if config.server_type == control_pb2.ASYNC_SERVER:
             servicer = benchmark_server.BenchmarkServer()
-            services_pb2_grpc.add_BenchmarkServiceServicer_to_server(servicer,
-                                                                     server)
+            services_pb2_grpc.add_BenchmarkServiceServicer_to_server(
+                servicer, server)
         elif config.server_type == control_pb2.ASYNC_GENERIC_SERVER:
             resp_size = config.payload_config.bytebuf_params.resp_size
             servicer = benchmark_server.GenericBenchmarkServer(resp_size)
@@ -87,12 +87,12 @@
                 'grpc.testing.BenchmarkService', method_implementations)
             server.add_generic_rpc_handlers((handler,))
         else:
-            raise Exception(
-                'Unsupported server type {}'.format(config.server_type))
+            raise Exception('Unsupported server type {}'.format(
+                config.server_type))
 
         if config.HasField('security_params'):  # Use SSL
-            server_creds = grpc.ssl_server_credentials((
-                (resources.private_key(), resources.certificate_chain()),))
+            server_creds = grpc.ssl_server_credentials(
+                ((resources.private_key(), resources.certificate_chain()),))
             port = server.add_secure_port('[::]:{}'.format(config.port),
                                           server_creds)
         else:
@@ -156,8 +156,8 @@
             else:
                 raise Exception('Async streaming client not supported')
         else:
-            raise Exception(
-                'Unsupported client type {}'.format(config.client_type))
+            raise Exception('Unsupported client type {}'.format(
+                config.client_type))
 
         # In multi-channel tests, we split the load across all channels
         load_factor = float(config.client_channels)
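
The qps worker changes above route server construction through test_common.test_server(), optionally passing max_workers. That helper's body is not part of this diff; the following is a minimal sketch, assuming it only wraps grpc.server() with a thread pool and disables SO_REUSEPORT (consistent with the grpc.so_reuseport options added to other tests in this change) so that servers bound to '[::]:0' in parallel jobs cannot end up sharing a port. The default worker count is a placeholder.

    from concurrent import futures

    import grpc


    def test_server(max_workers=10):
        """Hypothetical stand-in for tests.unit.test_common.test_server().

        Creates an insecure grpc.Server backed by a thread pool, with
        SO_REUSEPORT disabled so concurrently running test processes do not
        silently bind the same port.  Only the call signature is visible in
        this diff; the real helper may differ.
        """
        return grpc.server(
            futures.ThreadPoolExecutor(max_workers=max_workers),
            options=(('grpc.so_reuseport', 0),))
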
diff --git a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
index a86743f..7ffdba6 100644
--- a/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
+++ b/src/python/grpcio_tests/tests/reflection/_reflection_servicer_test.py
@@ -16,7 +16,6 @@
 import unittest
 
 import grpc
-from grpc.framework.foundation import logging_pool
 from grpc_reflection.v1alpha import reflection
 from grpc_reflection.v1alpha import reflection_pb2
 from grpc_reflection.v1alpha import reflection_pb2_grpc
@@ -27,14 +26,20 @@
 from src.proto.grpc.testing import empty_pb2
 from src.proto.grpc.testing.proto2 import empty2_extensions_pb2
 
-from tests.unit.framework.common import test_constants
+from tests.unit import test_common
 
 _EMPTY_PROTO_FILE_NAME = 'src/proto/grpc/testing/empty.proto'
 _EMPTY_PROTO_SYMBOL_NAME = 'grpc.testing.Empty'
 _SERVICE_NAMES = ('Angstrom', 'Bohr', 'Curie', 'Dyson', 'Einstein', 'Feynman',
                   'Galilei')
 _EMPTY_EXTENSIONS_SYMBOL_NAME = 'grpc.testing.proto2.EmptyWithExtensions'
-_EMPTY_EXTENSIONS_NUMBERS = (124, 125, 126, 127, 128,)
+_EMPTY_EXTENSIONS_NUMBERS = (
+    124,
+    125,
+    126,
+    127,
+    128,
+)
 
 
 def _file_descriptor_to_proto(descriptor):
@@ -46,8 +51,7 @@
 class ReflectionServicerTest(unittest.TestCase):
 
     def setUp(self):
-        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        self._server = grpc.server(server_pool)
+        self._server = test_common.test_server()
         reflection.enable_server_reflection(_SERVICE_NAMES, self._server)
         port = self._server.add_insecure_port('[::]:0')
         self._server.start()
@@ -56,10 +60,12 @@
         self._stub = reflection_pb2_grpc.ServerReflectionStub(channel)
 
     def testFileByName(self):
-        requests = (reflection_pb2.ServerReflectionRequest(
-            file_by_filename=_EMPTY_PROTO_FILE_NAME),
-                    reflection_pb2.ServerReflectionRequest(
-                        file_by_filename='i-donut-exist'),)
+        requests = (
+            reflection_pb2.ServerReflectionRequest(
+                file_by_filename=_EMPTY_PROTO_FILE_NAME),
+            reflection_pb2.ServerReflectionRequest(
+                file_by_filename='i-donut-exist'),
+        )
         responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
         expected_responses = (
             reflection_pb2.ServerReflectionResponse(
@@ -72,14 +78,18 @@
                 error_response=reflection_pb2.ErrorResponse(
                     error_code=grpc.StatusCode.NOT_FOUND.value[0],
                     error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
-                )),)
+                )),
+        )
         self.assertSequenceEqual(expected_responses, responses)
 
     def testFileBySymbol(self):
-        requests = (reflection_pb2.ServerReflectionRequest(
-            file_containing_symbol=_EMPTY_PROTO_SYMBOL_NAME
-        ), reflection_pb2.ServerReflectionRequest(
-            file_containing_symbol='i.donut.exist.co.uk.org.net.me.name.foo'),)
+        requests = (
+            reflection_pb2.ServerReflectionRequest(
+                file_containing_symbol=_EMPTY_PROTO_SYMBOL_NAME),
+            reflection_pb2.ServerReflectionRequest(
+                file_containing_symbol='i.donut.exist.co.uk.org.net.me.name.foo'
+            ),
+        )
         responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
         expected_responses = (
             reflection_pb2.ServerReflectionResponse(
@@ -92,18 +102,23 @@
                 error_response=reflection_pb2.ErrorResponse(
                     error_code=grpc.StatusCode.NOT_FOUND.value[0],
                     error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
-                )),)
+                )),
+        )
         self.assertSequenceEqual(expected_responses, responses)
 
     def testFileContainingExtension(self):
-        requests = (reflection_pb2.ServerReflectionRequest(
-            file_containing_extension=reflection_pb2.ExtensionRequest(
-                containing_type=_EMPTY_EXTENSIONS_SYMBOL_NAME,
-                extension_number=125,),
-        ), reflection_pb2.ServerReflectionRequest(
-            file_containing_extension=reflection_pb2.ExtensionRequest(
-                containing_type='i.donut.exist.co.uk.org.net.me.name.foo',
-                extension_number=55,),),)
+        requests = (
+            reflection_pb2.ServerReflectionRequest(
+                file_containing_extension=reflection_pb2.ExtensionRequest(
+                    containing_type=_EMPTY_EXTENSIONS_SYMBOL_NAME,
+                    extension_number=125,
+                ),),
+            reflection_pb2.ServerReflectionRequest(
+                file_containing_extension=reflection_pb2.ExtensionRequest(
+                    containing_type='i.donut.exist.co.uk.org.net.me.name.foo',
+                    extension_number=55,
+                ),),
+        )
         responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
         expected_responses = (
             reflection_pb2.ServerReflectionResponse(
@@ -116,14 +131,18 @@
                 error_response=reflection_pb2.ErrorResponse(
                     error_code=grpc.StatusCode.NOT_FOUND.value[0],
                     error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
-                )),)
+                )),
+        )
         self.assertSequenceEqual(expected_responses, responses)
 
     def testExtensionNumbersOfType(self):
-        requests = (reflection_pb2.ServerReflectionRequest(
-            all_extension_numbers_of_type=_EMPTY_EXTENSIONS_SYMBOL_NAME
-        ), reflection_pb2.ServerReflectionRequest(
-            all_extension_numbers_of_type='i.donut.exist.co.uk.net.name.foo'),)
+        requests = (
+            reflection_pb2.ServerReflectionRequest(
+                all_extension_numbers_of_type=_EMPTY_EXTENSIONS_SYMBOL_NAME),
+            reflection_pb2.ServerReflectionRequest(
+                all_extension_numbers_of_type='i.donut.exist.co.uk.net.name.foo'
+            ),
+        )
         responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
         expected_responses = (
             reflection_pb2.ServerReflectionResponse(
@@ -137,12 +156,12 @@
                 error_response=reflection_pb2.ErrorResponse(
                     error_code=grpc.StatusCode.NOT_FOUND.value[0],
                     error_message=grpc.StatusCode.NOT_FOUND.value[1].encode(),
-                )),)
+                )),
+        )
         self.assertSequenceEqual(expected_responses, responses)
 
     def testListServices(self):
-        requests = (reflection_pb2.ServerReflectionRequest(
-            list_services='',),)
+        requests = (reflection_pb2.ServerReflectionRequest(list_services='',),)
         responses = tuple(self._stub.ServerReflectionInfo(iter(requests)))
         expected_responses = (reflection_pb2.ServerReflectionResponse(
             valid_host='',
diff --git a/src/python/grpcio_tests/tests/stress/client.py b/src/python/grpcio_tests/tests/stress/client.py
index 40caa39..41f2e1b 100644
--- a/src/python/grpcio_tests/tests/stress/client.py
+++ b/src/python/grpcio_tests/tests/stress/client.py
@@ -102,8 +102,10 @@
             root_certificates = None  # will load default roots.
         channel_credentials = grpc.ssl_channel_credentials(
             root_certificates=root_certificates)
-        options = (('grpc.ssl_target_name_override',
-                    args.server_host_override,),)
+        options = ((
+            'grpc.ssl_target_name_override',
+            args.server_host_override,
+        ),)
         channel = grpc.secure_channel(
             target, channel_credentials, options=options)
     else:
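
As a self-contained illustration of the reformatted options tuple above, the same secure-channel setup can be written as below; the target and the use of default root certificates are placeholders, and 'foo.test.google.fr' is the host override these tests use elsewhere.

    import grpc

    # Placeholder target; the stress client derives the real one from flags.
    target = 'localhost:50051'
    channel_credentials = grpc.ssl_channel_credentials(root_certificates=None)
    options = ((
        'grpc.ssl_target_name_override',
        'foo.test.google.fr',
    ),)
    channel = grpc.secure_channel(target, channel_credentials, options=options)
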
diff --git a/src/python/grpcio_tests/tests/testing/_client_application.py b/src/python/grpcio_tests/tests/testing/_client_application.py
index aff32fb..7d0d74c 100644
--- a/src/python/grpcio_tests/tests/testing/_client_application.py
+++ b/src/python/grpcio_tests/tests/testing/_client_application.py
@@ -235,8 +235,8 @@
         elif scenario is Scenario.INFINITE_REQUEST_STREAM:
             return _run_infinite_request_stream(stub)
     except grpc.RpcError as rpc_error:
-        return Outcome(Outcome.Kind.RPC_ERROR,
-                       rpc_error.code(), rpc_error.details())
+        return Outcome(Outcome.Kind.RPC_ERROR, rpc_error.code(),
+                       rpc_error.details())
 
 
 _IMPLEMENTATIONS = {
@@ -256,5 +256,5 @@
     try:
         return _IMPLEMENTATIONS[scenario](stub)
     except grpc.RpcError as rpc_error:
-        return Outcome(Outcome.Kind.RPC_ERROR,
-                       rpc_error.code(), rpc_error.details())
+        return Outcome(Outcome.Kind.RPC_ERROR, rpc_error.code(),
+                       rpc_error.details())
diff --git a/src/python/grpcio_tests/tests/testing/_client_test.py b/src/python/grpcio_tests/tests/testing/_client_test.py
index 172f386..5b051c3 100644
--- a/src/python/grpcio_tests/tests/testing/_client_test.py
+++ b/src/python/grpcio_tests/tests/testing/_client_test.py
@@ -193,8 +193,10 @@
             rpc.take_request()
             rpc.take_request()
             rpc.requests_closed()
-            rpc.send_initial_metadata((
-                ('my_metadata_key', 'My Metadata Value!',),))
+            rpc.send_initial_metadata(((
+                'my_metadata_key',
+                'My Metadata Value!',
+            ),))
         for rpc in rpcs[:-1]:
             rpc.terminate(_application_common.STREAM_UNARY_RESPONSE, (),
                           grpc.StatusCode.OK, '')
diff --git a/src/python/grpcio_tests/tests/testing/_server_application.py b/src/python/grpcio_tests/tests/testing/_server_application.py
index 06f09c8..02769ca 100644
--- a/src/python/grpcio_tests/tests/testing/_server_application.py
+++ b/src/python/grpcio_tests/tests/testing/_server_application.py
@@ -41,8 +41,10 @@
         yield services_pb2.Strange()
 
     def StreUn(self, request_iterator, context):
-        context.send_initial_metadata((
-            ('server_application_metadata_key', 'Hi there!',),))
+        context.send_initial_metadata(((
+            'server_application_metadata_key',
+            'Hi there!',
+        ),))
         for request in request_iterator:
             if request != _application_common.STREAM_UNARY_REQUEST:
                 context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
diff --git a/src/python/grpcio_tests/tests/testing/_server_test.py b/src/python/grpcio_tests/tests/testing/_server_test.py
index 7897bcc..4f4abd7 100644
--- a/src/python/grpcio_tests/tests/testing/_server_test.py
+++ b/src/python/grpcio_tests/tests/testing/_server_test.py
@@ -110,14 +110,19 @@
         second_termination = rpc.termination()
         third_termination = rpc.termination()
 
-        for later_initial_metadata in (second_initial_metadata,
-                                       third_initial_metadata,):
+        for later_initial_metadata in (
+                second_initial_metadata,
+                third_initial_metadata,
+        ):
             self.assertEqual(first_initial_metadata, later_initial_metadata)
         response = first_termination[0]
         terminal_metadata = first_termination[1]
         code = first_termination[2]
         details = first_termination[3]
-        for later_termination in (second_termination, third_termination,):
+        for later_termination in (
+                second_termination,
+                third_termination,
+        ):
             self.assertEqual(response, later_termination[0])
             self.assertEqual(terminal_metadata, later_termination[1])
             self.assertIs(code, later_termination[2])
diff --git a/src/python/grpcio_tests/tests/testing/_time_test.py b/src/python/grpcio_tests/tests/testing/_time_test.py
index 797394a..9dfe36f 100644
--- a/src/python/grpcio_tests/tests/testing/_time_test.py
+++ b/src/python/grpcio_tests/tests/testing/_time_test.py
@@ -105,8 +105,8 @@
                 test_event.set, _QUANTUM * (2 + random.random()))
         for _ in range(_MANY):
             background_noise_futures.append(
-                self._time.call_in(threading.Event().set, _QUANTUM * 1000 *
-                                   random.random()))
+                self._time.call_in(threading.Event().set,
+                                   _QUANTUM * 1000 * random.random()))
         self._time.sleep_for(_QUANTUM)
         cancelled = set()
         for test_event, test_future in possibly_cancelled_futures.items():
diff --git a/src/python/grpcio_tests/tests/unit/_api_test.py b/src/python/grpcio_tests/tests/unit/_api_test.py
index d6f4447..f6245be 100644
--- a/src/python/grpcio_tests/tests/unit/_api_test.py
+++ b/src/python/grpcio_tests/tests/unit/_api_test.py
@@ -26,28 +26,57 @@
 
     def testAll(self):
         expected_grpc_code_elements = (
-            'FutureTimeoutError', 'FutureCancelledError', 'Future',
-            'ChannelConnectivity', 'StatusCode', 'RpcError', 'RpcContext',
-            'Call', 'ChannelCredentials', 'CallCredentials',
-            'AuthMetadataContext', 'AuthMetadataPluginCallback',
-            'AuthMetadataPlugin', 'ServerCertificateConfiguration',
-            'ServerCredentials', 'UnaryUnaryMultiCallable',
-            'UnaryStreamMultiCallable', 'StreamUnaryMultiCallable',
-            'StreamStreamMultiCallable', 'UnaryUnaryClientInterceptor',
-            'UnaryStreamClientInterceptor', 'StreamUnaryClientInterceptor',
-            'StreamStreamClientInterceptor', 'Channel', 'ServicerContext',
-            'RpcMethodHandler', 'HandlerCallDetails', 'GenericRpcHandler',
-            'ServiceRpcHandler', 'Server', 'ServerInterceptor',
-            'unary_unary_rpc_method_handler', 'unary_stream_rpc_method_handler',
-            'stream_unary_rpc_method_handler', 'ClientCallDetails',
+            'FutureTimeoutError',
+            'FutureCancelledError',
+            'Future',
+            'ChannelConnectivity',
+            'StatusCode',
+            'RpcError',
+            'RpcContext',
+            'Call',
+            'ChannelCredentials',
+            'CallCredentials',
+            'AuthMetadataContext',
+            'AuthMetadataPluginCallback',
+            'AuthMetadataPlugin',
+            'ServerCertificateConfiguration',
+            'ServerCredentials',
+            'UnaryUnaryMultiCallable',
+            'UnaryStreamMultiCallable',
+            'StreamUnaryMultiCallable',
+            'StreamStreamMultiCallable',
+            'UnaryUnaryClientInterceptor',
+            'UnaryStreamClientInterceptor',
+            'StreamUnaryClientInterceptor',
+            'StreamStreamClientInterceptor',
+            'Channel',
+            'ServicerContext',
+            'RpcMethodHandler',
+            'HandlerCallDetails',
+            'GenericRpcHandler',
+            'ServiceRpcHandler',
+            'Server',
+            'ServerInterceptor',
+            'unary_unary_rpc_method_handler',
+            'unary_stream_rpc_method_handler',
+            'stream_unary_rpc_method_handler',
+            'ClientCallDetails',
             'stream_stream_rpc_method_handler',
-            'method_handlers_generic_handler', 'ssl_channel_credentials',
-            'metadata_call_credentials', 'access_token_call_credentials',
-            'composite_call_credentials', 'composite_channel_credentials',
-            'ssl_server_credentials', 'ssl_server_certificate_configuration',
-            'dynamic_ssl_server_credentials', 'channel_ready_future',
-            'insecure_channel', 'secure_channel', 'intercept_channel',
-            'server',)
+            'method_handlers_generic_handler',
+            'ssl_channel_credentials',
+            'metadata_call_credentials',
+            'access_token_call_credentials',
+            'composite_call_credentials',
+            'composite_channel_credentials',
+            'ssl_server_credentials',
+            'ssl_server_certificate_configuration',
+            'dynamic_ssl_server_credentials',
+            'channel_ready_future',
+            'insecure_channel',
+            'secure_channel',
+            'intercept_channel',
+            'server',
+        )
 
         six.assertCountEqual(self, expected_grpc_code_elements,
                              _from_grpc_import_star.GRPC_ELEMENTS)
@@ -56,12 +85,13 @@
 class ChannelConnectivityTest(unittest.TestCase):
 
     def testChannelConnectivity(self):
-        self.assertSequenceEqual(
-            (grpc.ChannelConnectivity.IDLE, grpc.ChannelConnectivity.CONNECTING,
-             grpc.ChannelConnectivity.READY,
-             grpc.ChannelConnectivity.TRANSIENT_FAILURE,
-             grpc.ChannelConnectivity.SHUTDOWN,),
-            tuple(grpc.ChannelConnectivity))
+        self.assertSequenceEqual((
+            grpc.ChannelConnectivity.IDLE,
+            grpc.ChannelConnectivity.CONNECTING,
+            grpc.ChannelConnectivity.READY,
+            grpc.ChannelConnectivity.TRANSIENT_FAILURE,
+            grpc.ChannelConnectivity.SHUTDOWN,
+        ), tuple(grpc.ChannelConnectivity))
 
 
 class ChannelTest(unittest.TestCase):
diff --git a/src/python/grpcio_tests/tests/unit/_auth_context_test.py b/src/python/grpcio_tests/tests/unit/_auth_context_test.py
index c6a0a23..468869a 100644
--- a/src/python/grpcio_tests/tests/unit/_auth_context_test.py
+++ b/src/python/grpcio_tests/tests/unit/_auth_context_test.py
@@ -18,11 +18,9 @@
 
 import grpc
 from grpc import _channel
-from grpc.framework.foundation import logging_pool
 import six
 
 from tests.unit import test_common
-from tests.unit.framework.common import test_constants
 from tests.unit import resources
 
 _REQUEST = b'\x00\x00\x00'
@@ -31,8 +29,12 @@
 _UNARY_UNARY = '/test/UnaryUnary'
 
 _SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
-_CLIENT_IDS = (b'*.test.google.fr', b'waterzooi.test.google.be',
-               b'*.test.youtube.com', b'192.168.1.3',)
+_CLIENT_IDS = (
+    b'*.test.google.fr',
+    b'waterzooi.test.google.be',
+    b'*.test.youtube.com',
+    b'192.168.1.3',
+)
 _ID = 'id'
 _ID_KEY = 'id_key'
 _AUTH_CTX = 'auth_ctx'
@@ -41,7 +43,10 @@
 _CERTIFICATE_CHAIN = resources.certificate_chain()
 _TEST_ROOT_CERTIFICATES = resources.test_root_certificates()
 _SERVER_CERTS = ((_PRIVATE_KEY, _CERTIFICATE_CHAIN),)
-_PROPERTY_OPTIONS = (('grpc.ssl_target_name_override', _SERVER_HOST_OVERRIDE,),)
+_PROPERTY_OPTIONS = ((
+    'grpc.ssl_target_name_override',
+    _SERVER_HOST_OVERRIDE,
+),)
 
 
 def handle_unary_unary(request, servicer_context):
@@ -55,12 +60,12 @@
 class AuthContextTest(unittest.TestCase):
 
     def testInsecure(self):
-        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
         handler = grpc.method_handlers_generic_handler('test', {
             'UnaryUnary':
             grpc.unary_unary_rpc_method_handler(handle_unary_unary)
         })
-        server = grpc.server(server_pool, (handler,))
+        server = test_common.test_server()
+        server.add_generic_rpc_handlers((handler,))
         port = server.add_insecure_port('[::]:0')
         server.start()
 
@@ -74,12 +79,12 @@
         self.assertDictEqual({}, auth_data[_AUTH_CTX])
 
     def testSecureNoCert(self):
-        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
         handler = grpc.method_handlers_generic_handler('test', {
             'UnaryUnary':
             grpc.unary_unary_rpc_method_handler(handle_unary_unary)
         })
-        server = grpc.server(server_pool, (handler,))
+        server = test_common.test_server()
+        server.add_generic_rpc_handlers((handler,))
         server_cred = grpc.ssl_server_credentials(_SERVER_CERTS)
         port = server.add_secure_port('[::]:0', server_cred)
         server.start()
@@ -101,12 +106,12 @@
         }, auth_data[_AUTH_CTX])
 
     def testSecureClientCert(self):
-        server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
         handler = grpc.method_handlers_generic_handler('test', {
             'UnaryUnary':
             grpc.unary_unary_rpc_method_handler(handle_unary_unary)
         })
-        server = grpc.server(server_pool, (handler,))
+        server = test_common.test_server()
+        server.add_generic_rpc_handlers((handler,))
         server_cred = grpc.ssl_server_credentials(
             _SERVER_CERTS,
             root_certificates=_TEST_ROOT_CERTIFICATES,
diff --git a/src/python/grpcio_tests/tests/unit/_channel_args_test.py b/src/python/grpcio_tests/tests/unit/_channel_args_test.py
index 0a6b512..1a2d2c0 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_args_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_args_test.py
@@ -24,8 +24,13 @@
         return 123456
 
 
-TEST_CHANNEL_ARGS = (('arg1', b'bytes_val'), ('arg2', 'str_val'), ('arg3', 1),
-                     (b'arg4', 'str_val'), ('arg6', TestPointerWrapper()),)
+TEST_CHANNEL_ARGS = (
+    ('arg1', b'bytes_val'),
+    ('arg2', 'str_val'),
+    ('arg3', 1),
+    (b'arg4', 'str_val'),
+    ('arg6', TestPointerWrapper()),
+)
 
 
 class ChannelArgsTest(unittest.TestCase):
diff --git a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
index f8c6127..f9eb001 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_connectivity_test.py
@@ -83,7 +83,7 @@
 
     def test_immediately_connectable_channel_connectivity(self):
         thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
-        server = grpc.server(thread_pool)
+        server = grpc.server(thread_pool, options=(('grpc.so_reuseport', 0),))
         port = server.add_insecure_port('[::]:0')
         server.start()
         first_callback = _Callback()
@@ -125,7 +125,7 @@
 
     def test_reachable_then_unreachable_channel_connectivity(self):
         thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
-        server = grpc.server(thread_pool)
+        server = grpc.server(thread_pool, options=(('grpc.so_reuseport', 0),))
         port = server.add_insecure_port('[::]:0')
         server.start()
         callback = _Callback()
diff --git a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
index bdd2d86..30b4860 100644
--- a/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
+++ b/src/python/grpcio_tests/tests/unit/_channel_ready_future_test.py
@@ -61,7 +61,7 @@
 
     def test_immediately_connectable_channel_connectivity(self):
         thread_pool = _thread_pool.RecordingThreadPool(max_workers=None)
-        server = grpc.server(thread_pool)
+        server = grpc.server(thread_pool, options=(('grpc.so_reuseport', 0),))
         port = server.add_insecure_port('[::]:0')
         server.start()
         channel = grpc.insecure_channel('localhost:{}'.format(port))
diff --git a/src/python/grpcio_tests/tests/unit/_compression_test.py b/src/python/grpcio_tests/tests/unit/_compression_test.py
index e576a5a..7550cd3 100644
--- a/src/python/grpcio_tests/tests/unit/_compression_test.py
+++ b/src/python/grpcio_tests/tests/unit/_compression_test.py
@@ -17,7 +17,6 @@
 
 import grpc
 from grpc import _grpcio_metadata
-from grpc.framework.foundation import logging_pool
 
 from tests.unit import test_common
 from tests.unit.framework.common import test_constants
@@ -27,16 +26,16 @@
 
 
 def handle_unary(request, servicer_context):
-    servicer_context.send_initial_metadata(
-        [('grpc-internal-encoding-request', 'gzip')])
+    servicer_context.send_initial_metadata([('grpc-internal-encoding-request',
+                                             'gzip')])
     return request
 
 
 def handle_stream(request_iterator, servicer_context):
     # TODO(issue:#6891) We should be able to remove this loop,
     # and replace with return; yield
-    servicer_context.send_initial_metadata(
-        [('grpc-internal-encoding-request', 'gzip')])
+    servicer_context.send_initial_metadata([('grpc-internal-encoding-request',
+                                             'gzip')])
     for request in request_iterator:
         yield request
 
@@ -72,9 +71,8 @@
 class CompressionTest(unittest.TestCase):
 
     def setUp(self):
-        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        self._server = grpc.server(
-            self._server_pool, handlers=(_GenericHandler(),))
+        self._server = test_common.test_server()
+        self._server.add_generic_rpc_handlers((_GenericHandler(),))
         self._port = self._server.add_insecure_port('[::]:0')
         self._server.start()
 
diff --git a/src/python/grpcio_tests/tests/unit/_credentials_test.py b/src/python/grpcio_tests/tests/unit/_credentials_test.py
index 097898b..f487fe6 100644
--- a/src/python/grpcio_tests/tests/unit/_credentials_test.py
+++ b/src/python/grpcio_tests/tests/unit/_credentials_test.py
@@ -26,8 +26,8 @@
         third = grpc.access_token_call_credentials('ghi')
 
         first_and_second = grpc.composite_call_credentials(first, second)
-        first_second_and_third = grpc.composite_call_credentials(first, second,
-                                                                 third)
+        first_second_and_third = grpc.composite_call_credentials(
+            first, second, third)
 
         self.assertIsInstance(first_and_second, grpc.CallCredentials)
         self.assertIsInstance(first_second_and_third, grpc.CallCredentials)
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
index 458b4fe..b81d6fb 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_cancel_many_calls_test.py
@@ -53,7 +53,7 @@
         self._state = state
         self._lock = threading.Lock()
         self._completion_queue = completion_queue
-        self._call = rpc_event.operation_call
+        self._call = rpc_event.call
 
     def __call__(self):
         with self._state.condition:
@@ -81,7 +81,8 @@
                     cygrpc.SendMessageOperation(b'\x79\x57', _EMPTY_FLAGS),
                     cygrpc.SendStatusFromServerOperation(
                         _EMPTY_METADATA, cygrpc.StatusCode.ok, b'test details!',
-                        _EMPTY_FLAGS),)
+                        _EMPTY_FLAGS),
+                )
                 self._call.start_server_batch(operations,
                                               _SERVER_COMPLETE_CALL_TAG)
             self._completion_queue.poll()
@@ -141,7 +142,8 @@
             test_constants.THREAD_CONCURRENCY)
 
         server_completion_queue = cygrpc.CompletionQueue()
-        server = cygrpc.Server(cygrpc.ChannelArgs([]))
+        server = cygrpc.Server(
+            cygrpc.ChannelArgs([cygrpc.ChannelArg(b'grpc.so_reuseport', 0)]))
         server.register_completion_queue(server_completion_queue)
         port = server.add_http2_port(b'[::]:0')
         server.start()
@@ -150,8 +152,12 @@
 
         state = _State()
 
-        server_thread_args = (state, server, server_completion_queue,
-                              server_thread_pool,)
+        server_thread_args = (
+            state,
+            server,
+            server_completion_queue,
+            server_thread_pool,
+        )
         server_thread = threading.Thread(target=_serve, args=server_thread_args)
         server_thread.start()
 
@@ -175,7 +181,8 @@
                     cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                     cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                     cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
-                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),)
+                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
+                )
                 tag = 'client_complete_call_{0:04d}_tag'.format(index)
                 client_call.start_client_batch(operations, tag)
                 client_due.add(tag)
@@ -192,8 +199,8 @@
                     state.condition.notify_all()
                     break
 
-        client_driver.events(test_constants.RPC_CONCURRENCY *
-                             _SUCCESS_CALL_FRACTION)
+        client_driver.events(
+            test_constants.RPC_CONCURRENCY * _SUCCESS_CALL_FRACTION)
         with client_condition:
             for client_call in client_calls:
                 client_call.cancel()
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py b/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py
index 1d57ea7..4eeb34b 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_channel_test.py
@@ -56,7 +56,10 @@
 
     def test_single_channel_lonely_connectivity(self):
         channel, completion_queue = _channel_and_completion_queue()
-        _in_parallel(_connectivity_loop, (channel, completion_queue,))
+        _in_parallel(_connectivity_loop, (
+            channel,
+            completion_queue,
+        ))
         completion_queue.shutdown()
 
     def test_multiple_channels_lonely_connectivity(self):
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_common.py b/src/python/grpcio_tests/tests/unit/_cython/_common.py
index 96f0f15..ffd226f 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_common.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_common.py
@@ -23,14 +23,20 @@
 INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
 EMPTY_FLAGS = 0
 
-INVOCATION_METADATA = (('client-md-key', 'client-md-key'),
-                       ('client-md-key-bin', b'\x00\x01' * 3000),)
+INVOCATION_METADATA = (
+    ('client-md-key', 'client-md-key'),
+    ('client-md-key-bin', b'\x00\x01' * 3000),
+)
 
-INITIAL_METADATA = (('server-initial-md-key', 'server-initial-md-value'),
-                    ('server-initial-md-key-bin', b'\x00\x02' * 3000),)
+INITIAL_METADATA = (
+    ('server-initial-md-key', 'server-initial-md-value'),
+    ('server-initial-md-key-bin', b'\x00\x02' * 3000),
+)
 
-TRAILING_METADATA = (('server-trailing-md-key', 'server-trailing-md-value'),
-                     ('server-trailing-md-key-bin', b'\x00\x03' * 3000),)
+TRAILING_METADATA = (
+    ('server-trailing-md-key', 'server-trailing-md-value'),
+    ('server-trailing-md-key-bin', b'\x00\x03' * 3000),
+)
 
 
 class QueueDriver(object):
@@ -76,7 +82,10 @@
 
 class OperationResult(
         collections.namedtuple('OperationResult', (
-            'start_batch_result', 'completion_type', 'success',))):
+            'start_batch_result',
+            'completion_type',
+            'success',
+        ))):
     pass
 
 
@@ -88,7 +97,8 @@
 
     def setUp(self):
         self.server_completion_queue = cygrpc.CompletionQueue()
-        self.server = cygrpc.Server(cygrpc.ChannelArgs([]))
+        self.server = cygrpc.Server(
+            cygrpc.ChannelArgs([cygrpc.ChannelArg(b'grpc.so_reuseport', 0)]))
         self.server.register_completion_queue(self.server_completion_queue)
         port = self.server.add_http2_port(b'[::]:0')
         self.server.start()
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py
index 41291cc..4ef4ad3 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_server_completion_queue_per_call_test.py
@@ -72,7 +72,7 @@
 
         with server_call_condition:
             server_send_initial_metadata_start_batch_result = (
-                server_request_call_event.operation_call.start_server_batch([
+                server_request_call_event.call.start_server_batch([
                     cygrpc.SendInitialMetadataOperation(
                         _common.INITIAL_METADATA, _common.EMPTY_FLAGS),
                 ], server_send_initial_metadata_tag))
@@ -84,7 +84,7 @@
 
         with server_call_condition:
             server_complete_rpc_start_batch_result = (
-                server_request_call_event.operation_call.start_server_batch([
+                server_request_call_event.call.start_server_batch([
                     cygrpc.ReceiveCloseOnServerOperation(_common.EMPTY_FLAGS),
                     cygrpc.SendStatusFromServerOperation(
                         _common.TRAILING_METADATA, cygrpc.StatusCode.ok,
@@ -101,27 +101,29 @@
         client_complete_rpc_event = self.client_driver.event_with_tag(
             client_complete_rpc_tag)
 
-        return (_common.OperationResult(server_request_call_start_batch_result,
-                                        server_request_call_event.type,
-                                        server_request_call_event.success),
-                _common.OperationResult(
-                    client_receive_initial_metadata_start_batch_result,
-                    client_receive_initial_metadata_event.type,
-                    client_receive_initial_metadata_event.success),
-                _common.OperationResult(client_complete_rpc_start_batch_result,
-                                        client_complete_rpc_event.type,
-                                        client_complete_rpc_event.success),
-                _common.OperationResult(
-                    server_send_initial_metadata_start_batch_result,
-                    server_send_initial_metadata_event.type,
-                    server_send_initial_metadata_event.success),
-                _common.OperationResult(server_complete_rpc_start_batch_result,
-                                        server_complete_rpc_event.type,
-                                        server_complete_rpc_event.success),)
+        return (
+            _common.OperationResult(server_request_call_start_batch_result,
+                                    server_request_call_event.completion_type,
+                                    server_request_call_event.success),
+            _common.OperationResult(
+                client_receive_initial_metadata_start_batch_result,
+                client_receive_initial_metadata_event.completion_type,
+                client_receive_initial_metadata_event.success),
+            _common.OperationResult(client_complete_rpc_start_batch_result,
+                                    client_complete_rpc_event.completion_type,
+                                    client_complete_rpc_event.success),
+            _common.OperationResult(
+                server_send_initial_metadata_start_batch_result,
+                server_send_initial_metadata_event.completion_type,
+                server_send_initial_metadata_event.success),
+            _common.OperationResult(server_complete_rpc_start_batch_result,
+                                    server_complete_rpc_event.completion_type,
+                                    server_complete_rpc_event.success),
+        )
 
     def test_rpcs(self):
-        expecteds = [(_common.SUCCESSFUL_OPERATION_RESULT,) *
-                     5] * _common.RPC_COUNT
+        expecteds = [(
+            _common.SUCCESSFUL_OPERATION_RESULT,) * 5] * _common.RPC_COUNT
         actuallys = _common.execute_many_times(self._do_rpcs)
         self.assertSequenceEqual(expecteds, actuallys)
 
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py
index b429a20..85395c9 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_no_messages_single_server_completion_queue_test.py
@@ -63,7 +63,7 @@
 
         with self.server_condition:
             server_send_initial_metadata_start_batch_result = (
-                server_request_call_event.operation_call.start_server_batch([
+                server_request_call_event.call.start_server_batch([
                     cygrpc.SendInitialMetadataOperation(
                         _common.INITIAL_METADATA, _common.EMPTY_FLAGS),
                 ], server_send_initial_metadata_tag))
@@ -75,7 +75,7 @@
 
         with self.server_condition:
             server_complete_rpc_start_batch_result = (
-                server_request_call_event.operation_call.start_server_batch([
+                server_request_call_event.call.start_server_batch([
                     cygrpc.ReceiveCloseOnServerOperation(_common.EMPTY_FLAGS),
                     cygrpc.SendStatusFromServerOperation(
                         _common.TRAILING_METADATA, cygrpc.StatusCode.ok,
@@ -92,27 +92,29 @@
         client_complete_rpc_event = self.client_driver.event_with_tag(
             client_complete_rpc_tag)
 
-        return (_common.OperationResult(server_request_call_start_batch_result,
-                                        server_request_call_event.type,
-                                        server_request_call_event.success),
-                _common.OperationResult(
-                    client_receive_initial_metadata_start_batch_result,
-                    client_receive_initial_metadata_event.type,
-                    client_receive_initial_metadata_event.success),
-                _common.OperationResult(client_complete_rpc_start_batch_result,
-                                        client_complete_rpc_event.type,
-                                        client_complete_rpc_event.success),
-                _common.OperationResult(
-                    server_send_initial_metadata_start_batch_result,
-                    server_send_initial_metadata_event.type,
-                    server_send_initial_metadata_event.success),
-                _common.OperationResult(server_complete_rpc_start_batch_result,
-                                        server_complete_rpc_event.type,
-                                        server_complete_rpc_event.success),)
+        return (
+            _common.OperationResult(server_request_call_start_batch_result,
+                                    server_request_call_event.completion_type,
+                                    server_request_call_event.success),
+            _common.OperationResult(
+                client_receive_initial_metadata_start_batch_result,
+                client_receive_initial_metadata_event.completion_type,
+                client_receive_initial_metadata_event.success),
+            _common.OperationResult(client_complete_rpc_start_batch_result,
+                                    client_complete_rpc_event.completion_type,
+                                    client_complete_rpc_event.success),
+            _common.OperationResult(
+                server_send_initial_metadata_start_batch_result,
+                server_send_initial_metadata_event.completion_type,
+                server_send_initial_metadata_event.success),
+            _common.OperationResult(server_complete_rpc_start_batch_result,
+                                    server_complete_rpc_event.completion_type,
+                                    server_complete_rpc_event.success),
+        )
 
     def test_rpcs(self):
-        expecteds = [(_common.SUCCESSFUL_OPERATION_RESULT,) *
-                     5] * _common.RPC_COUNT
+        expecteds = [(
+            _common.SUCCESSFUL_OPERATION_RESULT,) * 5] * _common.RPC_COUNT
         actuallys = _common.execute_many_times(self._do_rpcs)
         self.assertSequenceEqual(expecteds, actuallys)
 
diff --git a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
index a6d92f2..82ef25b 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/_read_some_but_not_all_responses_test.py
@@ -112,7 +112,8 @@
 
     def testReadSomeButNotAllResponses(self):
         server_completion_queue = cygrpc.CompletionQueue()
-        server = cygrpc.Server(cygrpc.ChannelArgs([]))
+        server = cygrpc.Server(
+            cygrpc.ChannelArgs([cygrpc.ChannelArg(b'grpc.so_reuseport', 0)]))
         server.register_completion_queue(server_completion_queue)
         port = server.add_http2_port(b'[::]:0')
         server.start()
@@ -136,9 +137,12 @@
         server_send_first_message_tag = 'server_send_first_message_tag'
         server_send_second_message_tag = 'server_send_second_message_tag'
         server_complete_rpc_tag = 'server_complete_rpc_tag'
-        server_call_due = set(
-            (server_send_initial_metadata_tag, server_send_first_message_tag,
-             server_send_second_message_tag, server_complete_rpc_tag,))
+        server_call_due = set((
+            server_send_initial_metadata_tag,
+            server_send_first_message_tag,
+            server_send_second_message_tag,
+            server_complete_rpc_tag,
+        ))
         server_call_completion_queue = cygrpc.CompletionQueue()
         server_call_driver = _QueueDriver(server_call_condition,
                                           server_call_completion_queue,
@@ -174,12 +178,12 @@
 
         with server_call_condition:
             server_send_initial_metadata_start_batch_result = (
-                server_rpc_event.operation_call.start_server_batch([
+                server_rpc_event.call.start_server_batch([
                     cygrpc.SendInitialMetadataOperation(_EMPTY_METADATA,
                                                         _EMPTY_FLAGS),
                 ], server_send_initial_metadata_tag))
             server_send_first_message_start_batch_result = (
-                server_rpc_event.operation_call.start_server_batch([
+                server_rpc_event.call.start_server_batch([
                     cygrpc.SendMessageOperation(b'\x07', _EMPTY_FLAGS),
                 ], server_send_first_message_tag))
         server_send_initial_metadata_event = server_call_driver.event_with_tag(
@@ -188,11 +192,11 @@
             server_send_first_message_tag)
         with server_call_condition:
             server_send_second_message_start_batch_result = (
-                server_rpc_event.operation_call.start_server_batch([
+                server_rpc_event.call.start_server_batch([
                     cygrpc.SendMessageOperation(b'\x07', _EMPTY_FLAGS),
                 ], server_send_second_message_tag))
             server_complete_rpc_start_batch_result = (
-                server_rpc_event.operation_call.start_server_batch([
+                server_rpc_event.call.start_server_batch([
                     cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
                     cygrpc.SendStatusFromServerOperation(
                         (), cygrpc.StatusCode.ok, b'test details',
@@ -231,9 +235,8 @@
         self.assertEqual(cygrpc.CallError.ok, client_call_cancel_result)
         self.assertIs(server_rpc_tag, server_rpc_event.tag)
         self.assertEqual(cygrpc.CompletionType.operation_complete,
-                         server_rpc_event.type)
-        self.assertIsInstance(server_rpc_event.operation_call, cygrpc.Call)
-        self.assertEqual(0, len(server_rpc_event.batch_operations))
+                         server_rpc_event.completion_type)
+        self.assertIsInstance(server_rpc_event.call, cygrpc.Call)
 
 
 if __name__ == '__main__':
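
Several cygrpc tests in this change construct the low-level server with an explicit grpc.so_reuseport channel arg. Pulled out of the test bodies, the construction pattern looks like the sketch below (illustrative only; cygrpc is an internal Cython module, and the snippet assumes the grpcio package layout used by these tests).

    from grpc._cython import cygrpc

    # Build a low-level server with SO_REUSEPORT disabled; note that the
    # channel-arg key and the port string are passed as bytes.
    server_completion_queue = cygrpc.CompletionQueue()
    server = cygrpc.Server(
        cygrpc.ChannelArgs([cygrpc.ChannelArg(b'grpc.so_reuseport', 0)]))
    server.register_completion_queue(server_completion_queue)
    port = server.add_http2_port(b'[::]:0')
    server.start()
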
diff --git a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
index 002fb9b..5f9b74b 100644
--- a/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_cython/cygrpc_test.py
@@ -29,8 +29,10 @@
 
 
 def _metadata_plugin(context, callback):
-    callback(((_CALL_CREDENTIALS_METADATA_KEY,
-               _CALL_CREDENTIALS_METADATA_VALUE,),), cygrpc.StatusCode.ok, b'')
+    callback(((
+        _CALL_CREDENTIALS_METADATA_KEY,
+        _CALL_CREDENTIALS_METADATA_VALUE,
+    ),), cygrpc.StatusCode.ok, b'')
 
 
 class TypeSmokeTest(unittest.TestCase):
@@ -55,7 +57,8 @@
         del completion_queue
 
     def testServerUpDown(self):
-        server = cygrpc.Server(cygrpc.ChannelArgs([]))
+        server = cygrpc.Server(
+            cygrpc.ChannelArgs([cygrpc.ChannelArg(b'grpc.so_reuseport', 0)]))
         del server
 
     def testChannelUpDown(self):
@@ -67,7 +70,8 @@
                                              b'test plugin name!')
 
     def testServerStartNoExplicitShutdown(self):
-        server = cygrpc.Server(cygrpc.ChannelArgs([]))
+        server = cygrpc.Server(
+            cygrpc.ChannelArgs([cygrpc.ChannelArg(b'grpc.so_reuseport', 0)]))
         completion_queue = cygrpc.CompletionQueue()
         server.register_completion_queue(completion_queue)
         port = server.add_http2_port(b'[::]:0')
@@ -77,14 +81,16 @@
 
     def testServerStartShutdown(self):
         completion_queue = cygrpc.CompletionQueue()
-        server = cygrpc.Server(cygrpc.ChannelArgs([]))
+        server = cygrpc.Server(
+            cygrpc.ChannelArgs([cygrpc.ChannelArg(b'grpc.so_reuseport', 0)]))
         server.add_http2_port(b'[::]:0')
         server.register_completion_queue(completion_queue)
         server.start()
         shutdown_tag = object()
         server.shutdown(completion_queue, shutdown_tag)
         event = completion_queue.poll()
-        self.assertEqual(cygrpc.CompletionType.operation_complete, event.type)
+        self.assertEqual(cygrpc.CompletionType.operation_complete,
+                         event.completion_type)
         self.assertIs(shutdown_tag, event.tag)
         del server
         del completion_queue
@@ -94,7 +100,8 @@
 
     def setUpMixin(self, server_credentials, client_credentials, host_override):
         self.server_completion_queue = cygrpc.CompletionQueue()
-        self.server = cygrpc.Server(cygrpc.ChannelArgs([]))
+        self.server = cygrpc.Server(
+            cygrpc.ChannelArgs([cygrpc.ChannelArg(b'grpc.so_reuseport', 0)]))
         self.server.register_completion_queue(self.server_completion_queue)
         if server_credentials:
             self.port = self.server.add_http2_port(b'[::]:0',
@@ -108,13 +115,12 @@
                 cygrpc.ChannelArg(cygrpc.ChannelArgKey.ssl_target_name_override,
                                   host_override)
             ])
-            self.client_channel = cygrpc.Channel(
-                'localhost:{}'.format(self.port).encode(),
-                client_channel_arguments, client_credentials)
+            self.client_channel = cygrpc.Channel('localhost:{}'.format(
+                self.port).encode(), client_channel_arguments,
+                                                 client_credentials)
         else:
-            self.client_channel = cygrpc.Channel(
-                'localhost:{}'.format(self.port).encode(),
-                cygrpc.ChannelArgs([]))
+            self.client_channel = cygrpc.Channel('localhost:{}'.format(
+                self.port).encode(), cygrpc.ChannelArgs([]))
         if host_override:
             self.host_argument = None  # default host
             self.expected_host = host_override
@@ -143,12 +149,12 @@
                 self.assertEqual(cygrpc.CallError.ok, call_result)
                 event = queue.poll(deadline)
                 self.assertEqual(cygrpc.CompletionType.operation_complete,
-                                 event.type)
+                                 event.completion_type)
                 self.assertTrue(event.success)
                 self.assertIs(tag, event.tag)
             except Exception as error:
-                raise Exception(
-                    "Error in '{}': {}".format(description, error.message))
+                raise Exception("Error in '{}': {}".format(
+                    description, error.message))
             return event
 
         return test_utilities.SimpleFuture(performer)
@@ -184,8 +190,15 @@
             None, 0, self.client_completion_queue, METHOD, self.host_argument,
             cygrpc_deadline)
         client_initial_metadata = (
-            (CLIENT_METADATA_ASCII_KEY, CLIENT_METADATA_ASCII_VALUE,),
-            (CLIENT_METADATA_BIN_KEY, CLIENT_METADATA_BIN_VALUE,),)
+            (
+                CLIENT_METADATA_ASCII_KEY,
+                CLIENT_METADATA_ASCII_VALUE,
+            ),
+            (
+                CLIENT_METADATA_BIN_KEY,
+                CLIENT_METADATA_BIN_VALUE,
+            ),
+        )
         client_start_batch_result = client_call.start_client_batch([
             cygrpc.SendInitialMetadataOperation(client_initial_metadata,
                                                 _EMPTY_FLAGS),
@@ -201,30 +214,32 @@
 
         request_event = self.server_completion_queue.poll(cygrpc_deadline)
         self.assertEqual(cygrpc.CompletionType.operation_complete,
-                         request_event.type)
-        self.assertIsInstance(request_event.operation_call, cygrpc.Call)
+                         request_event.completion_type)
+        self.assertIsInstance(request_event.call, cygrpc.Call)
         self.assertIs(server_request_tag, request_event.tag)
-        self.assertEqual(0, len(request_event.batch_operations))
         self.assertTrue(
             test_common.metadata_transmitted(client_initial_metadata,
-                                             request_event.request_metadata))
-        self.assertEqual(METHOD, request_event.request_call_details.method)
-        self.assertEqual(self.expected_host,
-                         request_event.request_call_details.host)
+                                             request_event.invocation_metadata))
+        self.assertEqual(METHOD, request_event.call_details.method)
+        self.assertEqual(self.expected_host, request_event.call_details.host)
         self.assertLess(
-            abs(DEADLINE - float(request_event.request_call_details.deadline)),
+            abs(DEADLINE - float(request_event.call_details.deadline)),
             DEADLINE_TOLERANCE)
 
         server_call_tag = object()
-        server_call = request_event.operation_call
-        server_initial_metadata = (
-            (SERVER_INITIAL_METADATA_KEY, SERVER_INITIAL_METADATA_VALUE,),)
-        server_trailing_metadata = (
-            (SERVER_TRAILING_METADATA_KEY, SERVER_TRAILING_METADATA_VALUE,),)
+        server_call = request_event.call
+        server_initial_metadata = ((
+            SERVER_INITIAL_METADATA_KEY,
+            SERVER_INITIAL_METADATA_VALUE,
+        ),)
+        server_trailing_metadata = ((
+            SERVER_TRAILING_METADATA_KEY,
+            SERVER_TRAILING_METADATA_VALUE,
+        ),)
         server_start_batch_result = server_call.start_server_batch([
-            cygrpc.SendInitialMetadataOperation(
-                server_initial_metadata,
-                _EMPTY_FLAGS), cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
+            cygrpc.SendInitialMetadataOperation(server_initial_metadata,
+                                                _EMPTY_FLAGS),
+            cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
             cygrpc.SendMessageOperation(RESPONSE, _EMPTY_FLAGS),
             cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
             cygrpc.SendStatusFromServerOperation(
@@ -318,7 +333,7 @@
         ], "Client prologue")
 
         request_event = self.server_completion_queue.poll(cygrpc_deadline)
-        server_call = request_event.operation_call
+        server_call = request_event.call
 
         def perform_server_operations(operations, description):
             return self._perform_operations(operations, server_call,
@@ -374,10 +389,11 @@
 class SecureServerSecureClient(unittest.TestCase, ServerClientMixin):
 
     def setUp(self):
-        server_credentials = cygrpc.server_credentials_ssl(None, [
-            cygrpc.SslPemKeyCertPair(resources.private_key(),
-                                     resources.certificate_chain())
-        ], False)
+        server_credentials = cygrpc.server_credentials_ssl(
+            None, [
+                cygrpc.SslPemKeyCertPair(resources.private_key(),
+                                         resources.certificate_chain())
+            ], False)
         client_credentials = cygrpc.SSLChannelCredentials(
             resources.test_root_certificates(), None, None)
         self.setUpMixin(server_credentials, client_credentials,
diff --git a/src/python/grpcio_tests/tests/unit/_empty_message_test.py b/src/python/grpcio_tests/tests/unit/_empty_message_test.py
index 62077e7..c55ef61 100644
--- a/src/python/grpcio_tests/tests/unit/_empty_message_test.py
+++ b/src/python/grpcio_tests/tests/unit/_empty_message_test.py
@@ -15,8 +15,8 @@
 import unittest
 
 import grpc
-from grpc.framework.foundation import logging_pool
 
+from tests.unit import test_common
 from tests.unit.framework.common import test_constants
 
 _REQUEST = b''
@@ -87,9 +87,8 @@
 class EmptyMessageTest(unittest.TestCase):
 
     def setUp(self):
-        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        self._server = grpc.server(
-            self._server_pool, handlers=(_GenericHandler(),))
+        self._server = test_common.test_server()
+        self._server.add_generic_rpc_handlers((_GenericHandler(),))
         port = self._server.add_insecure_port('[::]:0')
         self._server.start()
         self._channel = grpc.insecure_channel('localhost:%d' % port)
@@ -107,13 +106,13 @@
                                  list(response_iterator))
 
     def testStreamUnary(self):
-        response = self._channel.stream_unary(_STREAM_UNARY)(
-            iter([_REQUEST] * test_constants.STREAM_LENGTH))
+        response = self._channel.stream_unary(_STREAM_UNARY)(iter(
+            [_REQUEST] * test_constants.STREAM_LENGTH))
         self.assertEqual(_RESPONSE, response)
 
     def testStreamStream(self):
-        response_iterator = self._channel.stream_stream(_STREAM_STREAM)(
-            iter([_REQUEST] * test_constants.STREAM_LENGTH))
+        response_iterator = self._channel.stream_stream(_STREAM_STREAM)(iter(
+            [_REQUEST] * test_constants.STREAM_LENGTH))
         self.assertSequenceEqual([_RESPONSE] * test_constants.STREAM_LENGTH,
                                  list(response_iterator))
 
diff --git a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
index 7c13dab..0a0239a 100644
--- a/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
+++ b/src/python/grpcio_tests/tests/unit/_exit_scenarios.py
@@ -168,11 +168,11 @@
     args = parser.parse_args()
 
     if args.scenario == UNSTARTED_SERVER:
-        server = grpc.server(DaemonPool())
+        server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
         if args.wait_for_interrupt:
             time.sleep(WAIT_TIME)
     elif args.scenario == RUNNING_SERVER:
-        server = grpc.server(DaemonPool())
+        server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
         port = server.add_insecure_port('[::]:0')
         server.start()
         if args.wait_for_interrupt:
@@ -187,7 +187,7 @@
         if args.wait_for_interrupt:
             time.sleep(WAIT_TIME)
     elif args.scenario == POLL_CONNECTIVITY:
-        server = grpc.server(DaemonPool())
+        server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
         port = server.add_insecure_port('[::]:0')
         server.start()
         channel = grpc.insecure_channel('localhost:%d' % port)
@@ -201,7 +201,7 @@
 
     else:
         handler = GenericHandler()
-        server = grpc.server(DaemonPool())
+        server = grpc.server(DaemonPool(), options=(('grpc.so_reuseport', 0),))
         port = server.add_insecure_port('[::]:0')
         server.add_generic_rpc_handlers((handler,))
         server.start()
diff --git a/src/python/grpcio_tests/tests/unit/_interceptor_test.py b/src/python/grpcio_tests/tests/unit/_interceptor_test.py
index cf875ed..3d547b7 100644
--- a/src/python/grpcio_tests/tests/unit/_interceptor_test.py
+++ b/src/python/grpcio_tests/tests/unit/_interceptor_test.py
@@ -22,6 +22,7 @@
 import grpc
 from grpc.framework.foundation import logging_pool
 
+from tests.unit import test_common
 from tests.unit.framework.common import test_constants
 from tests.unit.framework.common import test_control
 
@@ -64,7 +65,10 @@
     def handle_unary_unary(self, request, servicer_context):
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
         return request
 
     def handle_unary_stream(self, request, servicer_context):
@@ -73,7 +77,10 @@
             yield request
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
 
     def handle_stream_unary(self, request_iterator, servicer_context):
         if servicer_context is not None:
@@ -85,13 +92,19 @@
             response_elements.append(request)
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
         return b''.join(response_elements)
 
     def handle_stream_stream(self, request_iterator, servicer_context):
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
         for request in request_iterator:
             self._control.control()
             yield request
@@ -161,9 +174,10 @@
 
 
 class _ClientCallDetails(
-        collections.namedtuple('_ClientCallDetails',
-                               ('method', 'timeout', 'metadata',
-                                'credentials')), grpc.ClientCallDetails):
+        collections.namedtuple(
+            '_ClientCallDetails',
+            ('method', 'timeout', 'metadata', 'credentials')),
+        grpc.ClientCallDetails):
     pass
 
 
@@ -261,7 +275,10 @@
         metadata = []
         if client_call_details.metadata:
             metadata = list(client_call_details.metadata)
-        metadata.append((header, value,))
+        metadata.append((
+            header,
+            value,
+        ))
         client_call_details = _ClientCallDetails(
             client_call_details.method, client_call_details.timeout, metadata,
             client_call_details.credentials)
@@ -304,9 +321,12 @@
 
         self._server = grpc.server(
             self._server_pool,
-            interceptors=(_LoggingInterceptor('s1', self._record),
-                          conditional_interceptor,
-                          _LoggingInterceptor('s2', self._record),))
+            options=(('grpc.so_reuseport', 0),),
+            interceptors=(
+                _LoggingInterceptor('s1', self._record),
+                conditional_interceptor,
+                _LoggingInterceptor('s2', self._record),
+            ))
         port = self._server.add_insecure_port('[::]:0')
         self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
         self._server.start()
@@ -331,8 +351,8 @@
 
         interceptor = _wrap_request_iterator_stream_interceptor(triple)
         channel = grpc.intercept_channel(self._channel, interceptor)
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
 
         multi_callable = _stream_stream_multi_callable(channel)
         response_iterator = multi_callable(
@@ -363,8 +383,8 @@
         multi_callable = _unary_unary_multi_callable(defective_channel)
         call_future = multi_callable.future(
             request,
-            metadata=(
-                ('test', 'InterceptedUnaryRequestBlockingUnaryResponse'),))
+            metadata=(('test',
+                       'InterceptedUnaryRequestBlockingUnaryResponse'),))
 
         self.assertIsNotNone(call_future.exception())
         self.assertEqual(call_future.code(), grpc.StatusCode.INTERNAL)
@@ -372,12 +392,14 @@
     def testInterceptedHeaderManipulationWithServerSideVerification(self):
         request = b'\x07\x08'
 
-        channel = grpc.intercept_channel(
-            self._channel, _append_request_header_interceptor('secret', '42'))
-        channel = grpc.intercept_channel(
-            channel,
-            _LoggingInterceptor('c1', self._record),
-            _LoggingInterceptor('c2', self._record))
+        channel = grpc.intercept_channel(self._channel,
+                                         _append_request_header_interceptor(
+                                             'secret', '42'))
+        channel = grpc.intercept_channel(channel,
+                                         _LoggingInterceptor(
+                                             'c1', self._record),
+                                         _LoggingInterceptor(
+                                             'c2', self._record))
 
         self._record[:] = []
 
@@ -399,16 +421,17 @@
 
         self._record[:] = []
 
-        channel = grpc.intercept_channel(
-            self._channel,
-            _LoggingInterceptor('c1', self._record),
-            _LoggingInterceptor('c2', self._record))
+        channel = grpc.intercept_channel(self._channel,
+                                         _LoggingInterceptor(
+                                             'c1', self._record),
+                                         _LoggingInterceptor(
+                                             'c2', self._record))
 
         multi_callable = _unary_unary_multi_callable(channel)
         multi_callable(
             request,
-            metadata=(
-                ('test', 'InterceptedUnaryRequestBlockingUnaryResponse'),))
+            metadata=(('test',
+                       'InterceptedUnaryRequestBlockingUnaryResponse'),))
 
         self.assertSequenceEqual(self._record, [
             'c1:intercept_unary_unary', 'c2:intercept_unary_unary',
@@ -418,10 +441,11 @@
     def testInterceptedUnaryRequestBlockingUnaryResponseWithCall(self):
         request = b'\x07\x08'
 
-        channel = grpc.intercept_channel(
-            self._channel,
-            _LoggingInterceptor('c1', self._record),
-            _LoggingInterceptor('c2', self._record))
+        channel = grpc.intercept_channel(self._channel,
+                                         _LoggingInterceptor(
+                                             'c1', self._record),
+                                         _LoggingInterceptor(
+                                             'c2', self._record))
 
         self._record[:] = []
 
@@ -441,10 +465,11 @@
         request = b'\x07\x08'
 
         self._record[:] = []
-        channel = grpc.intercept_channel(
-            self._channel,
-            _LoggingInterceptor('c1', self._record),
-            _LoggingInterceptor('c2', self._record))
+        channel = grpc.intercept_channel(self._channel,
+                                         _LoggingInterceptor(
+                                             'c1', self._record),
+                                         _LoggingInterceptor(
+                                             'c2', self._record))
 
         multi_callable = _unary_unary_multi_callable(channel)
         response_future = multi_callable.future(
@@ -461,10 +486,11 @@
         request = b'\x37\x58'
 
         self._record[:] = []
-        channel = grpc.intercept_channel(
-            self._channel,
-            _LoggingInterceptor('c1', self._record),
-            _LoggingInterceptor('c2', self._record))
+        channel = grpc.intercept_channel(self._channel,
+                                         _LoggingInterceptor(
+                                             'c1', self._record),
+                                         _LoggingInterceptor(
+                                             'c2', self._record))
 
         multi_callable = _unary_stream_multi_callable(channel)
         response_iterator = multi_callable(
@@ -478,21 +504,22 @@
         ])
 
     def testInterceptedStreamRequestBlockingUnaryResponse(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         self._record[:] = []
-        channel = grpc.intercept_channel(
-            self._channel,
-            _LoggingInterceptor('c1', self._record),
-            _LoggingInterceptor('c2', self._record))
+        channel = grpc.intercept_channel(self._channel,
+                                         _LoggingInterceptor(
+                                             'c1', self._record),
+                                         _LoggingInterceptor(
+                                             'c2', self._record))
 
         multi_callable = _stream_unary_multi_callable(channel)
         multi_callable(
             request_iterator,
-            metadata=(
-                ('test', 'InterceptedStreamRequestBlockingUnaryResponse'),))
+            metadata=(('test',
+                       'InterceptedStreamRequestBlockingUnaryResponse'),))
 
         self.assertSequenceEqual(self._record, [
             'c1:intercept_stream_unary', 'c2:intercept_stream_unary',
@@ -500,15 +527,16 @@
         ])
 
     def testInterceptedStreamRequestBlockingUnaryResponseWithCall(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         self._record[:] = []
-        channel = grpc.intercept_channel(
-            self._channel,
-            _LoggingInterceptor('c1', self._record),
-            _LoggingInterceptor('c2', self._record))
+        channel = grpc.intercept_channel(self._channel,
+                                         _LoggingInterceptor(
+                                             'c1', self._record),
+                                         _LoggingInterceptor(
+                                             'c2', self._record))
 
         multi_callable = _stream_unary_multi_callable(channel)
         multi_callable.with_call(
@@ -523,15 +551,16 @@
         ])
 
     def testInterceptedStreamRequestFutureUnaryResponse(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         self._record[:] = []
-        channel = grpc.intercept_channel(
-            self._channel,
-            _LoggingInterceptor('c1', self._record),
-            _LoggingInterceptor('c2', self._record))
+        channel = grpc.intercept_channel(self._channel,
+                                         _LoggingInterceptor(
+                                             'c1', self._record),
+                                         _LoggingInterceptor(
+                                             'c2', self._record))
 
         multi_callable = _stream_unary_multi_callable(channel)
         response_future = multi_callable.future(
@@ -545,15 +574,16 @@
         ])
 
     def testInterceptedStreamRequestStreamResponse(self):
-        requests = tuple(b'\x77\x58'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         self._record[:] = []
-        channel = grpc.intercept_channel(
-            self._channel,
-            _LoggingInterceptor('c1', self._record),
-            _LoggingInterceptor('c2', self._record))
+        channel = grpc.intercept_channel(self._channel,
+                                         _LoggingInterceptor(
+                                             'c1', self._record),
+                                         _LoggingInterceptor(
+                                             'c2', self._record))
 
         multi_callable = _stream_stream_multi_callable(channel)
         response_iterator = multi_callable(
diff --git a/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
index 8f4c075..4edf0fc 100644
--- a/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_invalid_metadata_test.py
@@ -106,8 +106,8 @@
         self.assertEqual(response_iterator.code(), grpc.StatusCode.INTERNAL)
 
     def testStreamRequestBlockingUnaryResponse(self):
-        request_iterator = (b'\x07\x08'
-                            for _ in range(test_constants.STREAM_LENGTH))
+        request_iterator = (
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponse'),)
         expected_error_details = "metadata was invalid: %s" % metadata
         with self.assertRaises(ValueError) as exception_context:
@@ -115,8 +115,8 @@
         self.assertIn(expected_error_details, str(exception_context.exception))
 
     def testStreamRequestBlockingUnaryResponseWithCall(self):
-        request_iterator = (b'\x07\x08'
-                            for _ in range(test_constants.STREAM_LENGTH))
+        request_iterator = (
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         metadata = (('InVaLiD', 'StreamRequestBlockingUnaryResponseWithCall'),)
         expected_error_details = "metadata was invalid: %s" % metadata
         multi_callable = _stream_unary_multi_callable(self._channel)
@@ -125,8 +125,8 @@
         self.assertIn(expected_error_details, str(exception_context.exception))
 
     def testStreamRequestFutureUnaryResponse(self):
-        request_iterator = (b'\x07\x08'
-                            for _ in range(test_constants.STREAM_LENGTH))
+        request_iterator = (
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         metadata = (('InVaLiD', 'StreamRequestFutureUnaryResponse'),)
         expected_error_details = "metadata was invalid: %s" % metadata
         response_future = self._stream_unary.future(
@@ -141,8 +141,8 @@
         self.assertEqual(response_future.code(), grpc.StatusCode.INTERNAL)
 
     def testStreamRequestStreamResponse(self):
-        request_iterator = (b'\x07\x08'
-                            for _ in range(test_constants.STREAM_LENGTH))
+        request_iterator = (
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         metadata = (('InVaLiD', 'StreamRequestStreamResponse'),)
         expected_error_details = "metadata was invalid: %s" % metadata
         response_iterator = self._stream_stream(
diff --git a/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py b/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
index 2a1a49c..e40cca8 100644
--- a/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
+++ b/src/python/grpcio_tests/tests/unit/_invocation_defects_test.py
@@ -15,11 +15,10 @@
 import itertools
 import threading
 import unittest
-from concurrent import futures
 
 import grpc
-from grpc.framework.foundation import logging_pool
 
+from tests.unit import test_common
 from tests.unit.framework.common import test_constants
 from tests.unit.framework.common import test_control
 
@@ -63,7 +62,10 @@
     def handle_unary_unary(self, request, servicer_context):
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
         return request
 
     def handle_unary_stream(self, request, servicer_context):
@@ -72,7 +74,10 @@
             yield request
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
 
     def handle_stream_unary(self, request_iterator, servicer_context):
         if servicer_context is not None:
@@ -84,13 +89,19 @@
             response_elements.append(request)
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
         return b''.join(response_elements)
 
     def handle_stream_stream(self, request_iterator, servicer_context):
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
         for request in request_iterator:
             self._control.control()
             yield request
@@ -191,9 +202,8 @@
     def setUp(self):
         self._control = test_control.PauseFailControl()
         self._handler = _Handler(self._control)
-        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
 
-        self._server = grpc.server(self._server_pool)
+        self._server = test_common.test_server()
         port = self._server.add_insecure_port('[::]:0')
         self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
         self._server.start()
@@ -210,8 +220,8 @@
         with self.assertRaises(grpc.RpcError):
             response = multi_callable(
                 requests,
-                metadata=(
-                    ('test', 'IterableStreamRequestBlockingUnaryResponse'),))
+                metadata=(('test',
+                           'IterableStreamRequestBlockingUnaryResponse'),))
 
     def testIterableStreamRequestFutureUnaryResponse(self):
         requests = [b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH)]
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
index cb59cd3..bb6ac70 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_code_details_test.py
@@ -17,7 +17,6 @@
 import unittest
 
 import grpc
-from grpc.framework.foundation import logging_pool
 
 from tests.unit import test_common
 from tests.unit.framework.common import test_constants
@@ -37,16 +36,16 @@
 _STREAM_UNARY = 'StreamUnary'
 _STREAM_STREAM = 'StreamStream'
 
-_CLIENT_METADATA = (('client-md-key', 'client-md-key'),
-                    ('client-md-key-bin', b'\x00\x01'))
+_CLIENT_METADATA = (('client-md-key', 'client-md-key'), ('client-md-key-bin',
+                                                         b'\x00\x01'))
 
-_SERVER_INITIAL_METADATA = (
-    ('server-initial-md-key', 'server-initial-md-value'),
-    ('server-initial-md-key-bin', b'\x00\x02'))
+_SERVER_INITIAL_METADATA = (('server-initial-md-key',
+                             'server-initial-md-value'),
+                            ('server-initial-md-key-bin', b'\x00\x02'))
 
-_SERVER_TRAILING_METADATA = (
-    ('server-trailing-md-key', 'server-trailing-md-value'),
-    ('server-trailing-md-key-bin', b'\x00\x03'))
+_SERVER_TRAILING_METADATA = (('server-trailing-md-key',
+                              'server-trailing-md-value'),
+                             ('server-trailing-md-key-bin', b'\x00\x03'))
 
 _NON_OK_CODE = grpc.StatusCode.NOT_FOUND
 _DETAILS = 'Test details!'
@@ -186,25 +185,41 @@
 
     def setUp(self):
         self._servicer = _Servicer()
-        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        self._server = grpc.server(
-            self._server_pool, handlers=(_generic_handler(self._servicer),))
+        self._server = test_common.test_server()
+        self._server.add_generic_rpc_handlers(
+            (_generic_handler(self._servicer),))
         port = self._server.add_insecure_port('[::]:0')
         self._server.start()
 
         channel = grpc.insecure_channel('localhost:{}'.format(port))
         self._unary_unary = channel.unary_unary(
-            '/'.join(('', _SERVICE, _UNARY_UNARY,)),
+            '/'.join((
+                '',
+                _SERVICE,
+                _UNARY_UNARY,
+            )),
             request_serializer=_REQUEST_SERIALIZER,
-            response_deserializer=_RESPONSE_DESERIALIZER,)
-        self._unary_stream = channel.unary_stream(
-            '/'.join(('', _SERVICE, _UNARY_STREAM,)),)
-        self._stream_unary = channel.stream_unary(
-            '/'.join(('', _SERVICE, _STREAM_UNARY,)),)
+            response_deserializer=_RESPONSE_DESERIALIZER,
+        )
+        self._unary_stream = channel.unary_stream('/'.join((
+            '',
+            _SERVICE,
+            _UNARY_STREAM,
+        )),)
+        self._stream_unary = channel.stream_unary('/'.join((
+            '',
+            _SERVICE,
+            _STREAM_UNARY,
+        )),)
         self._stream_stream = channel.stream_stream(
-            '/'.join(('', _SERVICE, _STREAM_STREAM,)),
+            '/'.join((
+                '',
+                _SERVICE,
+                _STREAM_STREAM,
+            )),
             request_serializer=_REQUEST_SERIALIZER,
-            response_deserializer=_RESPONSE_DESERIALIZER,)
+            response_deserializer=_RESPONSE_DESERIALIZER,
+        )
 
     def testSuccessfulUnaryUnary(self):
         self._servicer.set_details(_DETAILS)
diff --git a/src/python/grpcio_tests/tests/unit/_metadata_test.py b/src/python/grpcio_tests/tests/unit/_metadata_test.py
index 0669486..a918066 100644
--- a/src/python/grpcio_tests/tests/unit/_metadata_test.py
+++ b/src/python/grpcio_tests/tests/unit/_metadata_test.py
@@ -18,7 +18,6 @@
 
 import grpc
 from grpc import _channel
-from grpc.framework.foundation import logging_pool
 
 from tests.unit import test_common
 from tests.unit.framework.common import test_constants
@@ -34,18 +33,50 @@
 _STREAM_UNARY = '/test/StreamUnary'
 _STREAM_STREAM = '/test/StreamStream'
 
-_INVOCATION_METADATA = ((b'invocation-md-key', u'invocation-md-value',),
-                        (u'invocation-md-key-bin', b'\x00\x01',),)
-_EXPECTED_INVOCATION_METADATA = (('invocation-md-key', 'invocation-md-value',),
-                                 ('invocation-md-key-bin', b'\x00\x01',),)
+_INVOCATION_METADATA = (
+    (
+        b'invocation-md-key',
+        u'invocation-md-value',
+    ),
+    (
+        u'invocation-md-key-bin',
+        b'\x00\x01',
+    ),
+)
+_EXPECTED_INVOCATION_METADATA = (
+    (
+        'invocation-md-key',
+        'invocation-md-value',
+    ),
+    (
+        'invocation-md-key-bin',
+        b'\x00\x01',
+    ),
+)
 
 _INITIAL_METADATA = ((b'initial-md-key', u'initial-md-value'),
                      (u'initial-md-key-bin', b'\x00\x02'))
-_EXPECTED_INITIAL_METADATA = (('initial-md-key', 'initial-md-value',),
-                              ('initial-md-key-bin', b'\x00\x02',),)
+_EXPECTED_INITIAL_METADATA = (
+    (
+        'initial-md-key',
+        'initial-md-value',
+    ),
+    (
+        'initial-md-key-bin',
+        b'\x00\x02',
+    ),
+)
 
-_TRAILING_METADATA = (('server-trailing-md-key', 'server-trailing-md-value',),
-                      ('server-trailing-md-key-bin', b'\x00\x03',),)
+_TRAILING_METADATA = (
+    (
+        'server-trailing-md-key',
+        'server-trailing-md-value',
+    ),
+    (
+        'server-trailing-md-key-bin',
+        b'\x00\x03',
+    ),
+)
 _EXPECTED_TRAILING_METADATA = _TRAILING_METADATA
 
 
@@ -146,9 +177,9 @@
 class MetadataTest(unittest.TestCase):
 
     def setUp(self):
-        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        self._server = grpc.server(
-            self._server_pool, handlers=(_GenericHandler(weakref.proxy(self)),))
+        self._server = test_common.test_server()
+        self._server.add_generic_rpc_handlers((_GenericHandler(
+            weakref.proxy(self)),))
         port = self._server.add_insecure_port('[::]:0')
         self._server.start()
         self._channel = grpc.insecure_channel(
diff --git a/src/python/grpcio_tests/tests/unit/_reconnect_test.py b/src/python/grpcio_tests/tests/unit/_reconnect_test.py
index 53fd1c2..10aee9f 100644
--- a/src/python/grpcio_tests/tests/unit/_reconnect_test.py
+++ b/src/python/grpcio_tests/tests/unit/_reconnect_test.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 """Tests that a channel will reconnect if a connection is dropped"""
 
+import socket
 import unittest
 
 import grpc
@@ -30,6 +31,44 @@
     return _RESPONSE
 
 
+def _get_reuse_socket_option():
+    try:
+        return socket.SO_REUSEPORT
+    except AttributeError:
+        # SO_REUSEPORT is unavailable on Windows, but SO_REUSEADDR
+        # allows forcibly re-binding to a port
+        return socket.SO_REUSEADDR
+
+
+def _pick_and_bind_port(sock_opt):
+    # Reserve a port; when we restart the server we want
+    # to hold onto the port.
+    port = 0
+    for address_family in (socket.AF_INET6, socket.AF_INET):
+        try:
+            s = socket.socket(address_family, socket.SOCK_STREAM)
+        except socket.error:
+            continue  # this address family is unavailable
+        s.setsockopt(socket.SOL_SOCKET, sock_opt, 1)
+        try:
+            s.bind(('localhost', port))
+            # for socket.SOCK_STREAM sockets, it is necessary to call
+            # listen to get the desired behavior.
+            s.listen(1)
+            port = s.getsockname()[1]
+        except socket.error:
+            # port was not available on the current address family
+            # try again
+            port = 0
+            break
+        finally:
+            s.close()
+    if s:
+        return port if port != 0 else _pick_and_bind_port(sock_opt)
+    else:
+        return None  # no address family was available
+
+
 class ReconnectTest(unittest.TestCase):
 
     def test_reconnect(self):
@@ -38,8 +77,12 @@
             'UnaryUnary':
             grpc.unary_unary_rpc_method_handler(_handle_unary_unary)
         })
+        sock_opt = _get_reuse_socket_option()
+        port = _pick_and_bind_port(sock_opt)
+        self.assertIsNotNone(port)
+
         server = grpc.server(server_pool, (handler,))
-        port = server.add_insecure_port('[::]:0')
+        server.add_insecure_port('[::]:{}'.format(port))
         server.start()
         channel = grpc.insecure_channel('localhost:%d' % port)
         multi_callable = channel.unary_unary(_UNARY_UNARY)
diff --git a/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
index e425a0a..df4b129 100644
--- a/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
+++ b/src/python/grpcio_tests/tests/unit/_resource_exhausted_test.py
@@ -139,6 +139,7 @@
         self._server = grpc.server(
             self._server_pool,
             handlers=(_GenericHandler(self._trigger),),
+            options=(('grpc.so_reuseport', 0),),
             maximum_concurrent_rpcs=test_constants.THREAD_CONCURRENCY)
         port = self._server.add_insecure_port('[::]:0')
         self._server.start()
diff --git a/src/python/grpcio_tests/tests/unit/_rpc_test.py b/src/python/grpcio_tests/tests/unit/_rpc_test.py
index 74d8541..54f01d9 100644
--- a/src/python/grpcio_tests/tests/unit/_rpc_test.py
+++ b/src/python/grpcio_tests/tests/unit/_rpc_test.py
@@ -21,6 +21,7 @@
 import grpc
 from grpc.framework.foundation import logging_pool
 
+from tests.unit import test_common
 from tests.unit.framework.common import test_constants
 from tests.unit.framework.common import test_control
 
@@ -63,7 +64,10 @@
     def handle_unary_unary(self, request, servicer_context):
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
             # TODO(https://github.com/grpc/grpc/issues/8483): test the values
             # returned by these methods rather than only "smoke" testing that
             # they return after having been called.
@@ -77,7 +81,10 @@
             yield request
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
 
     def handle_stream_unary(self, request_iterator, servicer_context):
         if servicer_context is not None:
@@ -89,13 +96,19 @@
             response_elements.append(request)
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
         return b''.join(response_elements)
 
     def handle_stream_stream(self, request_iterator, servicer_context):
         self._control.control()
         if servicer_context is not None:
-            servicer_context.set_trailing_metadata((('testkey', 'testvalue',),))
+            servicer_context.set_trailing_metadata(((
+                'testkey',
+                'testvalue',
+            ),))
         for request in request_iterator:
             self._control.control()
             yield request
@@ -169,9 +182,8 @@
     def setUp(self):
         self._control = test_control.PauseFailControl()
         self._handler = _Handler(self._control)
-        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
 
-        self._server = grpc.server(self._server_pool)
+        self._server = test_common.test_server()
         port = self._server.add_insecure_port('[::]:0')
         self._server.add_generic_rpc_handlers((_GenericHandler(self._handler),))
         self._server.start()
@@ -180,7 +192,6 @@
 
     def tearDown(self):
         self._server.stop(None)
-        self._server_pool.shutdown(wait=True)
 
     def testUnrecognizedMethod(self):
         request = b'abc'
@@ -245,8 +256,8 @@
         self.assertSequenceEqual(expected_responses, responses)
 
     def testSuccessfulStreamRequestBlockingUnaryResponse(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         expected_response = self._handler.handle_stream_unary(
             iter(requests), None)
         request_iterator = iter(requests)
@@ -254,14 +265,14 @@
         multi_callable = _stream_unary_multi_callable(self._channel)
         response = multi_callable(
             request_iterator,
-            metadata=(
-                ('test', 'SuccessfulStreamRequestBlockingUnaryResponse'),))
+            metadata=(('test',
+                       'SuccessfulStreamRequestBlockingUnaryResponse'),))
 
         self.assertEqual(expected_response, response)
 
     def testSuccessfulStreamRequestBlockingUnaryResponseWithCall(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         expected_response = self._handler.handle_stream_unary(
             iter(requests), None)
         request_iterator = iter(requests)
@@ -277,8 +288,8 @@
         self.assertIs(grpc.StatusCode.OK, call.code())
 
     def testSuccessfulStreamRequestFutureUnaryResponse(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         expected_response = self._handler.handle_stream_unary(
             iter(requests), None)
         request_iterator = iter(requests)
@@ -294,8 +305,8 @@
         self.assertIsNone(response_future.traceback())
 
     def testSuccessfulStreamRequestStreamResponse(self):
-        requests = tuple(b'\x77\x58'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x77\x58' for _ in range(test_constants.STREAM_LENGTH))
         expected_responses = tuple(
             self._handler.handle_stream_stream(iter(requests), None))
         request_iterator = iter(requests)
@@ -327,8 +338,8 @@
 
     def testConcurrentBlockingInvocations(self):
         pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         expected_response = self._handler.handle_stream_unary(
             iter(requests), None)
         expected_responses = [expected_response
@@ -343,15 +354,15 @@
                 request_iterator,
                 metadata=(('test', 'ConcurrentBlockingInvocations'),))
             response_futures[index] = response_future
-        responses = tuple(response_future.result()
-                          for response_future in response_futures)
+        responses = tuple(
+            response_future.result() for response_future in response_futures)
 
         pool.shutdown(wait=True)
         self.assertSequenceEqual(expected_responses, responses)
 
     def testConcurrentFutureInvocations(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         expected_response = self._handler.handle_stream_unary(
             iter(requests), None)
         expected_responses = [expected_response
@@ -365,8 +376,8 @@
                 request_iterator,
                 metadata=(('test', 'ConcurrentFutureInvocations'),))
             response_futures[index] = response_future
-        responses = tuple(response_future.result()
-                          for response_future in response_futures)
+        responses = tuple(
+            response_future.result() for response_future in response_futures)
 
         self.assertSequenceEqual(expected_responses, responses)
 
@@ -425,14 +436,14 @@
         multi_callable = _unary_stream_multi_callable(self._channel)
         response_iterator = multi_callable(
             request,
-            metadata=(
-                ('test', 'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
+            metadata=(('test',
+                       'ConsumingSomeButNotAllStreamResponsesUnaryRequest'),))
         for _ in range(test_constants.STREAM_LENGTH // 2):
             next(response_iterator)
 
     def testConsumingSomeButNotAllStreamResponsesStreamRequest(self):
-        requests = tuple(b'\x67\x88'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_stream_multi_callable(self._channel)
@@ -444,15 +455,15 @@
             next(response_iterator)
 
     def testConsumingTooManyStreamResponsesStreamRequest(self):
-        requests = tuple(b'\x67\x88'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_stream_multi_callable(self._channel)
         response_iterator = multi_callable(
             request_iterator,
-            metadata=(
-                ('test', 'ConsumingTooManyStreamResponsesStreamRequest'),))
+            metadata=(('test',
+                       'ConsumingTooManyStreamResponsesStreamRequest'),))
         for _ in range(test_constants.STREAM_LENGTH):
             next(response_iterator)
         for _ in range(test_constants.STREAM_LENGTH):
@@ -504,8 +515,8 @@
         self.assertIsNotNone(response_iterator.trailing_metadata())
 
     def testCancelledStreamRequestUnaryResponse(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_unary_multi_callable(self._channel)
@@ -529,8 +540,8 @@
         self.assertIsNotNone(response_future.trailing_metadata())
 
     def testCancelledStreamRequestStreamResponse(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_stream_multi_callable(self._channel)
@@ -556,8 +567,8 @@
                 multi_callable.with_call(
                     request,
                     timeout=test_constants.SHORT_TIMEOUT,
-                    metadata=(
-                        ('test', 'ExpiredUnaryRequestBlockingUnaryResponse'),))
+                    metadata=(('test',
+                               'ExpiredUnaryRequestBlockingUnaryResponse'),))
 
         self.assertIsInstance(exception_context.exception, grpc.Call)
         self.assertIsNotNone(exception_context.exception.initial_metadata())
@@ -611,8 +622,8 @@
                       response_iterator.code())
 
     def testExpiredStreamRequestBlockingUnaryResponse(self):
-        requests = tuple(b'\x07\x08'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x08' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_unary_multi_callable(self._channel)
@@ -621,8 +632,8 @@
                 multi_callable(
                     request_iterator,
                     timeout=test_constants.SHORT_TIMEOUT,
-                    metadata=(
-                        ('test', 'ExpiredStreamRequestBlockingUnaryResponse'),))
+                    metadata=(('test',
+                               'ExpiredStreamRequestBlockingUnaryResponse'),))
 
         self.assertIsInstance(exception_context.exception, grpc.RpcError)
         self.assertIsInstance(exception_context.exception, grpc.Call)
@@ -633,8 +644,8 @@
         self.assertIsNotNone(exception_context.exception.trailing_metadata())
 
     def testExpiredStreamRequestFutureUnaryResponse(self):
-        requests = tuple(b'\x07\x18'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
         callback = _Callback()
 
@@ -645,8 +656,8 @@
                 timeout=test_constants.SHORT_TIMEOUT,
                 metadata=(('test', 'ExpiredStreamRequestFutureUnaryResponse'),))
             with self.assertRaises(grpc.FutureTimeoutError):
-                response_future.result(timeout=test_constants.SHORT_TIMEOUT /
-                                       2.0)
+                response_future.result(
+                    timeout=test_constants.SHORT_TIMEOUT / 2.0)
             response_future.add_done_callback(callback)
             value_passed_to_callback = callback.value()
 
@@ -664,8 +675,8 @@
         self.assertIsNotNone(response_future.trailing_metadata())
 
     def testExpiredStreamRequestStreamResponse(self):
-        requests = tuple(b'\x67\x18'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x67\x18' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_stream_multi_callable(self._channel)
@@ -690,8 +701,8 @@
             with self.assertRaises(grpc.RpcError) as exception_context:
                 multi_callable.with_call(
                     request,
-                    metadata=(
-                        ('test', 'FailedUnaryRequestBlockingUnaryResponse'),))
+                    metadata=(('test',
+                               'FailedUnaryRequestBlockingUnaryResponse'),))
 
         self.assertIs(grpc.StatusCode.UNKNOWN,
                       exception_context.exception.code())
@@ -735,8 +746,8 @@
                       exception_context.exception.code())
 
     def testFailedStreamRequestBlockingUnaryResponse(self):
-        requests = tuple(b'\x47\x58'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x47\x58' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_unary_multi_callable(self._channel)
@@ -744,15 +755,15 @@
             with self.assertRaises(grpc.RpcError) as exception_context:
                 multi_callable(
                     request_iterator,
-                    metadata=(
-                        ('test', 'FailedStreamRequestBlockingUnaryResponse'),))
+                    metadata=(('test',
+                               'FailedStreamRequestBlockingUnaryResponse'),))
 
         self.assertIs(grpc.StatusCode.UNKNOWN,
                       exception_context.exception.code())
 
     def testFailedStreamRequestFutureUnaryResponse(self):
-        requests = tuple(b'\x07\x18'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
         callback = _Callback()
 
@@ -774,8 +785,8 @@
         self.assertIs(response_future, value_passed_to_callback)
 
     def testFailedStreamRequestStreamResponse(self):
-        requests = tuple(b'\x67\x88'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_stream_multi_callable(self._channel)
@@ -806,8 +817,8 @@
             request, metadata=(('test', 'IgnoredUnaryRequestStreamResponse'),))
 
     def testIgnoredStreamRequestFutureUnaryResponse(self):
-        requests = tuple(b'\x07\x18'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x07\x18' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_unary_multi_callable(self._channel)
@@ -816,8 +827,8 @@
             metadata=(('test', 'IgnoredStreamRequestFutureUnaryResponse'),))
 
     def testIgnoredStreamRequestStreamResponse(self):
-        requests = tuple(b'\x67\x88'
-                         for _ in range(test_constants.STREAM_LENGTH))
+        requests = tuple(
+            b'\x67\x88' for _ in range(test_constants.STREAM_LENGTH))
         request_iterator = iter(requests)
 
         multi_callable = _stream_stream_multi_callable(self._channel)
diff --git a/src/python/grpcio_tests/tests/unit/_server_ssl_cert_config_test.py b/src/python/grpcio_tests/tests/unit/_server_ssl_cert_config_test.py
index 005d16e..0d78034 100644
--- a/src/python/grpcio_tests/tests/unit/_server_ssl_cert_config_test.py
+++ b/src/python/grpcio_tests/tests/unit/_server_ssl_cert_config_test.py
@@ -40,6 +40,7 @@
 
 import grpc
 from tests.unit import resources
+from tests.unit import test_common
 from tests.testing import _application_common
 from tests.testing import _server_application
 from tests.testing.proto import services_pb2_grpc
@@ -73,7 +74,8 @@
         expect_success,
         root_certificates=None,
         private_key=None,
-        certificate_chain=None,):
+        certificate_chain=None,
+):
     channel = grpc.secure_channel('localhost:{}'.format(port),
                                   grpc.ssl_channel_credentials(
                                       root_certificates=root_certificates,
@@ -135,7 +137,7 @@
         raise NotImplementedError()
 
     def setUp(self):
-        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+        self.server = test_common.test_server()
         services_pb2_grpc.add_FirstServiceServicer_to_server(
             _server_application.FirstServiceServicer(), self.server)
         switch_cert_on_client_num = 10
@@ -407,7 +409,7 @@
         return True
 
     def setUp(self):
-        self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+        self.server = test_common.test_server()
         services_pb2_grpc.add_FirstServiceServicer_to_server(
             _server_application.FirstServiceServicer(), self.server)
         self.cert_config_A = grpc.ssl_server_certificate_configuration(
diff --git a/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py b/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
index fe3e71d..18f5af0 100644
--- a/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
+++ b/src/python/grpcio_tests/tests/unit/_thread_cleanup_test.py
@@ -52,7 +52,9 @@
             target=target,
             name='test-name',
             args=('arg1', 'arg2'),
-            kwargs={'arg3': 'arg3'})
+            kwargs={
+                'arg3': 'arg3'
+            })
         cleanup_thread.start()
         cleanup_thread.join()
         self.assertEqual(cleanup_thread.name, 'test-name')
diff --git a/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py b/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py
index eb8dc80..61c03f6 100644
--- a/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_beta_features_test.py
@@ -163,7 +163,10 @@
         self._server = implementations.server(
             method_implementations, options=server_options)
         server_credentials = implementations.ssl_server_credentials([
-            (resources.private_key(), resources.certificate_chain(),),
+            (
+                resources.private_key(),
+                resources.certificate_chain(),
+            ),
         ])
         port = self._server.add_secure_port('[::]:0', server_credentials)
         self._server.start()
@@ -289,7 +292,10 @@
         self._server_options = implementations.server_options(
             thread_pool_size=test_constants.POOL_SIZE)
         self._server_credentials = implementations.ssl_server_credentials([
-            (resources.private_key(), resources.certificate_chain(),),
+            (
+                resources.private_key(),
+                resources.certificate_chain(),
+            ),
         ])
         self._channel_credentials = implementations.ssl_channel_credentials(
             resources.test_root_certificates())
diff --git a/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py b/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
index e4b81e7..c99738e 100644
--- a/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_face_interface_test.py
@@ -32,8 +32,11 @@
 
 class _SerializationBehaviors(
         collections.namedtuple('_SerializationBehaviors', (
-            'request_serializers', 'request_deserializers',
-            'response_serializers', 'response_deserializers',))):
+            'request_serializers',
+            'request_deserializers',
+            'response_serializers',
+            'response_deserializers',
+        ))):
     pass
 
 
@@ -73,7 +76,10 @@
         server = implementations.server(
             method_implementations, options=server_options)
         server_credentials = implementations.ssl_server_credentials([
-            (resources.private_key(), resources.certificate_chain(),),
+            (
+                resources.private_key(),
+                resources.certificate_chain(),
+            ),
         ])
         port = server.add_secure_port('[::]:0', server_credentials)
         server.start()
@@ -116,9 +122,10 @@
 
 
 def load_tests(loader, tests, pattern):
-    return unittest.TestSuite(tests=tuple(
-        loader.loadTestsFromTestCase(test_case_class)
-        for test_case_class in test_cases.test_cases(_Implementation())))
+    return unittest.TestSuite(
+        tests=tuple(
+            loader.loadTestsFromTestCase(test_case_class)
+            for test_case_class in test_cases.test_cases(_Implementation())))
 
 
 if __name__ == '__main__':
diff --git a/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py b/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py
index 75a615e..5a53766 100644
--- a/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py
+++ b/src/python/grpcio_tests/tests/unit/beta/_implementations_test.py
@@ -41,8 +41,8 @@
     def test_google_call_credentials(self):
         creds = oauth2client_client.GoogleCredentials(
             'token', 'client_id', 'secret', 'refresh_token',
-            datetime.datetime(2008, 6, 24), 'https://refresh.uri.com/',
-            'user_agent')
+            datetime.datetime(2008, 6,
+                              24), 'https://refresh.uri.com/', 'user_agent')
         call_creds = implementations.google_call_credentials(creds)
         self.assertIsInstance(call_creds, implementations.CallCredentials)
 
diff --git a/src/python/grpcio_tests/tests/unit/beta/test_utilities.py b/src/python/grpcio_tests/tests/unit/beta/test_utilities.py
index 65da0f2..c8d920d 100644
--- a/src/python/grpcio_tests/tests/unit/beta/test_utilities.py
+++ b/src/python/grpcio_tests/tests/unit/beta/test_utilities.py
@@ -33,6 +33,8 @@
       conducted.
   """
     target = '%s:%d' % (host, port)
-    channel = grpc.secure_channel(target, channel_credentials, (
-        ('grpc.ssl_target_name_override', server_host_override,),))
+    channel = grpc.secure_channel(target, channel_credentials, ((
+        'grpc.ssl_target_name_override',
+        server_host_override,
+    ),))
     return implementations.Channel(channel)
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
index 45fd321..5d8679a 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_blocking_invocation_inline_service.py
@@ -70,8 +70,8 @@
         self.implementation.destantiate(self._memo)
 
     def testSuccessfulUnaryRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -81,8 +81,8 @@
                 test_messages.verify(request, response, self)
 
     def testSuccessfulUnaryRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -93,8 +93,8 @@
                 test_messages.verify(request, responses, self)
 
     def testSuccessfulStreamRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
 
@@ -104,8 +104,8 @@
                 test_messages.verify(requests, response, self)
 
     def testSuccessfulStreamRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
 
@@ -116,8 +116,8 @@
                 test_messages.verify(requests, responses, self)
 
     def testSequentialInvocations(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 first_request = test_messages.request()
                 second_request = test_messages.request()
@@ -134,8 +134,8 @@
 
     def testParallelInvocations(self):
         pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = []
                 response_futures = []
@@ -158,8 +158,8 @@
 
     def testWaitingForSomeButNotAllParallelInvocations(self):
         pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = []
                 response_futures_to_indices = {}
@@ -197,8 +197,8 @@
         raise NotImplementedError()
 
     def testExpiredUnaryRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -208,8 +208,8 @@
                         request, _3069_test_constant.REALLY_SHORT_TIMEOUT)
 
     def testExpiredUnaryRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -220,33 +220,33 @@
                     list(response_iterator)
 
     def testExpiredStreamRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
 
                 with self._control.pause(), self.assertRaises(
                         face.ExpirationError):
-                    self._invoker.blocking(group, method)(
-                        iter(requests),
-                        _3069_test_constant.REALLY_SHORT_TIMEOUT)
+                    self._invoker.blocking(
+                        group, method)(iter(requests),
+                                       _3069_test_constant.REALLY_SHORT_TIMEOUT)
 
     def testExpiredStreamRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
 
                 with self._control.pause(), self.assertRaises(
                         face.ExpirationError):
-                    response_iterator = self._invoker.blocking(group, method)(
-                        iter(requests),
-                        _3069_test_constant.REALLY_SHORT_TIMEOUT)
+                    response_iterator = self._invoker.blocking(
+                        group, method)(iter(requests),
+                                       _3069_test_constant.REALLY_SHORT_TIMEOUT)
                     list(response_iterator)
 
     def testFailedUnaryRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -255,8 +255,8 @@
                         request, test_constants.LONG_TIMEOUT)
 
     def testFailedUnaryRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -266,8 +266,8 @@
                     list(response_iterator)
 
     def testFailedStreamRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
 
@@ -276,8 +276,8 @@
                         iter(requests), test_constants.LONG_TIMEOUT)
 
     def testFailedStreamRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
 
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
index 0e399c4..b1c33da 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_digest.py
@@ -34,11 +34,15 @@
 
 class TestServiceDigest(
         collections.namedtuple('TestServiceDigest', (
-            'methods', 'inline_method_implementations',
-            'event_method_implementations', 'multi_method_implementation',
-            'unary_unary_messages_sequences', 'unary_stream_messages_sequences',
+            'methods',
+            'inline_method_implementations',
+            'event_method_implementations',
+            'multi_method_implementation',
+            'unary_unary_messages_sequences',
+            'unary_stream_messages_sequences',
             'stream_unary_messages_sequences',
-            'stream_stream_messages_sequences',))):
+            'stream_stream_messages_sequences',
+        ))):
     """A transformation of a service.TestService.
 
   Attributes:
@@ -421,8 +425,8 @@
     events.update(stream_unary.events)
     events.update(stream_stream.events)
 
-    return TestServiceDigest(
-        methods, inlines, events,
-        _MultiMethodImplementation(adaptations, control, pool),
-        unary_unary.messages, unary_stream.messages, stream_unary.messages,
-        stream_stream.messages)
+    return TestServiceDigest(methods, inlines, events,
+                             _MultiMethodImplementation(adaptations, control,
+                                                        pool),
+                             unary_unary.messages, unary_stream.messages,
+                             stream_unary.messages, stream_stream.messages)
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
index bc65bf4..3d9b281 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_future_invocation_asynchronous_event_service.py
@@ -134,8 +134,8 @@
         self._digest_pool.shutdown(wait=True)
 
     def testSuccessfulUnaryRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
                 callback = _Callback()
@@ -151,8 +151,8 @@
                 self.assertIsNone(response_future.traceback())
 
     def testSuccessfulUnaryRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -163,8 +163,8 @@
                 test_messages.verify(request, responses, self)
 
     def testSuccessfulStreamRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
                 request_iterator = _PauseableIterator(iter(requests))
@@ -185,8 +185,8 @@
                 self.assertIsNone(response_future.traceback())
 
     def testSuccessfulStreamRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
                 request_iterator = _PauseableIterator(iter(requests))
@@ -201,8 +201,8 @@
                 test_messages.verify(requests, responses, self)
 
     def testSequentialInvocations(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 first_request = test_messages.request()
                 second_request = test_messages.request()
@@ -220,8 +220,8 @@
                 test_messages.verify(second_request, second_response, self)
 
     def testParallelInvocations(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 first_request = test_messages.request()
                 second_request = test_messages.request()
@@ -236,8 +236,8 @@
                 test_messages.verify(first_request, first_response, self)
                 test_messages.verify(second_request, second_response, self)
 
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = []
                 response_futures = []
@@ -258,8 +258,8 @@
 
     def testWaitingForSomeButNotAllParallelInvocations(self):
         pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = []
                 response_futures_to_indices = {}
@@ -282,8 +282,8 @@
         pool.shutdown(wait=True)
 
     def testCancelledUnaryRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
                 callback = _Callback()
@@ -305,8 +305,8 @@
                     response_future.traceback()
 
     def testCancelledUnaryRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -319,8 +319,8 @@
                     next(response_iterator)
 
     def testCancelledStreamRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
                 callback = _Callback()
@@ -342,8 +342,8 @@
                     response_future.traceback()
 
     def testCancelledStreamRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
 
@@ -356,8 +356,8 @@
                     next(response_iterator)
 
     def testExpiredUnaryRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
                 callback = _Callback()
@@ -376,8 +376,8 @@
                     self.assertIsNotNone(response_future.traceback())
 
     def testExpiredUnaryRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -388,16 +388,16 @@
                         list(response_iterator)
 
     def testExpiredStreamRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
                 callback = _Callback()
 
                 with self._control.pause():
-                    response_future = self._invoker.future(group, method)(
-                        iter(requests),
-                        _3069_test_constant.REALLY_SHORT_TIMEOUT)
+                    response_future = self._invoker.future(
+                        group, method)(iter(requests),
+                                       _3069_test_constant.REALLY_SHORT_TIMEOUT)
                     response_future.add_done_callback(callback)
                     self.assertIs(callback.future(), response_future)
                     self.assertIsInstance(response_future.exception(),
@@ -409,21 +409,21 @@
                     self.assertIsNotNone(response_future.traceback())
 
     def testExpiredStreamRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
 
                 with self._control.pause():
-                    response_iterator = self._invoker.future(group, method)(
-                        iter(requests),
-                        _3069_test_constant.REALLY_SHORT_TIMEOUT)
+                    response_iterator = self._invoker.future(
+                        group, method)(iter(requests),
+                                       _3069_test_constant.REALLY_SHORT_TIMEOUT)
                     with self.assertRaises(face.ExpirationError):
                         list(response_iterator)
 
     def testFailedUnaryRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
                 callback = _Callback()
@@ -448,8 +448,8 @@
                     self.assertIsNotNone(abortion_callback.future())
 
     def testFailedUnaryRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.unary_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.unary_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 request = test_messages.request()
 
@@ -464,17 +464,17 @@
                     list(response_iterator)
 
     def testFailedStreamRequestUnaryResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_unary_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_unary_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
                 callback = _Callback()
                 abortion_callback = _Callback()
 
                 with self._control.fail():
-                    response_future = self._invoker.future(group, method)(
-                        iter(requests),
-                        _3069_test_constant.REALLY_SHORT_TIMEOUT)
+                    response_future = self._invoker.future(
+                        group, method)(iter(requests),
+                                       _3069_test_constant.REALLY_SHORT_TIMEOUT)
                     response_future.add_done_callback(callback)
                     response_future.add_abortion_callback(abortion_callback)
 
@@ -491,8 +491,8 @@
                     self.assertIsNotNone(abortion_callback.future())
 
     def testFailedStreamRequestStreamResponse(self):
-        for (group, method), test_messages_sequence in (
-                six.iteritems(self._digest.stream_stream_messages_sequences)):
+        for (group, method), test_messages_sequence in (six.iteritems(
+                self._digest.stream_stream_messages_sequences)):
             for test_messages in test_messages_sequence:
                 requests = test_messages.requests()
 
@@ -502,7 +502,7 @@
                 # expiration of the RPC.
                 with self._control.fail(), self.assertRaises(
                         face.ExpirationError):
-                    response_iterator = self._invoker.future(group, method)(
-                        iter(requests),
-                        _3069_test_constant.REALLY_SHORT_TIMEOUT)
+                    response_iterator = self._invoker.future(
+                        group, method)(iter(requests),
+                                       _3069_test_constant.REALLY_SHORT_TIMEOUT)
                     list(response_iterator)
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
index fd55f4e..efc93d5 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_invocation.py
@@ -191,5 +191,8 @@
   Returns:
     A sequence of InvokerConstructors.
   """
-    return (_GenericInvokerConstructor(), _MultiCallableInvokerConstructor(),
-            _DynamicInvokerConstructor(),)
+    return (
+        _GenericInvokerConstructor(),
+        _MultiCallableInvokerConstructor(),
+        _DynamicInvokerConstructor(),
+    )
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
index 69c7ac2..a84e02a 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/_stock_service.py
@@ -33,8 +33,8 @@
     if active():
         stock_reply_callback(
             stock_pb2.StockReply(
-                symbol=stock_request.symbol, price=_price(
-                    stock_request.symbol)))
+                symbol=stock_request.symbol,
+                price=_price(stock_request.symbol)))
     else:
         raise abandonment.Abandoned()
 
diff --git a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
index d1c5b8f..cff4b7c 100644
--- a/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
+++ b/src/python/grpcio_tests/tests/unit/framework/interfaces/face/test_cases.py
@@ -24,7 +24,8 @@
 
 _TEST_CASE_SUPERCLASSES = (
     _blocking_invocation_inline_service.TestCase,
-    _future_invocation_asynchronous_event_service.TestCase,)
+    _future_invocation_asynchronous_event_service.TestCase,
+)
 
 
 def test_cases(implementation):
@@ -42,8 +43,9 @@
     for invoker_constructor in _invocation.invoker_constructors():
         for super_class in _TEST_CASE_SUPERCLASSES:
             test_case_classes.append(
-                type(invoker_constructor.name() + super_class.NAME, (
-                    super_class,), {
+                type(
+                    invoker_constructor.name() + super_class.NAME,
+                    (super_class,), {
                         'implementation': implementation,
                         'invoker_constructor': invoker_constructor,
                         '__module__': implementation.__module__,
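
Aside (not part of the diff): the `type(...)` call above synthesizes each test case class at runtime from an invoker constructor and a superclass. A minimal standalone sketch of that idiom, with purely illustrative names:

```python
# Illustrative only: how type(name, bases, namespace) builds a class
# dynamically, mirroring the pattern used in test_cases() above.
class _Base(object):
    GREETING = 'hello'

    def greet(self):
        return '%s from %s' % (self.GREETING, type(self).__name__)

# Equivalent to writing: class GeneratedCase(_Base): GREETING = 'hi'
GeneratedCase = type('GeneratedCase', (_Base,), {'GREETING': 'hi'})

assert GeneratedCase().greet() == 'hi from GeneratedCase'
```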
diff --git a/src/python/grpcio_tests/tests/unit/resources.py b/src/python/grpcio_tests/tests/unit/resources.py
index 11ef9e8..51a8979 100644
--- a/src/python/grpcio_tests/tests/unit/resources.py
+++ b/src/python/grpcio_tests/tests/unit/resources.py
@@ -58,7 +58,8 @@
 def cert_hier_1_client_1_cert():
     return pkg_resources.resource_string(
         __name__,
-        'credentials/certificate_hierarchy_1/intermediate/certs/client.cert.pem')
+        'credentials/certificate_hierarchy_1/intermediate/certs/client.cert.pem'
+    )
 
 
 def cert_hier_1_server_1_key():
@@ -97,7 +98,8 @@
 def cert_hier_2_client_1_cert():
     return pkg_resources.resource_string(
         __name__,
-        'credentials/certificate_hierarchy_2/intermediate/certs/client.cert.pem')
+        'credentials/certificate_hierarchy_2/intermediate/certs/client.cert.pem'
+    )
 
 
 def cert_hier_2_server_1_key():
diff --git a/src/python/grpcio_tests/tests/unit/test_common.py b/src/python/grpcio_tests/tests/unit/test_common.py
index ed71cc9..61717ae 100644
--- a/src/python/grpcio_tests/tests/unit/test_common.py
+++ b/src/python/grpcio_tests/tests/unit/test_common.py
@@ -15,12 +15,25 @@
 
 import collections
 
+from concurrent import futures
 import grpc
 import six
 
-INVOCATION_INITIAL_METADATA = (('0', 'abc'), ('1', 'def'), ('2', 'ghi'),)
-SERVICE_INITIAL_METADATA = (('3', 'jkl'), ('4', 'mno'), ('5', 'pqr'),)
-SERVICE_TERMINAL_METADATA = (('6', 'stu'), ('7', 'vwx'), ('8', 'yza'),)
+INVOCATION_INITIAL_METADATA = (
+    ('0', 'abc'),
+    ('1', 'def'),
+    ('2', 'ghi'),
+)
+SERVICE_INITIAL_METADATA = (
+    ('3', 'jkl'),
+    ('4', 'mno'),
+    ('5', 'pqr'),
+)
+SERVICE_TERMINAL_METADATA = (
+    ('6', 'stu'),
+    ('7', 'vwx'),
+    ('8', 'yza'),
+)
 DETAILS = 'test details'
 
 
@@ -79,6 +92,18 @@
     An implementations.Channel to the remote host through which RPCs may be
       conducted.
   """
-    channel = grpc.secure_channel(target, channel_credentials, (
-        ('grpc.ssl_target_name_override', server_host_override,),))
+    channel = grpc.secure_channel(target, channel_credentials, ((
+        'grpc.ssl_target_name_override',
+        server_host_override,
+    ),))
     return channel
+
+
+def test_server(max_workers=10):
+    """Creates an insecure grpc server.
+
+    These servers have SO_REUSEPORT disabled to prevent cross-talk.
+    """
+    return grpc.server(
+        futures.ThreadPoolExecutor(max_workers=max_workers),
+        options=(('grpc.so_reuseport', 0),))
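
Aside (not part of the diff): a minimal sketch of how a test might use the `test_server()` helper added above. The `tests.unit.test_common` import path and the bind address are assumptions for illustration; the gRPC server calls (`add_insecure_port`, `start`, `stop`) are standard `grpc.Server` API.

```python
# Hypothetical usage sketch: build an insecure test server with
# ('grpc.so_reuseport', 0) so concurrent test servers cannot cross-talk.
from tests.unit import test_common  # assumed import path for the helper above

server = test_common.test_server()           # grpc.server(...) with SO_REUSEPORT off
port = server.add_insecure_port('[::]:0')    # bind an ephemeral port
server.start()
# ... exercise RPCs against 'localhost:%d' % port ...
server.stop(None)                            # stop immediately (no grace period)
```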
diff --git a/src/ruby/README.md b/src/ruby/README.md
index 5c7dae6..f6fce3e 100644
--- a/src/ruby/README.md
+++ b/src/ruby/README.md
@@ -22,6 +22,12 @@
 ---------------------
 - Clone this repository
 
+- Init submodules
+
+```sh
+git submodule update --init
+```
+
 - Install Ruby 2.x. Consider doing this with [RVM](http://rvm.io), it's a nice way of controlling
   the exact ruby version that's used.
 ```sh
diff --git a/src/ruby/end2end/channel_closing_client.rb b/src/ruby/end2end/channel_closing_client.rb
index 8f6888c..62c7421 100755
--- a/src/ruby/end2end/channel_closing_client.rb
+++ b/src/ruby/end2end/channel_closing_client.rb
@@ -44,7 +44,7 @@
   ch = GRPC::Core::Channel.new("localhost:#{server_port}", {},
                                :this_channel_is_insecure)
 
-  srv = GRPC::RpcServer.new
+  srv = new_rpc_server_for_testing
   thd = Thread.new do
     srv.add_http2_port("0.0.0.0:#{client_control_port}", :this_port_is_insecure)
     srv.handle(ChannelClosingClientController.new(ch))
diff --git a/src/ruby/end2end/end2end_common.rb b/src/ruby/end2end/end2end_common.rb
index 790fc23..ffbaa19 100755
--- a/src/ruby/end2end/end2end_common.rb
+++ b/src/ruby/end2end/end2end_common.rb
@@ -29,6 +29,9 @@
 require 'thread'
 require 'timeout'
 require 'English' # see https://github.com/bbatsov/rubocop/issues/1747
+require_relative '../spec/support/helpers'
+
+include GRPC::Spec::Helpers
 
 # GreeterServer is simple server that implements the Helloworld Greeter server.
 class EchoServerImpl < Echo::EchoServer::Service
@@ -46,7 +49,7 @@
   end
 
   def run
-    @srv = GRPC::RpcServer.new(@rpc_server_args)
+    @srv = new_rpc_server_for_testing(@rpc_server_args)
     port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
     @srv.handle(@service_impl)
 
diff --git a/src/ruby/end2end/multiple_killed_watching_threads_driver.rb b/src/ruby/end2end/multiple_killed_watching_threads_driver.rb
index 59f6f27..8f078cf 100755
--- a/src/ruby/end2end/multiple_killed_watching_threads_driver.rb
+++ b/src/ruby/end2end/multiple_killed_watching_threads_driver.rb
@@ -20,7 +20,7 @@
 
 include GRPC::Core::ConnectivityStates
 
-def watch_state(ch)
+def watch_state(ch, sleep_time)
   thd = Thread.new do
     state = ch.connectivity_state(false)
     fail "non-idle state: #{state}" unless state == IDLE
@@ -28,23 +28,34 @@
   end
   # sleep to get the thread into the middle of a
   # "watch connectivity state" call
-  sleep 0.1
+  sleep sleep_time
   thd.kill
 end
 
-def main
+def run_multiple_killed_watches(num_threads, sleep_time)
   channels = []
-  10.times do
+  num_threads.times do
     ch = GRPC::Core::Channel.new('dummy_host',
                                  nil, :this_channel_is_insecure)
-    watch_state(ch)
+    watch_state(ch, sleep_time)
     channels << ch
   end
 
   # checking state should still be safe to call
   channels.each do |c|
-    fail unless c.connectivity_state(false) == FATAL_FAILURE
+    connectivity_state = c.connectivity_state(false)
+    # The state should be FATAL_FAILURE in the case that it was interrupted
+    # while watching connectivity state, and IDLE if we never started
+    # watching the channel's connectivity state
+    unless [FATAL_FAILURE, IDLE].include?(connectivity_state)
+      fail "unexpected connectivity state: #{connectivity_state}"
+    end
   end
 end
 
+def main
+  run_multiple_killed_watches(10, 0.1)
+  run_multiple_killed_watches(1000, 0.001)
+end
+
 main
diff --git a/src/ruby/end2end/sig_handling_client.rb b/src/ruby/end2end/sig_handling_client.rb
index 129ad7c..6cd289a 100755
--- a/src/ruby/end2end/sig_handling_client.rb
+++ b/src/ruby/end2end/sig_handling_client.rb
@@ -66,7 +66,7 @@
 
   # The "shutdown" RPC should end very quickly.
   # Allow a few seconds to be safe.
-  srv = GRPC::RpcServer.new(poll_period: 3)
+  srv = new_rpc_server_for_testing(poll_period: 3)
   srv.add_http2_port("0.0.0.0:#{client_control_port}",
                      :this_port_is_insecure)
   stub = Echo::EchoServer::Stub.new("localhost:#{server_port}",
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.c b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
index 648d515..56f1d4c 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.c
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.c
@@ -223,21 +223,6 @@
 gpr_cmdline_usage_string_type gpr_cmdline_usage_string_import;
 gpr_cpu_num_cores_type gpr_cpu_num_cores_import;
 gpr_cpu_current_cpu_type gpr_cpu_current_cpu_import;
-gpr_histogram_create_type gpr_histogram_create_import;
-gpr_histogram_destroy_type gpr_histogram_destroy_import;
-gpr_histogram_add_type gpr_histogram_add_import;
-gpr_histogram_merge_type gpr_histogram_merge_import;
-gpr_histogram_percentile_type gpr_histogram_percentile_import;
-gpr_histogram_mean_type gpr_histogram_mean_import;
-gpr_histogram_stddev_type gpr_histogram_stddev_import;
-gpr_histogram_variance_type gpr_histogram_variance_import;
-gpr_histogram_maximum_type gpr_histogram_maximum_import;
-gpr_histogram_minimum_type gpr_histogram_minimum_import;
-gpr_histogram_count_type gpr_histogram_count_import;
-gpr_histogram_sum_type gpr_histogram_sum_import;
-gpr_histogram_sum_of_squares_type gpr_histogram_sum_of_squares_import;
-gpr_histogram_get_contents_type gpr_histogram_get_contents_import;
-gpr_histogram_merge_contents_type gpr_histogram_merge_contents_import;
 gpr_join_host_port_type gpr_join_host_port_import;
 gpr_split_host_port_type gpr_split_host_port_import;
 gpr_log_severity_string_type gpr_log_severity_string_import;
@@ -510,21 +495,6 @@
   gpr_cmdline_usage_string_import = (gpr_cmdline_usage_string_type) GetProcAddress(library, "gpr_cmdline_usage_string");
   gpr_cpu_num_cores_import = (gpr_cpu_num_cores_type) GetProcAddress(library, "gpr_cpu_num_cores");
   gpr_cpu_current_cpu_import = (gpr_cpu_current_cpu_type) GetProcAddress(library, "gpr_cpu_current_cpu");
-  gpr_histogram_create_import = (gpr_histogram_create_type) GetProcAddress(library, "gpr_histogram_create");
-  gpr_histogram_destroy_import = (gpr_histogram_destroy_type) GetProcAddress(library, "gpr_histogram_destroy");
-  gpr_histogram_add_import = (gpr_histogram_add_type) GetProcAddress(library, "gpr_histogram_add");
-  gpr_histogram_merge_import = (gpr_histogram_merge_type) GetProcAddress(library, "gpr_histogram_merge");
-  gpr_histogram_percentile_import = (gpr_histogram_percentile_type) GetProcAddress(library, "gpr_histogram_percentile");
-  gpr_histogram_mean_import = (gpr_histogram_mean_type) GetProcAddress(library, "gpr_histogram_mean");
-  gpr_histogram_stddev_import = (gpr_histogram_stddev_type) GetProcAddress(library, "gpr_histogram_stddev");
-  gpr_histogram_variance_import = (gpr_histogram_variance_type) GetProcAddress(library, "gpr_histogram_variance");
-  gpr_histogram_maximum_import = (gpr_histogram_maximum_type) GetProcAddress(library, "gpr_histogram_maximum");
-  gpr_histogram_minimum_import = (gpr_histogram_minimum_type) GetProcAddress(library, "gpr_histogram_minimum");
-  gpr_histogram_count_import = (gpr_histogram_count_type) GetProcAddress(library, "gpr_histogram_count");
-  gpr_histogram_sum_import = (gpr_histogram_sum_type) GetProcAddress(library, "gpr_histogram_sum");
-  gpr_histogram_sum_of_squares_import = (gpr_histogram_sum_of_squares_type) GetProcAddress(library, "gpr_histogram_sum_of_squares");
-  gpr_histogram_get_contents_import = (gpr_histogram_get_contents_type) GetProcAddress(library, "gpr_histogram_get_contents");
-  gpr_histogram_merge_contents_import = (gpr_histogram_merge_contents_type) GetProcAddress(library, "gpr_histogram_merge_contents");
   gpr_join_host_port_import = (gpr_join_host_port_type) GetProcAddress(library, "gpr_join_host_port");
   gpr_split_host_port_import = (gpr_split_host_port_type) GetProcAddress(library, "gpr_split_host_port");
   gpr_log_severity_string_import = (gpr_log_severity_string_type) GetProcAddress(library, "gpr_log_severity_string");
diff --git a/src/ruby/ext/grpc/rb_grpc_imports.generated.h b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
index c2698d1..6377008 100644
--- a/src/ruby/ext/grpc/rb_grpc_imports.generated.h
+++ b/src/ruby/ext/grpc/rb_grpc_imports.generated.h
@@ -36,7 +36,6 @@
 #include <grpc/support/avl.h>
 #include <grpc/support/cmdline.h>
 #include <grpc/support/cpu.h>
-#include <grpc/support/histogram.h>
 #include <grpc/support/host_port.h>
 #include <grpc/support/log.h>
 #include <grpc/support/log_windows.h>
@@ -559,7 +558,7 @@
 typedef void(*grpc_slice_buffer_move_first_no_ref_type)(grpc_slice_buffer* src, size_t n, grpc_slice_buffer* dst);
 extern grpc_slice_buffer_move_first_no_ref_type grpc_slice_buffer_move_first_no_ref_import;
 #define grpc_slice_buffer_move_first_no_ref grpc_slice_buffer_move_first_no_ref_import
-typedef void(*grpc_slice_buffer_move_first_into_buffer_type)(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* src, size_t n, void* dst);
+typedef void(*grpc_slice_buffer_move_first_into_buffer_type)(grpc_slice_buffer* src, size_t n, void* dst);
 extern grpc_slice_buffer_move_first_into_buffer_type grpc_slice_buffer_move_first_into_buffer_import;
 #define grpc_slice_buffer_move_first_into_buffer grpc_slice_buffer_move_first_into_buffer_import
 typedef grpc_slice(*grpc_slice_buffer_take_first_type)(grpc_slice_buffer* src);
@@ -649,51 +648,6 @@
 typedef unsigned(*gpr_cpu_current_cpu_type)(void);
 extern gpr_cpu_current_cpu_type gpr_cpu_current_cpu_import;
 #define gpr_cpu_current_cpu gpr_cpu_current_cpu_import
-typedef gpr_histogram*(*gpr_histogram_create_type)(double resolution, double max_bucket_start);
-extern gpr_histogram_create_type gpr_histogram_create_import;
-#define gpr_histogram_create gpr_histogram_create_import
-typedef void(*gpr_histogram_destroy_type)(gpr_histogram* h);
-extern gpr_histogram_destroy_type gpr_histogram_destroy_import;
-#define gpr_histogram_destroy gpr_histogram_destroy_import
-typedef void(*gpr_histogram_add_type)(gpr_histogram* h, double x);
-extern gpr_histogram_add_type gpr_histogram_add_import;
-#define gpr_histogram_add gpr_histogram_add_import
-typedef int(*gpr_histogram_merge_type)(gpr_histogram* dst, const gpr_histogram* src);
-extern gpr_histogram_merge_type gpr_histogram_merge_import;
-#define gpr_histogram_merge gpr_histogram_merge_import
-typedef double(*gpr_histogram_percentile_type)(gpr_histogram* histogram, double percentile);
-extern gpr_histogram_percentile_type gpr_histogram_percentile_import;
-#define gpr_histogram_percentile gpr_histogram_percentile_import
-typedef double(*gpr_histogram_mean_type)(gpr_histogram* histogram);
-extern gpr_histogram_mean_type gpr_histogram_mean_import;
-#define gpr_histogram_mean gpr_histogram_mean_import
-typedef double(*gpr_histogram_stddev_type)(gpr_histogram* histogram);
-extern gpr_histogram_stddev_type gpr_histogram_stddev_import;
-#define gpr_histogram_stddev gpr_histogram_stddev_import
-typedef double(*gpr_histogram_variance_type)(gpr_histogram* histogram);
-extern gpr_histogram_variance_type gpr_histogram_variance_import;
-#define gpr_histogram_variance gpr_histogram_variance_import
-typedef double(*gpr_histogram_maximum_type)(gpr_histogram* histogram);
-extern gpr_histogram_maximum_type gpr_histogram_maximum_import;
-#define gpr_histogram_maximum gpr_histogram_maximum_import
-typedef double(*gpr_histogram_minimum_type)(gpr_histogram* histogram);
-extern gpr_histogram_minimum_type gpr_histogram_minimum_import;
-#define gpr_histogram_minimum gpr_histogram_minimum_import
-typedef double(*gpr_histogram_count_type)(gpr_histogram* histogram);
-extern gpr_histogram_count_type gpr_histogram_count_import;
-#define gpr_histogram_count gpr_histogram_count_import
-typedef double(*gpr_histogram_sum_type)(gpr_histogram* histogram);
-extern gpr_histogram_sum_type gpr_histogram_sum_import;
-#define gpr_histogram_sum gpr_histogram_sum_import
-typedef double(*gpr_histogram_sum_of_squares_type)(gpr_histogram* histogram);
-extern gpr_histogram_sum_of_squares_type gpr_histogram_sum_of_squares_import;
-#define gpr_histogram_sum_of_squares gpr_histogram_sum_of_squares_import
-typedef const uint32_t*(*gpr_histogram_get_contents_type)(gpr_histogram* histogram, size_t* count);
-extern gpr_histogram_get_contents_type gpr_histogram_get_contents_import;
-#define gpr_histogram_get_contents gpr_histogram_get_contents_import
-typedef void(*gpr_histogram_merge_contents_type)(gpr_histogram* histogram, const uint32_t* data, size_t data_count, double min_seen, double max_seen, double sum, double sum_of_squares, double count);
-extern gpr_histogram_merge_contents_type gpr_histogram_merge_contents_import;
-#define gpr_histogram_merge_contents gpr_histogram_merge_contents_import
 typedef int(*gpr_join_host_port_type)(char** out, const char* host, int port);
 extern gpr_join_host_port_type gpr_join_host_port_import;
 #define gpr_join_host_port gpr_join_host_port_import
@@ -814,7 +768,7 @@
 typedef intptr_t(*gpr_stats_read_type)(const gpr_stats_counter* c);
 extern gpr_stats_read_type gpr_stats_read_import;
 #define gpr_stats_read gpr_stats_read_import
-typedef int(*gpr_thd_new_type)(gpr_thd_id* t, void (*thd_body)(void* arg), void* arg, const gpr_thd_options* options);
+typedef int(*gpr_thd_new_type)(gpr_thd_id* t, const char* thd_name, void (*thd_body)(void* arg), void* arg, const gpr_thd_options* options);
 extern gpr_thd_new_type gpr_thd_new_import;
 #define gpr_thd_new gpr_thd_new_import
 typedef gpr_thd_options(*gpr_thd_options_default_type)(void);
diff --git a/src/ruby/lib/grpc/version.rb b/src/ruby/lib/grpc/version.rb
index 4308231..be14125 100644
--- a/src/ruby/lib/grpc/version.rb
+++ b/src/ruby/lib/grpc/version.rb
@@ -14,5 +14,5 @@
 
 # GRPC contains the General RPC module.
 module GRPC
-  VERSION = '1.8.3'
+  VERSION = '1.9.0.dev'
 end
diff --git a/src/ruby/spec/channel_connection_spec.rb b/src/ruby/spec/channel_connection_spec.rb
index ce3e3b1..5c31f41 100644
--- a/src/ruby/spec/channel_connection_spec.rb
+++ b/src/ruby/spec/channel_connection_spec.rb
@@ -16,9 +16,10 @@
 
 include Timeout
 include GRPC::Core
+include GRPC::Spec::Helpers
 
 def start_server(port = 0)
-  @srv = GRPC::RpcServer.new(pool_size: 1)
+  @srv = new_rpc_server_for_testing(pool_size: 1)
   server_port = @srv.add_http2_port("localhost:#{port}", :this_port_is_insecure)
   @srv.handle(EchoService)
   @server_thd = Thread.new { @srv.run }
diff --git a/src/ruby/spec/client_auth_spec.rb b/src/ruby/spec/client_auth_spec.rb
index 79c9192..b955ad2 100644
--- a/src/ruby/spec/client_auth_spec.rb
+++ b/src/ruby/spec/client_auth_spec.rb
@@ -95,7 +95,7 @@
     server_opts = {
       poll_period: 1
     }
-    @srv = RpcServer.new(**server_opts)
+    @srv = new_rpc_server_for_testing(**server_opts)
     port = @srv.add_http2_port('0.0.0.0:0', create_server_creds)
     @srv.handle(SslTestService)
     @srv_thd = Thread.new { @srv.run }
diff --git a/src/ruby/spec/client_server_spec.rb b/src/ruby/spec/client_server_spec.rb
index adab8c9..14ad369 100644
--- a/src/ruby/spec/client_server_spec.rb
+++ b/src/ruby/spec/client_server_spec.rb
@@ -542,7 +542,7 @@
 describe 'the http client/server' do
   before(:example) do
     server_host = '0.0.0.0:0'
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     server_port = @server.add_http2_port(server_host, :this_port_is_insecure)
     @server.start
     @ch = Channel.new("0.0.0.0:#{server_port}", nil, :this_channel_is_insecure)
@@ -574,7 +574,7 @@
     server_host = '0.0.0.0:0'
     server_creds = GRPC::Core::ServerCredentials.new(
       nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     server_port = @server.add_http2_port(server_host, server_creds)
     @server.start
     args = { Channel::SSL_TARGET => 'foo.test.google.fr' }
diff --git a/src/ruby/spec/generic/active_call_spec.rb b/src/ruby/spec/generic/active_call_spec.rb
index 120acc3..135d1f2 100644
--- a/src/ruby/spec/generic/active_call_spec.rb
+++ b/src/ruby/spec/generic/active_call_spec.rb
@@ -40,7 +40,7 @@
   before(:each) do
     @pass_through = proc { |x| x }
     host = '0.0.0.0:0'
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     server_port = @server.add_http2_port(host, :this_port_is_insecure)
     @server.start
     @ch = GRPC::Core::Channel.new("0.0.0.0:#{server_port}", nil,
diff --git a/src/ruby/spec/generic/client_stub_spec.rb b/src/ruby/spec/generic/client_stub_spec.rb
index 9539e56..79eeca9 100644
--- a/src/ruby/spec/generic/client_stub_spec.rb
+++ b/src/ruby/spec/generic/client_stub_spec.rb
@@ -888,12 +888,12 @@
     secure_credentials = GRPC::Core::ServerCredentials.new(
       nil, [{ private_key: certs[1], cert_chain: certs[2] }], false)
 
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     @server.add_http2_port('0.0.0.0:0', secure_credentials)
   end
 
   def create_test_server
-    @server = GRPC::Core::Server.new(nil)
+    @server = new_core_server_for_testing(nil)
     @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
   end
 
diff --git a/src/ruby/spec/generic/interceptor_registry_spec.rb b/src/ruby/spec/generic/interceptor_registry_spec.rb
index f93f5ce..eb75d1e 100644
--- a/src/ruby/spec/generic/interceptor_registry_spec.rb
+++ b/src/ruby/spec/generic/interceptor_registry_spec.rb
@@ -14,7 +14,7 @@
 require 'spec_helper'
 
 describe GRPC::InterceptorRegistry do
-  let(:server) { RpcServer.new }
+  let(:server) { new_rpc_server_for_testing }
   let(:interceptor) { TestServerInterceptor.new }
   let(:interceptors) { [interceptor] }
   let(:registry) { described_class.new(interceptors) }
diff --git a/src/ruby/spec/generic/rpc_server_spec.rb b/src/ruby/spec/generic/rpc_server_spec.rb
index 05059fb..e072d0c 100644
--- a/src/ruby/spec/generic/rpc_server_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_spec.rb
@@ -172,7 +172,7 @@
     it 'can be created with just some args' do
       opts = { server_args: { a_channel_arg: 'an_arg' } }
       blk = proc do
-        RpcServer.new(**opts)
+        new_rpc_server_for_testing(**opts)
       end
       expect(&blk).not_to raise_error
     end
@@ -183,7 +183,7 @@
           server_args: { a_channel_arg: 'an_arg' },
           creds: Object.new
         }
-        RpcServer.new(**opts)
+        new_rpc_server_for_testing(**opts)
       end
       expect(&blk).to raise_error
     end
@@ -192,7 +192,7 @@
   describe '#stopped?' do
     before(:each) do
       opts = { server_args: { a_channel_arg: 'an_arg' }, poll_period: 1.5 }
-      @srv = RpcServer.new(**opts)
+      @srv = new_rpc_server_for_testing(**opts)
       @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
     end
 
@@ -224,7 +224,7 @@
       opts = {
         server_args: { a_channel_arg: 'an_arg' }
       }
-      r = RpcServer.new(**opts)
+      r = new_rpc_server_for_testing(**opts)
       expect(r.running?).to be(false)
     end
 
@@ -233,7 +233,7 @@
         server_args: { a_channel_arg: 'an_arg' },
         poll_period: 2
       }
-      r = RpcServer.new(**opts)
+      r = new_rpc_server_for_testing(**opts)
       r.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
       expect { r.run }.to raise_error(RuntimeError)
     end
@@ -243,7 +243,7 @@
         server_args: { a_channel_arg: 'an_arg' },
         poll_period: 2.5
       }
-      r = RpcServer.new(**opts)
+      r = new_rpc_server_for_testing(**opts)
       r.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
       r.handle(EchoService)
       t = Thread.new { r.run }
@@ -257,7 +257,7 @@
   describe '#handle' do
     before(:each) do
       @opts = { server_args: { a_channel_arg: 'an_arg' }, poll_period: 1 }
-      @srv = RpcServer.new(**@opts)
+      @srv = new_rpc_server_for_testing(**@opts)
       @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
     end
 
@@ -303,7 +303,7 @@
         server_opts = {
           poll_period: 1
         }
-        @srv = RpcServer.new(**server_opts)
+        @srv = new_rpc_server_for_testing(**server_opts)
         server_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @host = "localhost:#{server_port}"
         @ch = GRPC::Core::Channel.new(@host, nil, :this_channel_is_insecure)
@@ -474,7 +474,7 @@
           poll_period: 1,
           max_waiting_requests: 1
         }
-        alt_srv = RpcServer.new(**opts)
+        alt_srv = new_rpc_server_for_testing(**opts)
         alt_srv.handle(SlowService)
         alt_port = alt_srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         alt_host = "0.0.0.0:#{alt_port}"
@@ -538,7 +538,7 @@
           poll_period: 1,
           connect_md_proc: test_md_proc
         }
-        @srv = RpcServer.new(**server_opts)
+        @srv = new_rpc_server_for_testing(**server_opts)
         alt_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @alt_host = "0.0.0.0:#{alt_port}"
       end
@@ -573,7 +573,7 @@
         server_opts = {
           poll_period: 1
         }
-        @srv = RpcServer.new(**server_opts)
+        @srv = new_rpc_server_for_testing(**server_opts)
         alt_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @alt_host = "0.0.0.0:#{alt_port}"
       end
@@ -624,7 +624,7 @@
         server_opts = {
           poll_period: 1
         }
-        @srv = RpcServer.new(**server_opts)
+        @srv = new_rpc_server_for_testing(**server_opts)
         alt_port = @srv.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @alt_host = "0.0.0.0:#{alt_port}"
 
diff --git a/src/ruby/spec/google_rpc_status_utils_spec.rb b/src/ruby/spec/google_rpc_status_utils_spec.rb
index 6f2a06b..3263589 100644
--- a/src/ruby/spec/google_rpc_status_utils_spec.rb
+++ b/src/ruby/spec/google_rpc_status_utils_spec.rb
@@ -19,6 +19,7 @@
 require 'google/protobuf/well_known_types'
 
 include GRPC::Core
+include GRPC::Spec::Helpers
 
 describe 'conversion from a status struct to a google protobuf status' do
   it 'fails if the input is not a status struct' do
@@ -150,7 +151,7 @@
 
 describe 'receving a google rpc status from a remote endpoint' do
   def start_server(encoded_rpc_status)
-    @srv = GRPC::RpcServer.new(pool_size: 1)
+    @srv = new_rpc_server_for_testing(pool_size: 1)
     @server_port = @srv.add_http2_port('localhost:0',
                                        :this_port_is_insecure)
     @srv.handle(GoogleRpcStatusTestService.new(encoded_rpc_status))
@@ -238,7 +239,7 @@
 
 describe 'when the endpoint doesnt send grpc-status-details-bin' do
   def start_server
-    @srv = GRPC::RpcServer.new(pool_size: 1)
+    @srv = new_rpc_server_for_testing(pool_size: 1)
     @server_port = @srv.add_http2_port('localhost:0',
                                        :this_port_is_insecure)
     @srv.handle(NoStatusDetailsBinTestService)
diff --git a/src/ruby/spec/pb/health/checker_spec.rb b/src/ruby/spec/pb/health/checker_spec.rb
index c79ccfd..58a6023 100644
--- a/src/ruby/spec/pb/health/checker_spec.rb
+++ b/src/ruby/spec/pb/health/checker_spec.rb
@@ -192,7 +192,7 @@
       server_opts = {
         poll_period: 1
       }
-      @srv = RpcServer.new(**server_opts)
+      @srv = new_rpc_server_for_testing(**server_opts)
       server_port = @srv.add_http2_port(server_host, :this_port_is_insecure)
       @host = "localhost:#{server_port}"
       @ch = GRPC::Core::Channel.new(@host, nil, :this_channel_is_insecure)
diff --git a/src/ruby/spec/server_spec.rb b/src/ruby/spec/server_spec.rb
index c0a5957..a0d27b6 100644
--- a/src/ruby/spec/server_spec.rb
+++ b/src/ruby/spec/server_spec.rb
@@ -30,12 +30,12 @@
 
   describe '#start' do
     it 'runs without failing' do
-      blk = proc { Server.new(nil).start }
+      blk = proc { new_core_server_for_testing(nil).start }
       expect(&blk).to_not raise_error
     end
 
     it 'fails if the server is closed' do
-      s = Server.new(nil)
+      s = new_core_server_for_testing(nil)
       s.close
       expect { s.start }.to raise_error(RuntimeError)
     end
@@ -85,7 +85,7 @@
     describe 'for insecure servers' do
       it 'runs without failing' do
         blk = proc do
-          s = Server.new(nil)
+          s = new_core_server_for_testing(nil)
           s.add_http2_port('localhost:0', :this_port_is_insecure)
           s.close
         end
@@ -93,7 +93,7 @@
       end
 
       it 'fails if the server is closed' do
-        s = Server.new(nil)
+        s = new_core_server_for_testing(nil)
         s.close
         blk = proc do
           s.add_http2_port('localhost:0', :this_port_is_insecure)
@@ -106,7 +106,7 @@
       let(:cert) { create_test_cert }
       it 'runs without failing' do
         blk = proc do
-          s = Server.new(nil)
+          s = new_core_server_for_testing(nil)
           s.add_http2_port('localhost:0', cert)
           s.close
         end
@@ -114,7 +114,7 @@
       end
 
       it 'fails if the server is closed' do
-        s = Server.new(nil)
+        s = new_core_server_for_testing(nil)
         s.close
         blk = proc { s.add_http2_port('localhost:0', cert) }
         expect(&blk).to raise_error(RuntimeError)
@@ -124,7 +124,7 @@
 
   shared_examples '#new' do
     it 'takes nil channel args' do
-      expect { Server.new(nil) }.to_not raise_error
+      expect { new_core_server_for_testing(nil) }.to_not raise_error
     end
 
     it 'does not take a hash with bad keys as channel args' do
@@ -175,14 +175,14 @@
 
   describe '#new with an insecure channel' do
     def construct_with_args(a)
-      proc { Server.new(a) }
+      proc { new_core_server_for_testing(a) }
     end
 
     it_behaves_like '#new'
   end
 
   def start_a_server
-    s = Server.new(nil)
+    s = new_core_server_for_testing(nil)
     s.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
     s.start
     s
diff --git a/src/ruby/spec/support/helpers.rb b/src/ruby/spec/support/helpers.rb
index 65fffff..29028df 100644
--- a/src/ruby/spec/support/helpers.rb
+++ b/src/ruby/spec/support/helpers.rb
@@ -31,7 +31,7 @@
       #
       def build_rpc_server(server_opts: {},
                            client_opts: {})
-        @server = RpcServer.new({ poll_period: 1 }.merge(server_opts))
+        @server = new_rpc_server_for_testing({ poll_period: 1 }.merge(server_opts))
         @port = @server.add_http2_port('0.0.0.0:0', :this_port_is_insecure)
         @host = "0.0.0.0:#{@port}"
         @client_opts = client_opts
@@ -68,6 +68,40 @@
         opts ||= @client_opts
         klass.new(host, :this_channel_is_insecure, **opts)
       end
+
+      ##
+      # Build an RPCServer for use in tests. Adds args
+      # that are useful for all tests.
+      #
+      # @param [Hash] server_opts
+      #
+      def new_rpc_server_for_testing(server_opts = {})
+        server_opts[:server_args] ||= {}
+        update_server_args_hash(server_opts[:server_args])
+        RpcServer.new(**server_opts)
+      end
+
+      ##
+      # Build a GRPC::Core::Server for use in tests. Adds args
+      # that are useful for all tests.
+      #
+      # @param [Hash] server_args
+      #
+      def new_core_server_for_testing(server_args)
+        server_args.nil? && server_args = {}
+        update_server_args_hash(server_args)
+        GRPC::Core::Server.new(server_args)
+      end
+
+      def update_server_args_hash(server_args)
+        so_reuseport_arg = 'grpc.so_reuseport'
+        unless server_args[so_reuseport_arg].nil?
+          fail 'Unexpected. grpc.so_reuseport already set.'
+        end
+        # Run tests without so_reuseport to eliminate the chance of
+        # cross-talk.
+        server_args[so_reuseport_arg] = 0
+      end
     end
   end
 end
diff --git a/src/ruby/tools/version.rb b/src/ruby/tools/version.rb
index 453a889..48aad39 100644
--- a/src/ruby/tools/version.rb
+++ b/src/ruby/tools/version.rb
@@ -14,6 +14,6 @@
 
 module GRPC
   module Tools
-    VERSION = '1.8.3'
+    VERSION = '1.9.0.dev'
   end
 end
diff --git a/templates/CMakeLists.txt.template b/templates/CMakeLists.txt.template
index 0be5d14..8de0ccd 100644
--- a/templates/CMakeLists.txt.template
+++ b/templates/CMakeLists.txt.template
@@ -147,181 +147,12 @@
     set(_gRPC_PROTOBUF_LIBRARY_NAME "libprotobuf")
   endif()
 
-  if("<%text>${gRPC_ZLIB_PROVIDER}</%text>" STREQUAL "module")
-    if(NOT ZLIB_ROOT_DIR)
-      set(ZLIB_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/zlib)
-    endif()
-    set(ZLIB_INCLUDE_DIR "<%text>${ZLIB_ROOT_DIR}</%text>")
-    if(EXISTS "<%text>${ZLIB_ROOT_DIR}</%text>/CMakeLists.txt")
-        # TODO(jtattermusch): workaround for https://github.com/madler/zlib/issues/218
-        include_directories(<%text>${ZLIB_INCLUDE_DIR}</%text>)
-
-        add_subdirectory(<%text>${ZLIB_ROOT_DIR}</%text> third_party/zlib)
-        if(TARGET zlibstatic)
-            set(_gRPC_ZLIB_LIBRARIES zlibstatic)
-        endif()
-    else()
-        message(WARNING "gRPC_ZLIB_PROVIDER is \"module\" but ZLIB_ROOT_DIR is wrong")
-    endif()
-    if(gRPC_INSTALL)
-      message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_ZLIB_PROVIDER is \"module\"")
-      set(gRPC_INSTALL FALSE)
-    endif()
-  elseif("<%text>${gRPC_ZLIB_PROVIDER}</%text>" STREQUAL "package")
-    find_package(ZLIB REQUIRED)
-    set(_gRPC_ZLIB_LIBRARIES <%text>${ZLIB_LIBRARIES}</%text>)
-    set(_gRPC_FIND_ZLIB "if(NOT ZLIB_FOUND)\n  find_package(ZLIB)\nendif()")
-  endif()
-
-  if("<%text>${gRPC_CARES_PROVIDER}</%text>" STREQUAL "module")
-    if(NOT CARES_ROOT_DIR)
-      set(CARES_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/cares/cares)
-    endif()
-    set(CARES_SHARED OFF CACHE BOOL "disable shared library")
-    set(CARES_STATIC ON CACHE BOOL "link cares statically")
-    set(CARES_INCLUDE_DIR "<%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/cares/cares")
-    add_subdirectory(third_party/cares/cares)
-    if(TARGET c-ares)
-      set(_gRPC_CARES_LIBRARIES c-ares)
-    endif()
-    if(gRPC_INSTALL)
-      message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_CARES_PROVIDER is \"module\"")
-      set(gRPC_INSTALL FALSE)
-    endif()
-  elseif("<%text>${gRPC_CARES_PROVIDER}</%text>" STREQUAL "package")
-    find_package(c-ares REQUIRED CONFIG)
-    if(TARGET c-ares::cares)
-      set(_gRPC_CARES_LIBRARIES c-ares::cares)
-    endif()
-    set(_gRPC_FIND_CARES "if(NOT c-ares_FOUND)\n  find_package(c-ares CONFIG)\nendif()")
-  endif()
-
-  if("<%text>${gRPC_PROTOBUF_PROVIDER}</%text>" STREQUAL "module")
-    # Building the protobuf tests require gmock what is not part of a standard protobuf checkout.
-    # Disable them unless they are explicitly requested from the cmake command line (when we assume
-    # gmock is downloaded to the right location inside protobuf).
-    if(NOT protobuf_BUILD_TESTS)
-      set(protobuf_BUILD_TESTS OFF CACHE BOOL "Build protobuf tests")
-    endif()
-    # Disable building protobuf with zlib. Building protobuf with zlib breaks
-    # the build if zlib is not installed on the system.
-    if(NOT protobuf_WITH_ZLIB)
-      set(protobuf_WITH_ZLIB OFF CACHE BOOL "Build protobuf with zlib.")
-    endif()
-    if(NOT PROTOBUF_ROOT_DIR)
-      set(PROTOBUF_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/protobuf)
-    endif()
-    set(PROTOBUF_WELLKNOWN_IMPORT_DIR <%text>${PROTOBUF_ROOT_DIR}</%text>/src)
-    if(EXISTS "<%text>${PROTOBUF_ROOT_DIR}</%text>/cmake/CMakeLists.txt")
-      set(protobuf_MSVC_STATIC_RUNTIME OFF CACHE BOOL "Link static runtime libraries")
-      add_subdirectory(<%text>${PROTOBUF_ROOT_DIR}</%text>/cmake third_party/protobuf)
-      if(TARGET <%text>${_gRPC_PROTOBUF_LIBRARY_NAME}</%text>)
-        set(_gRPC_PROTOBUF_LIBRARIES <%text>${_gRPC_PROTOBUF_LIBRARY_NAME}</%text>)
-      endif()
-      if(TARGET libprotoc)
-        set(_gRPC_PROTOBUF_PROTOC_LIBRARIES libprotoc)
-      endif()
-      if(TARGET protoc)
-        set(_gRPC_PROTOBUF_PROTOC protoc)
-        set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protoc>)
-      endif()
-    else()
-        message(WARNING "gRPC_PROTOBUF_PROVIDER is \"module\" but PROTOBUF_ROOT_DIR is wrong")
-    endif()
-    if(gRPC_INSTALL)
-      message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_PROTOBUF_PROVIDER is \"module\"")
-      set(gRPC_INSTALL FALSE)
-    endif()
-  elseif("<%text>${gRPC_PROTOBUF_PROVIDER}</%text>" STREQUAL "package")
-    find_package(Protobuf REQUIRED <%text>${gRPC_PROTOBUF_PACKAGE_TYPE}</%text>)
-    if(Protobuf_FOUND OR PROTOBUF_FOUND)
-      if(TARGET protobuf::<%text>${_gRPC_PROTOBUF_LIBRARY_NAME}</%text>)
-        set(_gRPC_PROTOBUF_LIBRARIES protobuf::<%text>${_gRPC_PROTOBUF_LIBRARY_NAME}</%text>)
-      else()
-        set(_gRPC_PROTOBUF_LIBRARIES <%text>${PROTOBUF_LIBRARIES}</%text>)
-      endif()
-      if(TARGET protobuf::libprotoc)
-        set(_gRPC_PROTOBUF_PROTOC_LIBRARIES protobuf::libprotoc)
-      else()
-        set(_gRPC_PROTOBUF_PROTOC_LIBRARIES <%text>${PROTOBUF_PROTOC_LIBRARIES}</%text>)
-      endif()
-      if(TARGET protobuf::protoc)
-        set(_gRPC_PROTOBUF_PROTOC protobuf::protoc)
-        set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE $<TARGET_FILE:protobuf::protoc>)
-      else()
-        set(_gRPC_PROTOBUF_PROTOC <%text>${PROTOBUF_PROTOC_EXECUTABLE}</%text>)
-        set(_gRPC_PROTOBUF_PROTOC_EXECUTABLE <%text>${PROTOBUF_PROTOC_EXECUTABLE}</%text>)
-      endif()
-      set(_gRPC_FIND_PROTOBUF "if(NOT Protobuf_FOUND AND NOT PROTOBUF_FOUND)\n  find_package(Protobuf <%text>${gRPC_PROTOBUF_PACKAGE_TYPE}</%text>)\nendif()")
-    endif()
-    if(PROTOBUF_FOUND)
-      include_directories(<%text>${PROTOBUF_INCLUDE_DIRS}</%text>)
-    endif()
-    set(PROTOBUF_WELLKNOWN_IMPORT_DIR /usr/local/include)
-  endif()
-
-  if("<%text>${gRPC_SSL_PROVIDER}</%text>" STREQUAL "module")
-    if(NOT BORINGSSL_ROOT_DIR)
-      set(BORINGSSL_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/boringssl)
-    endif()
-    if(EXISTS "<%text>${BORINGSSL_ROOT_DIR}</%text>/CMakeLists.txt")
-      set(OPENSSL_NO_ASM ON)  # make boringssl buildable with Visual Studio
-      add_subdirectory(<%text>${BORINGSSL_ROOT_DIR}</%text> third_party/boringssl)
-      if(TARGET ssl)
-        set(_gRPC_SSL_LIBRARIES ssl)
-      endif()
-    else()
-        message(WARNING "gRPC_SSL_PROVIDER is \"module\" but BORINGSSL_ROOT_DIR is wrong")
-    endif()
-    if(gRPC_INSTALL)
-      message(WARNING "gRPC_INSTALL will be forced to FALSE because gRPC_SSL_PROVIDER is \"module\"")
-      set(gRPC_INSTALL FALSE)
-    endif()
-  elseif("<%text>${gRPC_SSL_PROVIDER}</%text>" STREQUAL "package")
-    find_package(OpenSSL REQUIRED)
-    set(_gRPC_SSL_LIBRARIES <%text>${OPENSSL_LIBRARIES}</%text>)
-    set(_gRPC_FIND_SSL "if(NOT OPENSSL_FOUND)\n  find_package(OpenSSL)\nendif()")
-  endif()
-
-  if("<%text>${gRPC_GFLAGS_PROVIDER}</%text>" STREQUAL "module")
-    if(NOT GFLAGS_ROOT_DIR)
-      set(GFLAGS_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/gflags)
-    endif()
-    if(EXISTS "<%text>${GFLAGS_ROOT_DIR}</%text>/CMakeLists.txt")
-        add_subdirectory(<%text>${GFLAGS_ROOT_DIR}</%text> third_party/gflags)
-        if(TARGET gflags_static)
-            set(_gRPC_GFLAGS_LIBRARIES gflags_static)
-        endif()
-    else()
-        message(WARNING "gRPC_GFLAGS_PROVIDER is \"module\" but GFLAGS_ROOT_DIR is wrong")
-    endif()
-  elseif("<%text>${gRPC_GFLAGS_PROVIDER}</%text>" STREQUAL "package")
-    find_package(gflags)
-    if(TARGET gflags::gflags)
-      set(_gRPC_GFLAGS_LIBRARIES gflags::gflags)
-    endif()
-    set(_gRPC_FIND_GFLAGS "if(NOT gflags_FOUND)\n  find_package(gflags)\nendif()")
-  endif()
-
-  if("<%text>${gRPC_BENCHMARK_PROVIDER}</%text>" STREQUAL "module")
-    if(NOT BENCHMARK_ROOT_DIR)
-      set(BENCHMARK_ROOT_DIR <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/third_party/benchmark)
-    endif()
-    if(EXISTS "<%text>${BENCHMARK_ROOT_DIR}</%text>/CMakeLists.txt")
-        add_subdirectory(<%text>${BENCHMARK_ROOT_DIR}</%text> third_party/benchmark)
-        if(TARGET benchmark)
-            set(_gRPC_BENCHMARK_LIBRARIES benchmark)
-        endif()
-    else()
-        message(WARNING "gRPC_BENCHMARK_PROVIDER is \"module\" but BENCHMARK_ROOT_DIR is wrong")
-    endif()
-  elseif("<%text>${gRPC_BENCHMARK_PROVIDER}</%text>" STREQUAL "package")
-    find_package(benchmark)
-    if(TARGET benchmark::benchmark)
-      set(_gRPC_BENCHMARK_LIBRARIES benchmark::benchmark)
-    endif()
-    set(_gRPC_FIND_BENCHMARK "if(NOT benchmark_FOUND)\n  find_package(benchmark)\nendif()")
-  endif()
+  include(cmake/zlib.cmake)
+  include(cmake/cares.cmake)
+  include(cmake/protobuf.cmake)
+  include(cmake/ssl.cmake)
+  include(cmake/gflags.cmake)
+  include(cmake/benchmark.cmake)
 
   if(NOT MSVC)
     set(CMAKE_C_FLAGS   "<%text>${CMAKE_C_FLAGS}</%text> -std=c99")
@@ -444,7 +275,7 @@
   % for lib in libs:
   % if lib.build in ["all", "protoc", "tool", "test", "private"] and not lib.boringssl:
   % if not lib.get('build_system', []) or 'cmake' in lib.get('build_system', []):
-  % if not lib.name in ['benchmark', 'z']:  # we build these using CMake instead
+  % if not lib.name in ['ares', 'benchmark', 'z']:  # we build these using CMake instead
   % if lib.build in ["test", "private"]:
   if (gRPC_BUILD_TESTS)
   ${cc_library(lib)}
@@ -514,7 +345,7 @@
   target_include_directories(${lib.name}
     PUBLIC <%text>$<INSTALL_INTERFACE:${gRPC_INSTALL_INCLUDEDIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include></%text>
     PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>
-    PRIVATE <%text>${BORINGSSL_ROOT_DIR}</%text>/include
+    PRIVATE <%text>${_gRPC_SSL_INCLUDE_DIR}</%text>
     PRIVATE <%text>${PROTOBUF_ROOT_DIR}</%text>/src
     PRIVATE <%text>${ZLIB_INCLUDE_DIR}</%text>
     PRIVATE <%text>${BENCHMARK}</%text>/include
@@ -585,7 +416,7 @@
   target_include_directories(${tgt.name}
     PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>
     PRIVATE <%text>${CMAKE_CURRENT_SOURCE_DIR}</%text>/include
-    PRIVATE <%text>${BORINGSSL_ROOT_DIR}</%text>/include
+    PRIVATE <%text>${_gRPC_SSL_INCLUDE_DIR}</%text>
     PRIVATE <%text>${PROTOBUF_ROOT_DIR}</%text>/src
     PRIVATE <%text>${BENCHMARK_ROOT_DIR}</%text>/include
     PRIVATE <%text>${ZLIB_ROOT_DIR}</%text>
diff --git a/templates/Makefile.template b/templates/Makefile.template
index 3eb8388..954bea7 100644
--- a/templates/Makefile.template
+++ b/templates/Makefile.template
@@ -1383,14 +1383,14 @@
   install-pkg-config_c: pc_c pc_c_unsecure
   	$(E) "[INSTALL] Installing C pkg-config files"
   	$(Q) $(INSTALL) -d $(prefix)/lib/pkgconfig
-  	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc.pc $(prefix)/lib/pkgconfig/grpc.pc
-  	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc $(prefix)/lib/pkgconfig/grpc_unsecure.pc
+  	$(Q) $(INSTALL) -m 0644 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc.pc $(prefix)/lib/pkgconfig/grpc.pc
+  	$(Q) $(INSTALL) -m 0644 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc_unsecure.pc $(prefix)/lib/pkgconfig/grpc_unsecure.pc
 
   install-pkg-config_cxx: pc_cxx pc_cxx_unsecure
   	$(E) "[INSTALL] Installing C++ pkg-config files"
   	$(Q) $(INSTALL) -d $(prefix)/lib/pkgconfig
-  	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc $(prefix)/lib/pkgconfig/grpc++.pc
-  	$(Q) $(INSTALL) $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc $(prefix)/lib/pkgconfig/grpc++_unsecure.pc
+  	$(Q) $(INSTALL) -m 0644 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++.pc $(prefix)/lib/pkgconfig/grpc++.pc
+  	$(Q) $(INSTALL) -m 0644 $(LIBDIR)/$(CONFIG)/pkgconfig/grpc++_unsecure.pc $(prefix)/lib/pkgconfig/grpc++_unsecure.pc
 
   install-certs: etc/roots.pem
   	$(E) "[INSTALL] Installing root certificates"
diff --git a/templates/gRPC-Core.podspec.template b/templates/gRPC-Core.podspec.template
index 04b8940..9785d15 100644
--- a/templates/gRPC-Core.podspec.template
+++ b/templates/gRPC-Core.podspec.template
@@ -22,27 +22,51 @@
   # limitations under the License.
 
   <%!
-  def grpc_private_files(libs):
+  def grpc_lib_files(libs, expect_libs, groups):
     out = []
     for lib in libs:
-      if lib.name in ("grpc", "gpr"):
-        out += lib.get('headers', [])
-        out += lib.get('src', [])
-    return [f for f in out if not f.startswith("third_party/nanopb/")]
+      if lib.name in expect_libs:
+        for group in groups:
+          out += lib.get(group, [])
+    return out
+
+  def grpc_private_files(libs):
+    out = grpc_lib_files(libs, ("grpc", "gpr"), ("headers", "src"))
+    return [file for file in out if not file.startswith("third_party/nanopb/")]
 
   def grpc_public_headers(libs):
-    out = []
-    for lib in libs:
-      if lib.name in ("grpc", "gpr"):
-        out += lib.get('public_headers', [])
+    out = grpc_lib_files(libs, ("grpc", "gpr"), ("public_headers",))
     return out
 
   def grpc_private_headers(libs):
-    out = []
-    for lib in libs:
-      if lib.name in ("grpc", "gpr"):
-        out += lib.get('headers', [])
-    return [f for f in out if not f.startswith("third_party/nanopb/")]
+    out = grpc_lib_files(libs, ("grpc", "gpr"), ("headers",))
+    return [file for file in out if not file.startswith("third_party/nanopb/")]
+
+  def grpc_cronet_files(libs):
+    out = grpc_lib_files(libs, ("grpc_cronet",), ("src", "headers"))
+    excl = grpc_private_files(libs)
+    excl += [
+        # We do not need the cronet-dedicated plugin registry
+        "src/core/plugin_registry/grpc_cronet_plugin_registry.cc",
+        # We do not need the dummy cronet API for ObjC
+        "src/core/ext/transport/cronet/transport/cronet_api_dummy.cc",
+    ]
+    return [file for file in out if not file in excl]
+
+  def grpc_cronet_public_headers(libs):
+    out = grpc_lib_files(libs, ("grpc_cronet",), ("public_headers",))
+    excl = grpc_public_headers(libs)
+    return [file for file in out if not file in excl]
+
+  def grpc_test_util_files(libs):
+    out = grpc_lib_files(libs, ("grpc_test_util", "gpr_test_util"), ("src", "headers"))
+    excl = grpc_private_files(libs)
+    return [file for file in out if not file in excl]
+
+  def end2end_tests_files(libs):
+    out = grpc_lib_files(libs, ("end2end_tests",), ("src", "headers"))
+    excl = grpc_private_files(libs)
+    return [file for file in out if not file in excl]
 
   def ruby_multiline_list(files, indent):
     return (',\n' + indent*' ').join('\'%s\'' % f for f in files)
@@ -61,6 +85,10 @@
       :tag => "v#{version}",
     }
 
+    # gRPC podspecs depend on the fix for https://github.com/CocoaPods/CocoaPods/issues/6024,
+    # which was released in CocoaPods v1.2.0.
+    s.cocoapods_version = '>= 1.2.0'
+
     s.ios.deployment_target = '7.0'
     s.osx.deployment_target = '10.9'
     s.requires_arc = false
@@ -149,7 +177,7 @@
 
     s.subspec 'Cronet-Interface' do |ss|
       ss.header_mappings_dir = 'include/grpc'
-      ss.source_files = 'include/grpc/grpc_cronet.h'
+      ss.source_files = ${ruby_multiline_list(grpc_cronet_public_headers(libs), 22)}
     end
 
     s.subspec 'Cronet-Implementation' do |ss|
@@ -159,9 +187,7 @@
       ss.dependency "#{s.name}/Implementation", version
       ss.dependency "#{s.name}/Cronet-Interface", version
 
-      ss.source_files = 'src/core/ext/transport/cronet/client/secure/cronet_channel_create.cc',
-                        'src/core/ext/transport/cronet/transport/cronet_transport.{cc,h}',
-                        'third_party/objective_c/Cronet/bidirectional_stream_c.h'
+      ss.source_files = ${ruby_multiline_list(grpc_cronet_files(libs), 22)}
     end
 
     s.subspec 'Tests' do |ss|
@@ -170,17 +196,8 @@
       ss.dependency "#{s.name}/Interface", version
       ss.dependency "#{s.name}/Implementation", version
 
-      ss.source_files = 'test/core/end2end/cq_verifier.{cc,h}',
-                        'test/core/end2end/end2end_tests.{cc,h}',
-                        'test/core/end2end/end2end_test_utils.cc',
-                        'test/core/end2end/tests/*.{cc,h}',
-                        'test/core/end2end/fixtures/*.h',
-                        'test/core/end2end/data/*.{cc,h}',
-                        'test/core/util/debugger_macros.{cc,h}',
-                        'test/core/util/test_config.{cc,h}',
-                        'test/core/util/port.h',
-                        'test/core/util/port.cc',
-                        'test/core/util/port_server_client.{cc,h}'
+      ss.source_files = ${ruby_multiline_list(grpc_test_util_files(libs), 22)},
+                        ${ruby_multiline_list(end2end_tests_files(libs), 22)}
     end
 
     # TODO (mxyan): Instead of this hack, add include path "third_party" to C core's include path?
diff --git a/templates/src/core/plugin_registry.template b/templates/src/core/plugin_registry.template
index 8d76171..805ae90 100644
--- a/templates/src/core/plugin_registry.template
+++ b/templates/src/core/plugin_registry.template
@@ -25,8 +25,8 @@
   #include <grpc/grpc.h>
 
   %for plugin in selected.plugins:
-  extern "C" void ${plugin}_init(void);
-  extern "C" void ${plugin}_shutdown(void);
+  void ${plugin}_init(void);
+  void ${plugin}_shutdown(void);
   %endfor
 
   void grpc_register_built_in_plugins(void) {
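With the core tree now compiled as C++, the plugin registry template drops the extern "C" linkage from the per-plugin init/shutdown declarations it emits. Below is a minimal sketch of the shape of a registry generated from such a template; the plugin name is invented for illustration, grpc_register_plugin() is the public hook declared in <grpc/grpc.h>, and the registration call is an assumption about the template body that is not visible in this hunk:

    #include <grpc/grpc.h>

    /* Stand-in for one generated declaration pair. A real generated registry
       only declares the selected plugins' init/shutdown functions (they are
       defined in the plugins themselves); they are defined here so that this
       sketch compiles on its own. */
    void grpc_fake_plugin_init(void) {}
    void grpc_fake_plugin_shutdown(void) {}

    void grpc_register_built_in_plugins(void) {
      /* One registration per selected plugin. */
      grpc_register_plugin(grpc_fake_plugin_init, grpc_fake_plugin_shutdown);
    }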
diff --git a/templates/src/python/grpcio/grpc_core_dependencies.py.template b/templates/src/python/grpcio/grpc_core_dependencies.py.template
index 02e066c..6295ed3 100644
--- a/templates/src/python/grpcio/grpc_core_dependencies.py.template
+++ b/templates/src/python/grpcio/grpc_core_dependencies.py.template
@@ -20,7 +20,7 @@
   % for lib in libs:
   % if lib.name in python_dependencies.transitive_deps:
   % for src in lib.src:
-    '${src}',
+      '${src}',
   % endfor
   % endif
   % endfor
diff --git a/templates/src/python/grpcio/grpc_version.py.template b/templates/src/python/grpcio/grpc_version.py.template
index 38ae54d..5e5d221 100644
--- a/templates/src/python/grpcio/grpc_version.py.template
+++ b/templates/src/python/grpcio/grpc_version.py.template
@@ -16,4 +16,4 @@
 
   # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio/grpc_version.py.template`!!!
 
-  VERSION='${settings.python_version.pep440()}'
+  VERSION = '${settings.python_version.pep440()}'
diff --git a/templates/src/python/grpcio_health_checking/grpc_version.py.template b/templates/src/python/grpcio_health_checking/grpc_version.py.template
index 558b2d1..ffb81ac 100644
--- a/templates/src/python/grpcio_health_checking/grpc_version.py.template
+++ b/templates/src/python/grpcio_health_checking/grpc_version.py.template
@@ -16,4 +16,4 @@
 
   # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_health_checking/grpc_version.py.template`!!!
 
-  VERSION='${settings.python_version.pep440()}'
+  VERSION = '${settings.python_version.pep440()}'
diff --git a/templates/src/python/grpcio_reflection/grpc_version.py.template b/templates/src/python/grpcio_reflection/grpc_version.py.template
index 8fb42a4..cc99533 100644
--- a/templates/src/python/grpcio_reflection/grpc_version.py.template
+++ b/templates/src/python/grpcio_reflection/grpc_version.py.template
@@ -16,4 +16,4 @@
 
   # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
 
-  VERSION='${settings.python_version.pep440()}'
+  VERSION = '${settings.python_version.pep440()}'
diff --git a/templates/src/python/grpcio_testing/grpc_version.py.template b/templates/src/python/grpcio_testing/grpc_version.py.template
index 74db811..a49392e 100644
--- a/templates/src/python/grpcio_testing/grpc_version.py.template
+++ b/templates/src/python/grpcio_testing/grpc_version.py.template
@@ -16,4 +16,4 @@
 
   # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_testing/grpc_version.py.template`!!!
 
-  VERSION='${settings.python_version.pep440()}'
+  VERSION = '${settings.python_version.pep440()}'
diff --git a/templates/src/python/grpcio_tests/grpc_version.py.template b/templates/src/python/grpcio_tests/grpc_version.py.template
index 16fc92e..851fb7b 100644
--- a/templates/src/python/grpcio_tests/grpc_version.py.template
+++ b/templates/src/python/grpcio_tests/grpc_version.py.template
@@ -16,4 +16,4 @@
 
   # AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_tests/grpc_version.py.template`!!!
 
-  VERSION='${settings.python_version.pep440()}'
+  VERSION = '${settings.python_version.pep440()}'
diff --git a/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template b/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template
index aacb3ec..e25791d 100644
--- a/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template
+++ b/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template
@@ -16,4 +16,4 @@
 
   # AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
 
-  VERSION='${settings.python_version.pep440()}'
+  VERSION = '${settings.python_version.pep440()}'
diff --git a/templates/tools/dockerfile/test/sanity/Dockerfile.template b/templates/tools/dockerfile/test/sanity/Dockerfile.template
index fef5694..c98f7d4 100644
--- a/templates/tools/dockerfile/test/sanity/Dockerfile.template
+++ b/templates/tools/dockerfile/test/sanity/Dockerfile.template
@@ -29,7 +29,8 @@
         libtool ${"\\"}
         curl ${"\\"}
         python-virtualenv ${"\\"}
-        python-lxml
+        python-lxml ${"\\"}
+        shellcheck
   RUN pip install simplejson mako
   
   #======================================
diff --git a/test/core/backoff/BUILD b/test/core/backoff/BUILD
index 4cd7acf..6fbd654 100644
--- a/test/core/backoff/BUILD
+++ b/test/core/backoff/BUILD
@@ -26,11 +26,14 @@
 grpc_cc_test(
     name = "backoff_test",
     srcs = ["backoff_test.cc"],
+    external_deps = [
+        "gtest",
+    ],
     language = "C++",
     deps = [
-        "//:grpc",
-        "//test/core/util:grpc_test_util",
         "//:gpr",
+        "//:grpc",
         "//test/core/util:gpr_test_util",
+        "//test/core/util:grpc_test_util",
     ],
 )
diff --git a/test/core/backoff/backoff_test.cc b/test/core/backoff/backoff_test.cc
index ef2de8d..7bc4d14 100644
--- a/test/core/backoff/backoff_test.cc
+++ b/test/core/backoff/backoff_test.cc
@@ -18,137 +18,131 @@
 
 #include "src/core/lib/backoff/backoff.h"
 
+#include <algorithm>
+
 #include <grpc/support/log.h>
 #include <grpc/support/useful.h>
 
+#include <gtest/gtest.h>
 #include "test/core/util/test_config.h"
 
-static void test_constant_backoff(void) {
-  grpc_backoff backoff;
+namespace grpc {
+namespace testing {
+namespace {
+
+using grpc_core::BackOff;
+
+TEST(BackOffTest, ConstantBackOff) {
   const grpc_millis initial_backoff = 200;
   const double multiplier = 1.0;
   const double jitter = 0.0;
-  const grpc_millis min_connect_timeout = 100;
   const grpc_millis max_backoff = 1000;
-  grpc_backoff_init(&backoff, initial_backoff, multiplier, jitter,
-                    min_connect_timeout, max_backoff);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_backoff_result next_deadlines = grpc_backoff_begin(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline - grpc_exec_ctx_now(&exec_ctx) ==
-             initial_backoff);
-  GPR_ASSERT(next_deadlines.next_attempt_start_time -
-                 grpc_exec_ctx_now(&exec_ctx) ==
-             initial_backoff);
+  grpc_core::ExecCtx exec_ctx;
+  BackOff::Options options;
+  options.set_initial_backoff(initial_backoff)
+      .set_multiplier(multiplier)
+      .set_jitter(jitter)
+      .set_max_backoff(max_backoff);
+  BackOff backoff(options);
+
+  grpc_millis next_attempt_start_time = backoff.Begin();
+  EXPECT_EQ(next_attempt_start_time - grpc_core::ExecCtx::Get()->Now(),
+            initial_backoff);
   for (int i = 0; i < 10000; i++) {
-    next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-    GPR_ASSERT(next_deadlines.current_deadline - grpc_exec_ctx_now(&exec_ctx) ==
-               initial_backoff);
-    GPR_ASSERT(next_deadlines.next_attempt_start_time -
-                   grpc_exec_ctx_now(&exec_ctx) ==
-               initial_backoff);
-    exec_ctx.now = next_deadlines.current_deadline;
+    next_attempt_start_time = backoff.Step();
+    EXPECT_EQ(next_attempt_start_time - grpc_core::ExecCtx::Get()->Now(),
+              initial_backoff);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void test_min_connect(void) {
-  grpc_backoff backoff;
+TEST(BackOffTest, MinConnect) {
   const grpc_millis initial_backoff = 100;
   const double multiplier = 1.0;
   const double jitter = 0.0;
-  const grpc_millis min_connect_timeout = 200;
   const grpc_millis max_backoff = 1000;
-  grpc_backoff_init(&backoff, initial_backoff, multiplier, jitter,
-                    min_connect_timeout, max_backoff);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_backoff_result next = grpc_backoff_begin(&exec_ctx, &backoff);
-  // Because the min_connect_timeout > initial_backoff, current_deadline is used
-  // as the deadline for the current attempt.
-  GPR_ASSERT(next.current_deadline - grpc_exec_ctx_now(&exec_ctx) ==
-             min_connect_timeout);
-  // ... while, if the current attempt fails, the next one will happen after
-  // initial_backoff.
-  GPR_ASSERT(next.next_attempt_start_time - grpc_exec_ctx_now(&exec_ctx) ==
-             initial_backoff);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  BackOff::Options options;
+  options.set_initial_backoff(initial_backoff)
+      .set_multiplier(multiplier)
+      .set_jitter(jitter)
+      .set_max_backoff(max_backoff);
+  BackOff backoff(options);
+  grpc_millis next = backoff.Begin();
+  EXPECT_EQ(next - grpc_core::ExecCtx::Get()->Now(), initial_backoff);
 }
 
-static void test_no_jitter_backoff(void) {
-  grpc_backoff backoff;
+TEST(BackOffTest, NoJitterBackOff) {
   const grpc_millis initial_backoff = 2;
   const double multiplier = 2.0;
   const double jitter = 0.0;
-  const grpc_millis min_connect_timeout = 1;
   const grpc_millis max_backoff = 513;
-  grpc_backoff_init(&backoff, initial_backoff, multiplier, jitter,
-                    min_connect_timeout, max_backoff);
+  BackOff::Options options;
+  options.set_initial_backoff(initial_backoff)
+      .set_multiplier(multiplier)
+      .set_jitter(jitter)
+      .set_max_backoff(max_backoff);
+  BackOff backoff(options);
   // x_1 = 2
   // x_n = 2**n + x_{n-1} ( = 2**(n+1) - 2 )
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  exec_ctx.now = 0;
-  exec_ctx.now_is_valid = true;
-  grpc_backoff_result next_deadlines = grpc_backoff_begin(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline ==
-             next_deadlines.next_attempt_start_time);
-  GPR_ASSERT(next_deadlines.current_deadline == 2);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 6);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 14);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 30);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 62);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 126);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 254);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 510);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 1022);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(0);
+  grpc_millis next = backoff.Begin();
+  EXPECT_EQ(next, 2);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 6);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 14);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 30);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 62);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 126);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 254);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 510);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 1022);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
   // Hit the maximum timeout. From this point onwards, retries will increase
   // only by max timeout.
-  GPR_ASSERT(next_deadlines.current_deadline == 1535);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 2048);
-  exec_ctx.now = next_deadlines.current_deadline;
-  next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline == 2561);
-  grpc_exec_ctx_finish(&exec_ctx);
+  EXPECT_EQ(next, 1535);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 2048);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(next);
+  next = backoff.Step();
+  EXPECT_EQ(next, 2561);
 }
 
-static void test_jitter_backoff(void) {
+TEST(BackOffTest, JitterBackOff) {
   const grpc_millis initial_backoff = 500;
   grpc_millis current_backoff = initial_backoff;
   const grpc_millis max_backoff = 1000;
-  const grpc_millis min_connect_timeout = 100;
   const double multiplier = 1.0;
   const double jitter = 0.1;
-  grpc_backoff backoff;
-  grpc_backoff_init(&backoff, initial_backoff, multiplier, jitter,
-                    min_connect_timeout, max_backoff);
+  BackOff::Options options;
+  options.set_initial_backoff(initial_backoff)
+      .set_multiplier(multiplier)
+      .set_jitter(jitter)
+      .set_max_backoff(max_backoff);
+  BackOff backoff(options);
 
-  backoff.rng_state = 0;  // force consistent PRNG
+  backoff.SetRandomSeed(0);  // force consistent PRNG
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_backoff_result next_deadlines = grpc_backoff_begin(&exec_ctx, &backoff);
-  GPR_ASSERT(next_deadlines.current_deadline - grpc_exec_ctx_now(&exec_ctx) ==
-             initial_backoff);
-  GPR_ASSERT(next_deadlines.next_attempt_start_time -
-                 grpc_exec_ctx_now(&exec_ctx) ==
-             initial_backoff);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_millis next = backoff.Begin();
+  EXPECT_EQ(next - grpc_core::ExecCtx::Get()->Now(), initial_backoff);
 
   grpc_millis expected_next_lower_bound =
       (grpc_millis)((double)current_backoff * (1 - jitter));
@@ -156,32 +150,27 @@
       (grpc_millis)((double)current_backoff * (1 + jitter));
 
   for (int i = 0; i < 10000; i++) {
-    next_deadlines = grpc_backoff_step(&exec_ctx, &backoff);
+    next = backoff.Step();
     // next-now must be within (jitter*100)% of the current backoff (which
     // increases by * multiplier up to max_backoff).
-    const grpc_millis timeout_millis =
-        next_deadlines.current_deadline - grpc_exec_ctx_now(&exec_ctx);
-    GPR_ASSERT(timeout_millis >= expected_next_lower_bound);
-    GPR_ASSERT(timeout_millis <= expected_next_upper_bound);
-    current_backoff = GPR_MIN(
+    const grpc_millis timeout_millis = next - grpc_core::ExecCtx::Get()->Now();
+    EXPECT_GE(timeout_millis, expected_next_lower_bound);
+    EXPECT_LE(timeout_millis, expected_next_upper_bound);
+    current_backoff = std::min(
         (grpc_millis)((double)current_backoff * multiplier), max_backoff);
     expected_next_lower_bound =
         (grpc_millis)((double)current_backoff * (1 - jitter));
     expected_next_upper_bound =
         (grpc_millis)((double)current_backoff * (1 + jitter));
-    exec_ctx.now = next_deadlines.current_deadline;
   }
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
+}  // namespace
+}  // namespace testing
+}  // namespace grpc
+
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
-  gpr_time_init();
-
-  test_constant_backoff();
-  test_min_connect();
-  test_no_jitter_backoff();
-  test_jitter_backoff();
-
-  return 0;
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
 }
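The rewritten test keeps the original expectations: with initial_backoff = 2 ms and multiplier = 2, the unclamped deadlines follow x_n = 2**(n+1) - 2, and once the per-step backoff reaches max_backoff (513 ms here) each further deadline advances by that cap; the jitter test then checks every step against the window [B*(1-jitter), B*(1+jitter)]. A standalone sketch of that arithmetic, in plain C++ with no gRPC dependency, reproduces the values asserted above (it models the numbers only, not the BackOff implementation itself):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // No-jitter case: initial_backoff = 2, multiplier = 2, max_backoff = 513.
      int64_t backoff = 2;
      int64_t deadline = 0;
      for (int step = 0; step < 12; ++step) {
        deadline += backoff;  // 2, 6, 14, 30, 62, 126, 254, 510, 1022, 1535, 2048, 2561
        std::printf("step %2d: deadline = %lld ms\n", step, (long long)deadline);
        backoff = std::min<int64_t>(backoff * 2, 513);  // grow, clamped at max_backoff
      }
      // Jitter case: with jitter = 0.1 and current backoff B = 500 ms, each step
      // must land within jitter*100% of B, i.e. roughly [450, 550] here.
      const double jitter = 0.1;
      const int64_t current_backoff = 500;
      std::printf("jitter window: [%lld, %lld] ms\n",
                  (long long)(current_backoff * (1 - jitter)),
                  (long long)(current_backoff * (1 + jitter)));
      return 0;
    }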
diff --git a/test/core/bad_client/bad_client.cc b/test/core/bad_client/bad_client.cc
index 0a7b1c7..4c1642a 100644
--- a/test/core/bad_client/bad_client.cc
+++ b/test/core/bad_client/bad_client.cc
@@ -50,20 +50,19 @@
   gpr_event_set(&a->done_thd, (void*)1);
 }
 
-static void done_write(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void done_write(void* arg, grpc_error* error) {
   thd_args* a = (thd_args*)arg;
   gpr_event_set(&a->done_write, (void*)1);
 }
 
 static void server_setup_transport(void* ts, grpc_transport* transport) {
   thd_args* a = (thd_args*)ts;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_server_setup_transport(&exec_ctx, a->server, transport, nullptr,
+  grpc_core::ExecCtx exec_ctx;
+  grpc_server_setup_transport(a->server, transport, nullptr,
                               grpc_server_get_channel_args(a->server));
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void read_done(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void read_done(void* arg, grpc_error* error) {
   gpr_event* read_done = (gpr_event*)arg;
   gpr_event_set(read_done, (void*)1);
 }
@@ -81,7 +80,7 @@
       grpc_slice_from_copied_buffer(client_payload, client_payload_length);
   grpc_slice_buffer outgoing;
   grpc_closure done_write_closure;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_completion_queue* shutdown_cq;
 
   if (client_payload_length < 4 * 1024) {
@@ -115,21 +114,19 @@
                                   GRPC_BAD_CLIENT_REGISTERED_HOST,
                                   GRPC_SRM_PAYLOAD_READ_INITIAL_BYTE_BUFFER, 0);
   grpc_server_start(a.server);
-  transport =
-      grpc_create_chttp2_transport(&exec_ctx, nullptr, sfd.server, false);
+  transport = grpc_create_chttp2_transport(nullptr, sfd.server, false);
   server_setup_transport(&a, transport);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 
   /* Bind everything into the same pollset */
-  grpc_endpoint_add_to_pollset(&exec_ctx, sfd.client, grpc_cq_pollset(a.cq));
-  grpc_endpoint_add_to_pollset(&exec_ctx, sfd.server, grpc_cq_pollset(a.cq));
+  grpc_endpoint_add_to_pollset(sfd.client, grpc_cq_pollset(a.cq));
+  grpc_endpoint_add_to_pollset(sfd.server, grpc_cq_pollset(a.cq));
 
   /* Check a ground truth */
   GPR_ASSERT(grpc_server_has_open_connections(a.server));
 
   /* Start validator */
-  gpr_thd_new(&id, thd_func, &a, nullptr);
+  gpr_thd_new(&id, "grpc_bad_client", thd_func, &a, nullptr);
 
   grpc_slice_buffer_init(&outgoing);
   grpc_slice_buffer_add(&outgoing, slice);
@@ -137,8 +134,8 @@
                     grpc_schedule_on_exec_ctx);
 
   /* Write data */
-  grpc_endpoint_write(&exec_ctx, sfd.client, &outgoing, &done_write_closure);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_endpoint_write(sfd.client, &outgoing, &done_write_closure);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Await completion, unless the request is large and write may not finish
    * before the peer shuts down. */
@@ -149,10 +146,9 @@
 
   if (flags & GRPC_BAD_CLIENT_DISCONNECT) {
     grpc_endpoint_shutdown(
-        &exec_ctx, sfd.client,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Forced Disconnect"));
-    grpc_endpoint_destroy(&exec_ctx, sfd.client);
-    grpc_exec_ctx_finish(&exec_ctx);
+        sfd.client, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Forced Disconnect"));
+    grpc_endpoint_destroy(sfd.client);
+    grpc_core::ExecCtx::Get()->Flush();
     sfd.client = nullptr;
   }
 
@@ -171,9 +167,8 @@
         grpc_closure read_done_closure;
         GRPC_CLOSURE_INIT(&read_done_closure, read_done, &read_done_event,
                           grpc_schedule_on_exec_ctx);
-        grpc_endpoint_read(&exec_ctx, sfd.client, &incoming,
-                           &read_done_closure);
-        grpc_exec_ctx_finish(&exec_ctx);
+        grpc_endpoint_read(sfd.client, &incoming, &read_done_closure);
+        grpc_core::ExecCtx::Get()->Flush();
         do {
           GPR_ASSERT(gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) > 0);
           GPR_ASSERT(
@@ -186,14 +181,13 @@
                 "client validator failed; trying additional read "
                 "in case we didn't get all the data");
       }
-      grpc_slice_buffer_destroy_internal(&exec_ctx, &incoming);
+      grpc_slice_buffer_destroy_internal(&incoming);
     }
     // Shutdown.
     grpc_endpoint_shutdown(
-        &exec_ctx, sfd.client,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
-    grpc_endpoint_destroy(&exec_ctx, sfd.client);
-    grpc_exec_ctx_finish(&exec_ctx);
+        sfd.client, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
+    grpc_endpoint_destroy(sfd.client);
+    grpc_core::ExecCtx::Get()->Flush();
   }
 
   GPR_ASSERT(
@@ -207,8 +201,7 @@
   grpc_completion_queue_destroy(shutdown_cq);
   grpc_server_destroy(a.server);
   grpc_completion_queue_destroy(a.cq);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing);
+  grpc_slice_buffer_destroy_internal(&outgoing);
 
-  grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
 }
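This file also shows the wider exec_ctx refactor running through this merge: closure callbacks lose their explicit grpc_exec_ctx* parameter, call sites construct a grpc_core::ExecCtx on the stack, reach it through ExecCtx::Get(), and call Flush() where the old code used grpc_exec_ctx_flush/_finish. A rough standalone analogue of that pattern is sketched below; the class and its Enqueue() method are invented for illustration, only the Get()/Flush() shape mirrors the API used in the diff, and the real implementation in src/core/lib/iomgr/exec_ctx.h differs in detail:

    #include <cassert>
    #include <cstdio>

    class ScopedExecCtx {
     public:
      ScopedExecCtx() {
        assert(current_ == nullptr);  // one ambient context per thread
        current_ = this;
      }
      ~ScopedExecCtx() {
        Flush();            // drain anything still queued when the scope ends
        current_ = nullptr;
      }
      static ScopedExecCtx* Get() { return current_; }
      void Flush() {
        std::printf("draining %d queued closure(s)\n", pending_);
        pending_ = 0;
      }
      void Enqueue() { ++pending_; }  // stand-in for scheduling a closure

     private:
      static thread_local ScopedExecCtx* current_;
      int pending_ = 0;
    };

    thread_local ScopedExecCtx* ScopedExecCtx::current_ = nullptr;

    // Callees no longer take an exec_ctx argument; they use the ambient one.
    void some_core_operation() { ScopedExecCtx::Get()->Enqueue(); }

    int main() {
      ScopedExecCtx exec_ctx;  // replaces the GRPC_EXEC_CTX_INIT ... grpc_exec_ctx_finish pair
      some_core_operation();
      exec_ctx.Flush();        // where the old code called grpc_exec_ctx_flush
      return 0;
    }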
diff --git a/test/core/bad_client/tests/badreq.cc b/test/core/bad_client/tests/badreq.cc
index c30244e..eeaf4c9 100644
--- a/test/core/bad_client/tests/badreq.cc
+++ b/test/core/bad_client/tests/badreq.cc
@@ -20,6 +20,8 @@
 
 #include <string.h>
 
+#include <grpc/grpc.h>
+
 #include "src/core/lib/surface/server.h"
 #include "test/core/end2end/cq_verifier.h"
 
@@ -38,6 +40,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   /* invalid content type */
   GRPC_RUN_BAD_CLIENT_TEST(
@@ -126,5 +129,6 @@
       "\x10\x0auser-agent\"bad-client grpc-c/0.12.0.0 (linux)",
       GRPC_BAD_CLIENT_DISCONNECT);
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/bad_client/tests/connection_prefix.cc b/test/core/bad_client/tests/connection_prefix.cc
index 47252f9..4aab234 100644
--- a/test/core/bad_client/tests/connection_prefix.cc
+++ b/test/core/bad_client/tests/connection_prefix.cc
@@ -30,6 +30,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   GRPC_RUN_BAD_CLIENT_TEST(verifier, nullptr, "X", 0);
   GRPC_RUN_BAD_CLIENT_TEST(verifier, nullptr, "PX", 0);
@@ -57,5 +58,7 @@
                            0);
   GRPC_RUN_BAD_CLIENT_TEST(verifier, nullptr, "PRI * HTTP/2.0\r\n\r\nSM\r\n\rX",
                            0);
+
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/bad_client/tests/head_of_line_blocking.cc b/test/core/bad_client/tests/head_of_line_blocking.cc
index bbc5611..f56c4d7 100644
--- a/test/core/bad_client/tests/head_of_line_blocking.cc
+++ b/test/core/bad_client/tests/head_of_line_blocking.cc
@@ -20,6 +20,7 @@
 
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 
 #include "src/core/lib/surface/server.h"
@@ -109,6 +110,7 @@
 int main(int argc, char** argv) {
   int i;
   grpc_test_init(argc, argv);
+  grpc_init();
 
 #define NUM_FRAMES 10
 #define FRAME_SIZE 1000
@@ -131,6 +133,7 @@
   }
   grpc_run_bad_client_test(verifier, nullptr, g_buffer, g_count, 0);
   gpr_free(g_buffer);
+  grpc_shutdown();
 
   return 0;
 }
diff --git a/test/core/bad_client/tests/headers.cc b/test/core/bad_client/tests/headers.cc
index 50bb72c..2aa1b28 100644
--- a/test/core/bad_client/tests/headers.cc
+++ b/test/core/bad_client/tests/headers.cc
@@ -34,6 +34,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   /* partial http2 header prefixes */
   GRPC_RUN_BAD_CLIENT_TEST(verifier, nullptr, PFX_STR "\x00",
@@ -335,5 +336,6 @@
                            "15 seconds",
                            GRPC_BAD_CLIENT_DISCONNECT);
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/bad_client/tests/initial_settings_frame.cc b/test/core/bad_client/tests/initial_settings_frame.cc
index edc52f5..0220000 100644
--- a/test/core/bad_client/tests/initial_settings_frame.cc
+++ b/test/core/bad_client/tests/initial_settings_frame.cc
@@ -33,6 +33,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   /* various partial prefixes */
   GRPC_RUN_BAD_CLIENT_TEST(verifier, nullptr, PFX_STR "\x00",
@@ -106,5 +107,6 @@
                            PFX_STR ONE_SETTING_HDR "\x00\x99\x00\x00\x00\x00",
                            GRPC_BAD_CLIENT_DISCONNECT);
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/bad_client/tests/server_registered_method.cc b/test/core/bad_client/tests/server_registered_method.cc
index 6613c94..c2dc9c6 100644
--- a/test/core/bad_client/tests/server_registered_method.cc
+++ b/test/core/bad_client/tests/server_registered_method.cc
@@ -77,6 +77,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   /* body generated with
    * tools/codegen/core/gen_server_registered_method_bad_client_test_body.py */
@@ -123,5 +124,6 @@
       "\x00\x00\x07\x00\x01\x00\x00\x00\x01\x00\x00\x00\x00\x02\x00\x00",
       0);
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/bad_client/tests/simple_request.cc b/test/core/bad_client/tests/simple_request.cc
index 9f4a03e..c80fc5c 100644
--- a/test/core/bad_client/tests/simple_request.cc
+++ b/test/core/bad_client/tests/simple_request.cc
@@ -20,6 +20,8 @@
 
 #include <string.h>
 
+#include <grpc/grpc.h>
+
 #include "src/core/lib/surface/server.h"
 #include "test/core/end2end/cq_verifier.h"
 
@@ -122,6 +124,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   /* basic request: check that things are working */
   GRPC_RUN_BAD_CLIENT_TEST(verifier, nullptr, PFX_STR, 0);
@@ -164,5 +167,6 @@
   GRPC_RUN_BAD_CLIENT_TEST(failure_verifier, nullptr,
                            PFX_STR "\x00\x00\x00\x03\x10\x00\x00\x00\x01", 0);
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/bad_client/tests/unknown_frame.cc b/test/core/bad_client/tests/unknown_frame.cc
index d962a42..b1b618a 100644
--- a/test/core/bad_client/tests/unknown_frame.cc
+++ b/test/core/bad_client/tests/unknown_frame.cc
@@ -33,6 +33,7 @@
 }
 
 int main(int argc, char** argv) {
+  grpc_init();
   grpc_test_init(argc, argv);
 
   /* test adding prioritization data */
@@ -40,5 +41,6 @@
                            PFX_STR "\x00\x00\x00\x88\x00\x00\x00\x00\x01",
                            GRPC_BAD_CLIENT_DISCONNECT);
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/bad_client/tests/window_overflow.cc b/test/core/bad_client/tests/window_overflow.cc
index f4bd818..ed8279c 100644
--- a/test/core/bad_client/tests/window_overflow.cc
+++ b/test/core/bad_client/tests/window_overflow.cc
@@ -20,6 +20,7 @@
 
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 
 #include "src/core/lib/surface/server.h"
@@ -72,6 +73,7 @@
 #define SEND_SIZE (6 * 1024 * 1024)
 #define NUM_FRAMES (SEND_SIZE / FRAME_SIZE + 1)
   grpc_test_init(argc, argv);
+  grpc_init();
 
   addbuf(PFX_STR, sizeof(PFX_STR) - 1);
   for (i = 0; i < NUM_FRAMES; i++) {
@@ -93,6 +95,7 @@
   grpc_run_bad_client_test(verifier, nullptr, g_buffer, g_count,
                            GRPC_BAD_CLIENT_LARGE_REQUEST);
   gpr_free(g_buffer);
+  grpc_shutdown();
 
   return 0;
 }
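The bad_client test binaries in this merge all follow the same pattern: each main() now brackets its test cases with an explicit grpc_init()/grpc_shutdown() pair. The minimal shape, using only the public API from <grpc/grpc.h> (the test-runner body is elided):

    #include <grpc/grpc.h>

    int main(int argc, char** argv) {
      (void)argc;
      (void)argv;  /* real tests pass these to grpc_test_init() first */
      grpc_init();      /* bring up global library state before any test case */
      /* ... run bad-client test cases ... */
      grpc_shutdown();  /* tear it down again; pairs with grpc_init() above */
      return 0;
    }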
diff --git a/test/core/channel/channel_args_test.cc b/test/core/channel/channel_args_test.cc
index e8b3334..4a8195e 100644
--- a/test/core/channel/channel_args_test.cc
+++ b/test/core/channel/channel_args_test.cc
@@ -26,7 +26,7 @@
 #include "test/core/util/test_config.h"
 
 static void test_create(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   grpc_arg arg_int;
   grpc_arg arg_string;
@@ -55,12 +55,11 @@
   GPR_ASSERT(strcmp(ch_args->args[1].value.string, arg_string.value.string) ==
              0);
 
-  grpc_channel_args_destroy(&exec_ctx, ch_args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_channel_args_destroy(ch_args);
 }
 
 static void test_set_compression_algorithm(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_channel_args* ch_args;
 
   ch_args =
@@ -70,12 +69,11 @@
                     GRPC_COMPRESSION_CHANNEL_DEFAULT_ALGORITHM) == 0);
   GPR_ASSERT(ch_args->args[0].type == GRPC_ARG_INTEGER);
 
-  grpc_channel_args_destroy(&exec_ctx, ch_args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_channel_args_destroy(ch_args);
 }
 
 static void test_compression_algorithm_states(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_channel_args *ch_args, *ch_args_wo_gzip, *ch_args_wo_gzip_deflate;
   unsigned states_bitset;
   size_t i;
@@ -91,10 +89,10 @@
 
   /* disable gzip and deflate */
   ch_args_wo_gzip = grpc_channel_args_compression_algorithm_set_state(
-      &exec_ctx, &ch_args, GRPC_COMPRESS_GZIP, 0);
+      &ch_args, GRPC_COMPRESS_GZIP, 0);
   GPR_ASSERT(ch_args == ch_args_wo_gzip);
   ch_args_wo_gzip_deflate = grpc_channel_args_compression_algorithm_set_state(
-      &exec_ctx, &ch_args_wo_gzip, GRPC_COMPRESS_DEFLATE, 0);
+      &ch_args_wo_gzip, GRPC_COMPRESS_DEFLATE, 0);
   GPR_ASSERT(ch_args_wo_gzip == ch_args_wo_gzip_deflate);
 
   states_bitset = (unsigned)grpc_channel_args_compression_algorithm_get_states(
@@ -109,7 +107,7 @@
 
   /* re-enabled gzip only */
   ch_args_wo_gzip = grpc_channel_args_compression_algorithm_set_state(
-      &exec_ctx, &ch_args_wo_gzip_deflate, GRPC_COMPRESS_GZIP, 1);
+      &ch_args_wo_gzip_deflate, GRPC_COMPRESS_GZIP, 1);
   GPR_ASSERT(ch_args_wo_gzip == ch_args_wo_gzip_deflate);
 
   states_bitset = (unsigned)grpc_channel_args_compression_algorithm_get_states(
@@ -122,8 +120,7 @@
     }
   }
 
-  grpc_channel_args_destroy(&exec_ctx, ch_args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_channel_args_destroy(ch_args);
 }
 
 static void test_set_socket_mutator(void) {
@@ -137,9 +134,8 @@
   GPR_ASSERT(ch_args->args[0].type == GRPC_ARG_POINTER);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, ch_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(ch_args);
   }
 }
 
diff --git a/test/core/channel/channel_stack_builder_test.cc b/test/core/channel/channel_stack_builder_test.cc
index a67f0ef..ef6db81 100644
--- a/test/core/channel/channel_stack_builder_test.cc
+++ b/test/core/channel/channel_stack_builder_test.cc
@@ -29,34 +29,30 @@
 #include "src/core/lib/surface/channel_init.h"
 #include "test/core/util/test_config.h"
 
-static grpc_error* channel_init_func(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* channel_init_func(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* call_init_func(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* call_init_func(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void channel_destroy_func(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void channel_destroy_func(grpc_channel_element* elem) {}
 
-static void call_destroy_func(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void call_destroy_func(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {}
 
-static void call_func(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void call_func(grpc_call_element* elem,
                       grpc_transport_stream_op_batch* op) {}
 
-static void channel_func(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
-                         grpc_transport_op* op) {
+static void channel_func(grpc_channel_element* elem, grpc_transport_op* op) {
   if (op->disconnect_with_error != GRPC_ERROR_NONE) {
     GRPC_ERROR_UNREF(op->disconnect_with_error);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
 }
 
 bool g_replacement_fn_called = false;
@@ -106,8 +102,7 @@
     grpc_channel_next_get_info,
     "filter_name"};
 
-static bool add_replacement_filter(grpc_exec_ctx* exec_ctx,
-                                   grpc_channel_stack_builder* builder,
+static bool add_replacement_filter(grpc_channel_stack_builder* builder,
                                    void* arg) {
   const grpc_channel_filter* filter =
       static_cast<const grpc_channel_filter*>(arg);
@@ -118,8 +113,7 @@
       builder, filter, set_arg_once_fn, &g_replacement_fn_called);
 }
 
-static bool add_original_filter(grpc_exec_ctx* exec_ctx,
-                                grpc_channel_stack_builder* builder,
+static bool add_original_filter(grpc_channel_stack_builder* builder,
                                 void* arg) {
   return grpc_channel_stack_builder_prepend_filter(
       builder, (const grpc_channel_filter*)arg, set_arg_once_fn,
diff --git a/test/core/channel/channel_stack_test.cc b/test/core/channel/channel_stack_test.cc
index 988ea9b..ef43fac 100644
--- a/test/core/channel/channel_stack_test.cc
+++ b/test/core/channel/channel_stack_test.cc
@@ -27,8 +27,7 @@
 #include "src/core/lib/slice/slice_internal.h"
 #include "test/core/util/test_config.h"
 
-static grpc_error* channel_init_func(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* channel_init_func(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   GPR_ASSERT(args->channel_args->num_args == 1);
   GPR_ASSERT(args->channel_args->args[0].type == GRPC_ARG_INTEGER);
@@ -40,42 +39,37 @@
   return GRPC_ERROR_NONE;
 }
 
-static grpc_error* call_init_func(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* call_init_func(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   ++*(int*)(elem->channel_data);
   *(int*)(elem->call_data) = 0;
   return GRPC_ERROR_NONE;
 }
 
-static void channel_destroy_func(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void channel_destroy_func(grpc_channel_element* elem) {}
 
-static void call_destroy_func(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void call_destroy_func(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {
   ++*(int*)(elem->channel_data);
 }
 
-static void call_func(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void call_func(grpc_call_element* elem,
                       grpc_transport_stream_op_batch* op) {
   ++*(int*)(elem->call_data);
 }
 
-static void channel_func(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
-                         grpc_transport_op* op) {
+static void channel_func(grpc_channel_element* elem, grpc_transport_op* op) {
   ++*(int*)(elem->channel_data);
 }
 
-static void free_channel(grpc_exec_ctx* exec_ctx, void* arg,
-                         grpc_error* error) {
-  grpc_channel_stack_destroy(exec_ctx, static_cast<grpc_channel_stack*>(arg));
+static void free_channel(void* arg, grpc_error* error) {
+  grpc_channel_stack_destroy(static_cast<grpc_channel_stack*>(arg));
   gpr_free(arg);
 }
 
-static void free_call(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
-  grpc_call_stack_destroy(exec_ctx, static_cast<grpc_call_stack*>(arg), nullptr,
-                          nullptr);
+static void free_call(void* arg, grpc_error* error) {
+  grpc_call_stack_destroy(static_cast<grpc_call_stack*>(arg), nullptr, nullptr);
   gpr_free(arg);
 }
 
@@ -101,7 +95,7 @@
   grpc_channel_args chan_args;
   int* channel_data;
   int* call_data;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_slice path = grpc_slice_from_static_string("/service/method");
 
   arg.type = GRPC_ARG_INTEGER;
@@ -113,8 +107,8 @@
 
   channel_stack = static_cast<grpc_channel_stack*>(
       gpr_malloc(grpc_channel_stack_size(&filters, 1)));
-  grpc_channel_stack_init(&exec_ctx, 1, free_channel, channel_stack, &filters,
-                          1, &chan_args, nullptr, "test", channel_stack);
+  grpc_channel_stack_init(1, free_channel, channel_stack, &filters, 1,
+                          &chan_args, nullptr, "test", channel_stack);
   GPR_ASSERT(channel_stack->count == 1);
   channel_elem = grpc_channel_stack_element(channel_stack, 0);
   channel_data = (int*)channel_elem->channel_data;
@@ -132,8 +126,8 @@
       nullptr,                      /* arena */
       nullptr                       /* call_combiner */
   };
-  grpc_error* error = grpc_call_stack_init(&exec_ctx, channel_stack, 1,
-                                           free_call, call_stack, &args);
+  grpc_error* error =
+      grpc_call_stack_init(channel_stack, 1, free_call, call_stack, &args);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   GPR_ASSERT(call_stack->count == 1);
   call_elem = grpc_call_stack_element(call_stack, 0);
@@ -143,14 +137,13 @@
   GPR_ASSERT(*call_data == 0);
   GPR_ASSERT(*channel_data == 1);
 
-  GRPC_CALL_STACK_UNREF(&exec_ctx, call_stack, "done");
-  grpc_exec_ctx_flush(&exec_ctx);
+  GRPC_CALL_STACK_UNREF(call_stack, "done");
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(*channel_data == 2);
 
-  GRPC_CHANNEL_STACK_UNREF(&exec_ctx, channel_stack, "done");
+  GRPC_CHANNEL_STACK_UNREF(channel_stack, "done");
 
-  grpc_slice_unref_internal(&exec_ctx, path);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_slice_unref_internal(path);
 }
 
 int main(int argc, char** argv) {
diff --git a/test/core/channel/minimal_stack_is_minimal_test.cc b/test/core/channel/minimal_stack_is_minimal_test.cc
index e0cffa3..3495f60 100644
--- a/test/core/channel/minimal_stack_is_minimal_test.cc
+++ b/test/core/channel/minimal_stack_is_minimal_test.cc
@@ -125,12 +125,10 @@
     grpc_channel_stack_builder_set_transport(builder, &fake_transport);
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_stack_builder_set_channel_arguments(&exec_ctx, builder,
-                                                     channel_args);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_stack_builder_set_channel_arguments(builder, channel_args);
     GPR_ASSERT(grpc_channel_init_create_stack(
-        &exec_ctx, builder, (grpc_channel_stack_type)channel_stack_type));
-    grpc_exec_ctx_finish(&exec_ctx);
+        builder, (grpc_channel_stack_type)channel_stack_type));
   }
 
   // build up our expectation list
@@ -212,10 +210,9 @@
   gpr_free(expect);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_stack_builder_destroy(&exec_ctx, builder);
-    grpc_channel_args_destroy(&exec_ctx, channel_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_stack_builder_destroy(builder);
+    grpc_channel_args_destroy(channel_args);
   }
 
   return result;
diff --git a/test/core/client_channel/lb_policies_test.cc b/test/core/client_channel/lb_policies_test.cc
index 5f8d3b8..847ea00 100644
--- a/test/core/client_channel/lb_policies_test.cc
+++ b/test/core/client_channel/lb_policies_test.cc
@@ -651,9 +651,8 @@
   grpc_channel_args* args = grpc_channel_args_copy_and_add(nullptr, &arg, 1);
   channel = grpc_insecure_channel_create("ipv4:127.0.0.1:1234", args, nullptr);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(args);
   }
   // Ensures that resolver returns.
   grpc_channel_check_connectivity_state(channel, true /* try_to_connect */);
@@ -959,7 +958,7 @@
 }
 
 int main(int argc, char** argv) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   test_spec* spec;
   size_t i;
   const size_t NUM_ITERS = 10;
@@ -969,9 +968,9 @@
   grpc_test_init(argc, argv);
   grpc_tracer_set_enabled("round_robin", 1);
 
-  GPR_ASSERT(grpc_lb_policy_create(&exec_ctx, "this-lb-policy-does-not-exist",
-                                   nullptr) == nullptr);
-  GPR_ASSERT(grpc_lb_policy_create(&exec_ctx, nullptr, nullptr) == nullptr);
+  GPR_ASSERT(grpc_lb_policy_create("this-lb-policy-does-not-exist", nullptr) ==
+             nullptr);
+  GPR_ASSERT(grpc_lb_policy_create(nullptr, nullptr) == nullptr);
 
   spec = test_spec_create(NUM_ITERS, NUM_SERVERS);
   /* everything is fine, all servers stay up the whole time and life's peachy
@@ -1025,7 +1024,6 @@
   test_ping();
   test_get_channel_info();
 
-  grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
   return 0;
 }
diff --git a/test/core/client_channel/parse_address_test.cc b/test/core/client_channel/parse_address_test.cc
index 94f76da..6d56961 100644
--- a/test/core/client_channel/parse_address_test.cc
+++ b/test/core/client_channel/parse_address_test.cc
@@ -24,6 +24,7 @@
 #include <sys/un.h>
 #endif
 
+#include <grpc/grpc.h>
 #include <grpc/support/log.h>
 
 #include "src/core/lib/iomgr/exec_ctx.h"
@@ -33,8 +34,8 @@
 #ifdef GRPC_HAVE_UNIX_SOCKET
 
 static void test_grpc_parse_unix(const char* uri_text, const char* pathname) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_uri* uri = grpc_uri_parse(&exec_ctx, uri_text, 0);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_uri* uri = grpc_uri_parse(uri_text, 0);
   grpc_resolved_address addr;
 
   GPR_ASSERT(1 == grpc_parse_unix(uri, &addr));
@@ -43,7 +44,6 @@
   GPR_ASSERT(0 == strcmp(addr_un->sun_path, pathname));
 
   grpc_uri_destroy(uri);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 #else /* GRPC_HAVE_UNIX_SOCKET */
@@ -54,8 +54,8 @@
 
 static void test_grpc_parse_ipv4(const char* uri_text, const char* host,
                                  unsigned short port) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_uri* uri = grpc_uri_parse(&exec_ctx, uri_text, 0);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_uri* uri = grpc_uri_parse(uri_text, 0);
   grpc_resolved_address addr;
   char ntop_buf[INET_ADDRSTRLEN];
 
@@ -68,13 +68,12 @@
   GPR_ASSERT(ntohs(addr_in->sin_port) == port);
 
   grpc_uri_destroy(uri);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_grpc_parse_ipv6(const char* uri_text, const char* host,
                                  unsigned short port, uint32_t scope_id) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_uri* uri = grpc_uri_parse(&exec_ctx, uri_text, 0);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_uri* uri = grpc_uri_parse(uri_text, 0);
   grpc_resolved_address addr;
   char ntop_buf[INET6_ADDRSTRLEN];
 
@@ -88,14 +87,16 @@
   GPR_ASSERT(addr_in6->sin6_scope_id == scope_id);
 
   grpc_uri_destroy(uri);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   test_grpc_parse_unix("unix:/path/name", "/path/name");
   test_grpc_parse_ipv4("ipv4:192.0.2.1:12345", "192.0.2.1", 12345);
   test_grpc_parse_ipv6("ipv6:[2001:db8::1]:12345", "2001:db8::1", 12345, 0);
   test_grpc_parse_ipv6("ipv6:[2001:db8::1%252]:12345", "2001:db8::1", 12345, 2);
+
+  grpc_shutdown();
 }
diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc
index dcf315e..18a795f 100644
--- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc
+++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.cc
@@ -35,8 +35,7 @@
 static bool g_fail_resolution = true;
 static grpc_combiner* g_combiner;
 
-static void my_resolve_address(grpc_exec_ctx* exec_ctx, const char* addr,
-                               const char* default_port,
+static void my_resolve_address(const char* addr, const char* default_port,
                                grpc_pollset_set* interested_parties,
                                grpc_closure* on_done,
                                grpc_resolved_addresses** addrs) {
@@ -55,13 +54,13 @@
         gpr_malloc(sizeof(*(*addrs)->addrs)));
     (*addrs)->addrs[0].len = 123;
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(on_done, error);
 }
 
 static grpc_ares_request* my_dns_lookup_ares(
-    grpc_exec_ctx* exec_ctx, const char* dns_server, const char* addr,
-    const char* default_port, grpc_pollset_set* interested_parties,
-    grpc_closure* on_done, grpc_lb_addresses** lb_addrs, bool check_grpclb,
+    const char* dns_server, const char* addr, const char* default_port,
+    grpc_pollset_set* interested_parties, grpc_closure* on_done,
+    grpc_lb_addresses** lb_addrs, bool check_grpclb,
     char** service_config_json) {
   gpr_mu_lock(&g_mu);
   GPR_ASSERT(0 == strcmp("test", addr));
@@ -76,27 +75,26 @@
     grpc_lb_addresses_set_address(*lb_addrs, 0, nullptr, 0, false, nullptr,
                                   nullptr);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(on_done, error);
   return nullptr;
 }
 
-static grpc_resolver* create_resolver(grpc_exec_ctx* exec_ctx,
-                                      const char* name) {
+static grpc_resolver* create_resolver(const char* name) {
   grpc_resolver_factory* factory = grpc_resolver_factory_lookup("dns");
-  grpc_uri* uri = grpc_uri_parse(exec_ctx, name, 0);
+  grpc_uri* uri = grpc_uri_parse(name, 0);
   GPR_ASSERT(uri);
   grpc_resolver_args args;
   memset(&args, 0, sizeof(args));
   args.uri = uri;
   args.combiner = g_combiner;
   grpc_resolver* resolver =
-      grpc_resolver_factory_create_resolver(exec_ctx, factory, &args);
+      grpc_resolver_factory_create_resolver(factory, &args);
   grpc_resolver_factory_unref(factory);
   grpc_uri_destroy(uri);
   return resolver;
 }
 
-static void on_done(grpc_exec_ctx* exec_ctx, void* ev, grpc_error* error) {
+static void on_done(void* ev, grpc_error* error) {
   gpr_event_set((gpr_event*)ev, (void*)1);
 }
 
@@ -107,9 +105,8 @@
     if (gpr_event_wait(ev, grpc_timeout_seconds_to_deadline(1))) return true;
     deadline_seconds--;
 
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_timer_check(&exec_ctx, nullptr);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_timer_check(nullptr);
   }
   return false;
 }
@@ -120,16 +117,14 @@
   grpc_closure* on_complete;
 } next_args;
 
-static void call_resolver_next_now_lock_taken(grpc_exec_ctx* exec_ctx,
-                                              void* arg,
+static void call_resolver_next_now_lock_taken(void* arg,
                                               grpc_error* error_unused) {
   next_args* a = static_cast<next_args*>(arg);
-  grpc_resolver_next_locked(exec_ctx, a->resolver, a->result, a->on_complete);
+  grpc_resolver_next_locked(a->resolver, a->result, a->on_complete);
   gpr_free(a);
 }
 
-static void call_resolver_next_after_locking(grpc_exec_ctx* exec_ctx,
-                                             grpc_resolver* resolver,
+static void call_resolver_next_after_locking(grpc_resolver* resolver,
                                              grpc_channel_args** result,
                                              grpc_closure* on_complete) {
   next_args* a = static_cast<next_args*>(gpr_malloc(sizeof(*a)));
@@ -137,7 +132,6 @@
   a->result = result;
   a->on_complete = on_complete;
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       GRPC_CLOSURE_CREATE(call_resolver_next_now_lock_taken, a,
                           grpc_combiner_scheduler(resolver->combiner)),
       GRPC_ERROR_NONE);
@@ -153,30 +147,31 @@
   grpc_dns_lookup_ares = my_dns_lookup_ares;
   grpc_channel_args* result = (grpc_channel_args*)1;
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_resolver* resolver = create_resolver(&exec_ctx, "dns:test");
-  gpr_event ev1;
-  gpr_event_init(&ev1);
-  call_resolver_next_after_locking(
-      &exec_ctx, resolver, &result,
-      GRPC_CLOSURE_CREATE(on_done, &ev1, grpc_schedule_on_exec_ctx));
-  grpc_exec_ctx_flush(&exec_ctx);
-  GPR_ASSERT(wait_loop(5, &ev1));
-  GPR_ASSERT(result == nullptr);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resolver* resolver = create_resolver("dns:test");
+    gpr_event ev1;
+    gpr_event_init(&ev1);
+    call_resolver_next_after_locking(
+        resolver, &result,
+        GRPC_CLOSURE_CREATE(on_done, &ev1, grpc_schedule_on_exec_ctx));
+    grpc_core::ExecCtx::Get()->Flush();
+    GPR_ASSERT(wait_loop(5, &ev1));
+    GPR_ASSERT(result == nullptr);
 
-  gpr_event ev2;
-  gpr_event_init(&ev2);
-  call_resolver_next_after_locking(
-      &exec_ctx, resolver, &result,
-      GRPC_CLOSURE_CREATE(on_done, &ev2, grpc_schedule_on_exec_ctx));
-  grpc_exec_ctx_flush(&exec_ctx);
-  GPR_ASSERT(wait_loop(30, &ev2));
-  GPR_ASSERT(result != nullptr);
+    gpr_event ev2;
+    gpr_event_init(&ev2);
+    call_resolver_next_after_locking(
+        resolver, &result,
+        GRPC_CLOSURE_CREATE(on_done, &ev2, grpc_schedule_on_exec_ctx));
+    grpc_core::ExecCtx::Get()->Flush();
+    GPR_ASSERT(wait_loop(30, &ev2));
+    GPR_ASSERT(result != nullptr);
 
-  grpc_channel_args_destroy(&exec_ctx, result);
-  GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test");
-  GRPC_COMBINER_UNREF(&exec_ctx, g_combiner, "test");
-  grpc_exec_ctx_finish(&exec_ctx);
+    grpc_channel_args_destroy(result);
+    GRPC_RESOLVER_UNREF(resolver, "test");
+    GRPC_COMBINER_UNREF(g_combiner, "test");
+  }
 
   grpc_shutdown();
   gpr_mu_destroy(&g_mu);
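Alongside the call-site changes, every closure callback in these tests loses its leading grpc_exec_ctx* parameter (for example on_done, my_resolve_address, and call_resolver_next_now_lock_taken above), since the context is now ambient. A compilable sketch of the signature change, with Error as a stand-in for grpc_error and all names simplified:

// Before/after of the closure callback shape (Error is a stand-in for
// grpc_error; names are hypothetical).
#include <cstdio>

struct Error;  // opaque, never defined here

typedef void (*OldCallback)(void* exec_ctx, void* arg, Error* error);  // before
typedef void (*NewCallback)(void* arg, Error* error);                  // after

static void on_done(void* arg, Error* /*error*/) {  // matches the new shape
  std::printf("done: %s\n", static_cast<const char*>(arg));
}

int main() {
  NewCallback cb = on_done;
  cb(const_cast<char*>("resolution finished"), nullptr);
  (void)sizeof(OldCallback);  // keep the "before" typedef referenced
  return 0;
}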
diff --git a/test/core/client_channel/resolvers/dns_resolver_test.cc b/test/core/client_channel/resolvers/dns_resolver_test.cc
index 4c040ca..8066790 100644
--- a/test/core/client_channel/resolvers/dns_resolver_test.cc
+++ b/test/core/client_channel/resolvers/dns_resolver_test.cc
@@ -28,8 +28,8 @@
 static grpc_combiner* g_combiner;
 
 static void test_succeeds(grpc_resolver_factory* factory, const char* string) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_uri* uri = grpc_uri_parse(&exec_ctx, string, 0);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_uri* uri = grpc_uri_parse(string, 0);
   grpc_resolver_args args;
   grpc_resolver* resolver;
   gpr_log(GPR_DEBUG, "test: '%s' should be valid for '%s'", string,
@@ -38,16 +38,15 @@
   memset(&args, 0, sizeof(args));
   args.uri = uri;
   args.combiner = g_combiner;
-  resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args);
+  resolver = grpc_resolver_factory_create_resolver(factory, &args);
   GPR_ASSERT(resolver != nullptr);
-  GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_succeeds");
+  GRPC_RESOLVER_UNREF(resolver, "test_succeeds");
   grpc_uri_destroy(uri);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_fails(grpc_resolver_factory* factory, const char* string) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_uri* uri = grpc_uri_parse(&exec_ctx, string, 0);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_uri* uri = grpc_uri_parse(string, 0);
   grpc_resolver_args args;
   grpc_resolver* resolver;
   gpr_log(GPR_DEBUG, "test: '%s' should be invalid for '%s'", string,
@@ -56,10 +55,9 @@
   memset(&args, 0, sizeof(args));
   args.uri = uri;
   args.combiner = g_combiner;
-  resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args);
+  resolver = grpc_resolver_factory_create_resolver(factory, &args);
   GPR_ASSERT(resolver == nullptr);
   grpc_uri_destroy(uri);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 int main(int argc, char** argv) {
@@ -82,9 +80,8 @@
 
   grpc_resolver_factory_unref(dns);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    GRPC_COMBINER_UNREF(&exec_ctx, g_combiner, "test");
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    GRPC_COMBINER_UNREF(g_combiner, "test");
   }
   grpc_shutdown();
 
diff --git a/test/core/client_channel/resolvers/fake_resolver_test.cc b/test/core/client_channel/resolvers/fake_resolver_test.cc
index d5538e9..d85cbb1 100644
--- a/test/core/client_channel/resolvers/fake_resolver_test.cc
+++ b/test/core/client_channel/resolvers/fake_resolver_test.cc
@@ -33,7 +33,7 @@
 #include "test/core/util/test_config.h"
 
 static grpc_resolver* build_fake_resolver(
-    grpc_exec_ctx* exec_ctx, grpc_combiner* combiner,
+    grpc_combiner* combiner,
     grpc_fake_resolver_response_generator* response_generator) {
   grpc_resolver_factory* factory = grpc_resolver_factory_lookup("fake");
   grpc_arg generator_arg =
@@ -44,7 +44,7 @@
   args.args = &channel_args;
   args.combiner = combiner;
   grpc_resolver* resolver =
-      grpc_resolver_factory_create_resolver(exec_ctx, factory, &args);
+      grpc_resolver_factory_create_resolver(factory, &args);
   grpc_resolver_factory_unref(factory);
   return resolver;
 }
@@ -55,7 +55,7 @@
   gpr_event ev;
 } on_resolution_arg;
 
-void on_resolution_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+void on_resolution_cb(void* arg, grpc_error* error) {
   on_resolution_arg* res = static_cast<on_resolution_arg*>(arg);
   // We only check the addresses channel arg because that's the only one
   // explicitly set by the test via
@@ -66,24 +66,23 @@
       grpc_lb_addresses_find_channel_arg(res->expected_resolver_result);
   GPR_ASSERT(
       grpc_lb_addresses_cmp(actual_lb_addresses, expected_lb_addresses) == 0);
-  grpc_channel_args_destroy(exec_ctx, res->resolver_result);
-  grpc_channel_args_destroy(exec_ctx, res->expected_resolver_result);
+  grpc_channel_args_destroy(res->resolver_result);
+  grpc_channel_args_destroy(res->expected_resolver_result);
   gpr_event_set(&res->ev, (void*)1);
 }
 
 static void test_fake_resolver() {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_combiner* combiner = grpc_combiner_create();
   // Create resolver.
   grpc_fake_resolver_response_generator* response_generator =
       grpc_fake_resolver_response_generator_create();
-  grpc_resolver* resolver =
-      build_fake_resolver(&exec_ctx, combiner, response_generator);
+  grpc_resolver* resolver = build_fake_resolver(combiner, response_generator);
   GPR_ASSERT(resolver != nullptr);
 
   // Setup expectations.
-  grpc_uri* uris[] = {grpc_uri_parse(&exec_ctx, "ipv4:10.2.1.1:1234", true),
-                      grpc_uri_parse(&exec_ctx, "ipv4:127.0.0.1:4321", true)};
+  grpc_uri* uris[] = {grpc_uri_parse("ipv4:10.2.1.1:1234", true),
+                      grpc_uri_parse("ipv4:127.0.0.1:4321", true)};
   const char* balancer_names[] = {"name1", "name2"};
   const bool is_balancer[] = {true, false};
   grpc_lb_addresses* addresses = grpc_lb_addresses_create(3, nullptr);
@@ -96,7 +95,7 @@
       grpc_lb_addresses_create_channel_arg(addresses);
   grpc_channel_args* results =
       grpc_channel_args_copy_and_add(nullptr, &addresses_arg, 1);
-  grpc_lb_addresses_destroy(&exec_ctx, addresses);
+  grpc_lb_addresses_destroy(addresses);
   on_resolution_arg on_res_arg;
   memset(&on_res_arg, 0, sizeof(on_res_arg));
   on_res_arg.expected_resolver_result = results;
@@ -106,17 +105,16 @@
 
   // Set resolver results and trigger first resolution. on_resolution_cb
   // performs the checks.
-  grpc_fake_resolver_response_generator_set_response(
-      &exec_ctx, response_generator, results);
-  grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result,
+  grpc_fake_resolver_response_generator_set_response(response_generator,
+                                                     results);
+  grpc_resolver_next_locked(resolver, &on_res_arg.resolver_result,
                             on_resolution);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(gpr_event_wait(&on_res_arg.ev,
                             grpc_timeout_seconds_to_deadline(5)) != nullptr);
 
   // Setup update.
-  grpc_uri* uris_update[] = {
-      grpc_uri_parse(&exec_ctx, "ipv4:192.168.1.0:31416", true)};
+  grpc_uri* uris_update[] = {grpc_uri_parse("ipv4:192.168.1.0:31416", true)};
   const char* balancer_names_update[] = {"name3"};
   const bool is_balancer_update[] = {false};
   grpc_lb_addresses* addresses_update = grpc_lb_addresses_create(1, nullptr);
@@ -131,7 +129,7 @@
       grpc_lb_addresses_create_channel_arg(addresses_update);
   grpc_channel_args* results_update =
       grpc_channel_args_copy_and_add(nullptr, &addresses_update_arg, 1);
-  grpc_lb_addresses_destroy(&exec_ctx, addresses_update);
+  grpc_lb_addresses_destroy(addresses_update);
 
   // Setup expectations for the update.
   on_resolution_arg on_res_arg_update;
@@ -142,27 +140,27 @@
                                       grpc_combiner_scheduler(combiner));
 
   // Set updated resolver results and trigger a second resolution.
-  grpc_fake_resolver_response_generator_set_response(
-      &exec_ctx, response_generator, results_update);
-  grpc_resolver_next_locked(&exec_ctx, resolver,
-                            &on_res_arg_update.resolver_result, on_resolution);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_fake_resolver_response_generator_set_response(response_generator,
+                                                     results_update);
+  grpc_resolver_next_locked(resolver, &on_res_arg_update.resolver_result,
+                            on_resolution);
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(gpr_event_wait(&on_res_arg_update.ev,
                             grpc_timeout_seconds_to_deadline(5)) != nullptr);
 
   // Requesting a new resolution without re-sending the response shouldn't
   // trigger the resolution callback.
   memset(&on_res_arg, 0, sizeof(on_res_arg));
-  grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result,
+  grpc_resolver_next_locked(resolver, &on_res_arg.resolver_result,
                             on_resolution);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(gpr_event_wait(&on_res_arg.ev,
                             grpc_timeout_milliseconds_to_deadline(100)) ==
              nullptr);
 
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner, "test_fake_resolver");
-  GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_fake_resolver");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_COMBINER_UNREF(combiner, "test_fake_resolver");
+  GRPC_RESOLVER_UNREF(resolver, "test_fake_resolver");
+
   grpc_fake_resolver_response_generator_unref(response_generator);
 }
 
diff --git a/test/core/client_channel/resolvers/sockaddr_resolver_test.cc b/test/core/client_channel/resolvers/sockaddr_resolver_test.cc
index dfa2d12..4d16a77 100644
--- a/test/core/client_channel/resolvers/sockaddr_resolver_test.cc
+++ b/test/core/client_channel/resolvers/sockaddr_resolver_test.cc
@@ -35,14 +35,14 @@
   grpc_channel_args* resolver_result;
 } on_resolution_arg;
 
-void on_resolution_cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+void on_resolution_cb(void* arg, grpc_error* error) {
   on_resolution_arg* res = static_cast<on_resolution_arg*>(arg);
-  grpc_channel_args_destroy(exec_ctx, res->resolver_result);
+  grpc_channel_args_destroy(res->resolver_result);
 }
 
 static void test_succeeds(grpc_resolver_factory* factory, const char* string) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_uri* uri = grpc_uri_parse(&exec_ctx, string, 0);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_uri* uri = grpc_uri_parse(string, 0);
   grpc_resolver_args args;
   grpc_resolver* resolver;
   gpr_log(GPR_DEBUG, "test: '%s' should be valid for '%s'", string,
@@ -51,7 +51,7 @@
   memset(&args, 0, sizeof(args));
   args.uri = uri;
   args.combiner = g_combiner;
-  resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args);
+  resolver = grpc_resolver_factory_create_resolver(factory, &args);
   GPR_ASSERT(resolver != nullptr);
 
   on_resolution_arg on_res_arg;
@@ -60,16 +60,16 @@
   grpc_closure* on_resolution = GRPC_CLOSURE_CREATE(
       on_resolution_cb, &on_res_arg, grpc_schedule_on_exec_ctx);
 
-  grpc_resolver_next_locked(&exec_ctx, resolver, &on_res_arg.resolver_result,
+  grpc_resolver_next_locked(resolver, &on_res_arg.resolver_result,
                             on_resolution);
-  GRPC_RESOLVER_UNREF(&exec_ctx, resolver, "test_succeeds");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_RESOLVER_UNREF(resolver, "test_succeeds");
+
   grpc_uri_destroy(uri);
 }
 
 static void test_fails(grpc_resolver_factory* factory, const char* string) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_uri* uri = grpc_uri_parse(&exec_ctx, string, 0);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_uri* uri = grpc_uri_parse(string, 0);
   grpc_resolver_args args;
   grpc_resolver* resolver;
   gpr_log(GPR_DEBUG, "test: '%s' should be invalid for '%s'", string,
@@ -78,10 +78,9 @@
   memset(&args, 0, sizeof(args));
   args.uri = uri;
   args.combiner = g_combiner;
-  resolver = grpc_resolver_factory_create_resolver(&exec_ctx, factory, &args);
+  resolver = grpc_resolver_factory_create_resolver(factory, &args);
   GPR_ASSERT(resolver == nullptr);
   grpc_uri_destroy(uri);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 int main(int argc, char** argv) {
@@ -112,9 +111,8 @@
   grpc_resolver_factory_unref(ipv6);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    GRPC_COMBINER_UNREF(&exec_ctx, g_combiner, "test");
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    GRPC_COMBINER_UNREF(g_combiner, "test");
   }
   grpc_shutdown();
 
diff --git a/test/core/client_channel/uri_fuzzer_test.cc b/test/core/client_channel/uri_fuzzer_test.cc
index ba31793..ee38453 100644
--- a/test/core/client_channel/uri_fuzzer_test.cc
+++ b/test/core/client_channel/uri_fuzzer_test.cc
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 
 #include "src/core/ext/filters/client_channel/uri_parser.h"
@@ -33,12 +34,18 @@
   memcpy(s, data, size);
   s[size] = 0;
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_uri* x;
-  if ((x = grpc_uri_parse(&exec_ctx, s, 1))) {
-    grpc_uri_destroy(x);
+  grpc_init();
+
+  {
+    grpc_core::ExecCtx exec_ctx;
+    grpc_uri* x;
+    if ((x = grpc_uri_parse(s, 1))) {
+      grpc_uri_destroy(x);
+    }
+
+    gpr_free(s);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
-  gpr_free(s);
+
+  grpc_shutdown();
   return 0;
 }
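Note the shape of the fuzzer change above: grpc_init() comes first, the ExecCtx lives in an inner block, and grpc_shutdown() only runs after that block closes, so the context is flushed and destroyed before library teardown. A minimal sketch of that ordering, using hypothetical lib_init()/lib_shutdown()/ScopedCtx stand-ins rather than the real gRPC entry points:

// Sketch of the init / scoped-context / shutdown ordering (hypothetical names).
#include <cstdio>

static void lib_init() { std::puts("library up"); }
static void lib_shutdown() { std::puts("library down"); }

struct ScopedCtx {
  ScopedCtx() { std::puts("ctx created"); }
  ~ScopedCtx() { std::puts("ctx flushed and destroyed"); }
};

int main() {
  lib_init();
  {
    ScopedCtx ctx;  // lives strictly between init and shutdown
    // ... parse input, run assertions ...
  }                 // destroyed here, before the teardown below
  lib_shutdown();
  return 0;
}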
diff --git a/test/core/client_channel/uri_parser_test.cc b/test/core/client_channel/uri_parser_test.cc
index 30183f9..254bfdd 100644
--- a/test/core/client_channel/uri_parser_test.cc
+++ b/test/core/client_channel/uri_parser_test.cc
@@ -20,6 +20,7 @@
 
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/log.h>
 
 #include "src/core/lib/iomgr/exec_ctx.h"
@@ -28,29 +29,28 @@
 static void test_succeeds(const char* uri_text, const char* scheme,
                           const char* authority, const char* path,
                           const char* query, const char* fragment) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_uri* uri = grpc_uri_parse(&exec_ctx, uri_text, 0);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_uri* uri = grpc_uri_parse(uri_text, 0);
   GPR_ASSERT(uri);
   GPR_ASSERT(0 == strcmp(scheme, uri->scheme));
   GPR_ASSERT(0 == strcmp(authority, uri->authority));
   GPR_ASSERT(0 == strcmp(path, uri->path));
   GPR_ASSERT(0 == strcmp(query, uri->query));
   GPR_ASSERT(0 == strcmp(fragment, uri->fragment));
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_uri_destroy(uri);
 }
 
 static void test_fails(const char* uri_text) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GPR_ASSERT(nullptr == grpc_uri_parse(&exec_ctx, uri_text, 0));
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GPR_ASSERT(nullptr == grpc_uri_parse(uri_text, 0));
 }
 
 static void test_query_parts() {
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     const char* uri_text = "http://foo/path?a&b=B&c=&#frag";
-    grpc_uri* uri = grpc_uri_parse(&exec_ctx, uri_text, 0);
+    grpc_uri* uri = grpc_uri_parse(uri_text, 0);
     GPR_ASSERT(uri);
 
     GPR_ASSERT(0 == strcmp("http", uri->scheme));
@@ -77,14 +77,14 @@
     GPR_ASSERT(nullptr == grpc_uri_get_query_arg(uri, ""));
 
     GPR_ASSERT(0 == strcmp("frag", uri->fragment));
-    grpc_exec_ctx_finish(&exec_ctx);
+
     grpc_uri_destroy(uri);
   }
   {
     /* test the current behavior of multiple query part values */
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     const char* uri_text = "http://auth/path?foo=bar=baz&foobar==";
-    grpc_uri* uri = grpc_uri_parse(&exec_ctx, uri_text, 0);
+    grpc_uri* uri = grpc_uri_parse(uri_text, 0);
     GPR_ASSERT(uri);
 
     GPR_ASSERT(0 == strcmp("http", uri->scheme));
@@ -96,14 +96,13 @@
     GPR_ASSERT(0 == strcmp("bar", grpc_uri_get_query_arg(uri, "foo")));
     GPR_ASSERT(0 == strcmp("", grpc_uri_get_query_arg(uri, "foobar")));
 
-    grpc_exec_ctx_finish(&exec_ctx);
     grpc_uri_destroy(uri);
   }
   {
     /* empty query */
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     const char* uri_text = "http://foo/path";
-    grpc_uri* uri = grpc_uri_parse(&exec_ctx, uri_text, 0);
+    grpc_uri* uri = grpc_uri_parse(uri_text, 0);
     GPR_ASSERT(uri);
 
     GPR_ASSERT(0 == strcmp("http", uri->scheme));
@@ -114,13 +113,14 @@
     GPR_ASSERT(nullptr == uri->query_parts);
     GPR_ASSERT(nullptr == uri->query_parts_values);
     GPR_ASSERT(0 == strcmp("", uri->fragment));
-    grpc_exec_ctx_finish(&exec_ctx);
+
     grpc_uri_destroy(uri);
   }
 }
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   test_succeeds("http://www.google.com", "http", "www.google.com", "", "", "");
   test_succeeds("dns:///foo", "dns", "", "/foo", "", "");
   test_succeeds("http://www.google.com:90", "http", "www.google.com:90", "", "",
@@ -148,5 +148,6 @@
   test_fails("http://foo?bar#lol#");
 
   test_query_parts();
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/compression/algorithm_test.cc b/test/core/compression/algorithm_test.cc
index 2f1d8bc..9e811e9 100644
--- a/test/core/compression/algorithm_test.cc
+++ b/test/core/compression/algorithm_test.cc
@@ -39,7 +39,7 @@
     grpc_compression_algorithm parsed;
     grpc_slice mdstr;
     grpc_mdelem mdelem;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     GPR_ASSERT(
         grpc_compression_algorithm_name((grpc_compression_algorithm)i, &name));
     GPR_ASSERT(grpc_compression_algorithm_parse(
@@ -51,9 +51,8 @@
     mdelem = grpc_compression_encoding_mdelem(parsed);
     GPR_ASSERT(grpc_slice_eq(GRPC_MDVALUE(mdelem), mdstr));
     GPR_ASSERT(grpc_slice_eq(GRPC_MDKEY(mdelem), GRPC_MDSTR_GRPC_ENCODING));
-    grpc_slice_unref_internal(&exec_ctx, mdstr);
-    GRPC_MDELEM_UNREF(&exec_ctx, mdelem);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_slice_unref_internal(mdstr);
+    GRPC_MDELEM_UNREF(mdelem);
   }
 
   /* test failure */
@@ -62,7 +61,7 @@
 }
 
 static void test_algorithm_failure(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_slice mdstr;
 
   gpr_log(GPR_DEBUG, "test_algorithm_failure");
@@ -83,8 +82,7 @@
       grpc_compression_algorithm_slice(static_cast<grpc_compression_algorithm>(
           static_cast<int>(GRPC_COMPRESS_ALGORITHMS_COUNT) + 1)),
       grpc_empty_slice()));
-  grpc_slice_unref_internal(&exec_ctx, mdstr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_slice_unref_internal(mdstr);
 }
 
 int main(int argc, char** argv) {
diff --git a/test/core/compression/message_compress_test.cc b/test/core/compression/message_compress_test.cc
index 676415b..6ca07b7 100644
--- a/test/core/compression/message_compress_test.cc
+++ b/test/core/compression/message_compress_test.cc
@@ -70,10 +70,8 @@
   grpc_split_slices_to_buffer(uncompressed_split_mode, &value, 1, &input);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    was_compressed =
-        grpc_msg_compress(&exec_ctx, algorithm, &input, &compressed_raw);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    was_compressed = grpc_msg_compress(algorithm, &input, &compressed_raw);
   }
   GPR_ASSERT(input.count > 0);
 
@@ -92,11 +90,9 @@
   grpc_split_slice_buffer(compressed_split_mode, &compressed_raw, &compressed);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     GPR_ASSERT(grpc_msg_decompress(
-        &exec_ctx, was_compressed ? algorithm : GRPC_COMPRESS_NONE, &compressed,
-        &output));
-    grpc_exec_ctx_finish(&exec_ctx);
+        was_compressed ? algorithm : GRPC_COMPRESS_NONE, &compressed, &output));
   }
 
   final = grpc_slice_merge(output.slices, output.count);
@@ -156,11 +152,11 @@
 
   for (int i = 0; i < GRPC_COMPRESS_ALGORITHMS_COUNT; i++) {
     if (i == GRPC_COMPRESS_NONE) continue;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    GPR_ASSERT(0 == grpc_msg_compress(
-                        &exec_ctx, static_cast<grpc_compression_algorithm>(i),
-                        &input, &output));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    GPR_ASSERT(0 ==
+               grpc_msg_compress(static_cast<grpc_compression_algorithm>(i),
+                                 &input, &output));
+
     GPR_ASSERT(1 == output.count);
   }
 
@@ -180,9 +176,9 @@
   grpc_slice_buffer_init(&output);
   grpc_slice_buffer_add(&input, create_test_value(ONE_MB_A));
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   /* compress it */
-  grpc_msg_compress(&exec_ctx, GRPC_COMPRESS_GZIP, &input, &corrupted);
+  grpc_msg_compress(GRPC_COMPRESS_GZIP, &input, &corrupted);
   /* corrupt the output by smashing the CRC */
   GPR_ASSERT(corrupted.count > 1);
   GPR_ASSERT(GRPC_SLICE_LENGTH(corrupted.slices[1]) > 8);
@@ -190,9 +186,7 @@
   memcpy(GRPC_SLICE_START_PTR(corrupted.slices[1]) + idx, &bad, 4);
 
   /* try (and fail) to decompress the corrupted compressed buffer */
-  GPR_ASSERT(0 == grpc_msg_decompress(&exec_ctx, GRPC_COMPRESS_GZIP, &corrupted,
-                                      &output));
-  grpc_exec_ctx_finish(&exec_ctx);
+  GPR_ASSERT(0 == grpc_msg_decompress(GRPC_COMPRESS_GZIP, &corrupted, &output));
 
   grpc_slice_buffer_destroy(&input);
   grpc_slice_buffer_destroy(&corrupted);
@@ -211,10 +205,8 @@
                   "\x78\xda\x63\x60\x60\x60\x00\x00\x00\x04\x00\x01\x99", 13));
 
   /* try (and fail) to decompress the invalid compressed buffer */
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GPR_ASSERT(0 == grpc_msg_decompress(&exec_ctx, GRPC_COMPRESS_DEFLATE, &input,
-                                      &output));
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GPR_ASSERT(0 == grpc_msg_decompress(GRPC_COMPRESS_DEFLATE, &input, &output));
 
   grpc_slice_buffer_destroy(&input);
   grpc_slice_buffer_destroy(&output);
@@ -230,10 +222,8 @@
                         grpc_slice_from_copied_buffer("\x78\xda\xff\xff", 4));
 
   /* try (and fail) to decompress the invalid compressed buffer */
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GPR_ASSERT(0 == grpc_msg_decompress(&exec_ctx, GRPC_COMPRESS_DEFLATE, &input,
-                                      &output));
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GPR_ASSERT(0 == grpc_msg_decompress(GRPC_COMPRESS_DEFLATE, &input, &output));
 
   grpc_slice_buffer_destroy(&input);
   grpc_slice_buffer_destroy(&output);
@@ -249,17 +239,15 @@
   grpc_slice_buffer_add(
       &input, grpc_slice_from_copied_string("Never gonna give you up"));
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  was_compressed = grpc_msg_compress(&exec_ctx, GRPC_COMPRESS_ALGORITHMS_COUNT,
-                                     &input, &output);
+  grpc_core::ExecCtx exec_ctx;
+  was_compressed =
+      grpc_msg_compress(GRPC_COMPRESS_ALGORITHMS_COUNT, &input, &output);
   GPR_ASSERT(0 == was_compressed);
 
-  was_compressed = grpc_msg_compress(&exec_ctx,
-                                     static_cast<grpc_compression_algorithm>(
+  was_compressed = grpc_msg_compress(static_cast<grpc_compression_algorithm>(
                                          GRPC_COMPRESS_ALGORITHMS_COUNT + 123),
                                      &input, &output);
   GPR_ASSERT(0 == was_compressed);
-  grpc_exec_ctx_finish(&exec_ctx);
 
   grpc_slice_buffer_destroy(&input);
   grpc_slice_buffer_destroy(&output);
@@ -275,18 +263,16 @@
   grpc_slice_buffer_add(&input,
                         grpc_slice_from_copied_string(
                             "I'm not really compressed but it doesn't matter"));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  was_decompressed = grpc_msg_decompress(
-      &exec_ctx, GRPC_COMPRESS_ALGORITHMS_COUNT, &input, &output);
+  grpc_core::ExecCtx exec_ctx;
+  was_decompressed =
+      grpc_msg_decompress(GRPC_COMPRESS_ALGORITHMS_COUNT, &input, &output);
   GPR_ASSERT(0 == was_decompressed);
 
   was_decompressed =
-      grpc_msg_decompress(&exec_ctx,
-                          static_cast<grpc_compression_algorithm>(
+      grpc_msg_decompress(static_cast<grpc_compression_algorithm>(
                               GRPC_COMPRESS_ALGORITHMS_COUNT + 123),
                           &input, &output);
   GPR_ASSERT(0 == was_decompressed);
-  grpc_exec_ctx_finish(&exec_ctx);
 
   grpc_slice_buffer_destroy(&input);
   grpc_slice_buffer_destroy(&output);
diff --git a/test/core/debug/stats_test.cc b/test/core/debug/stats_test.cc
index 5fae61f..e60e54b 100644
--- a/test/core/debug/stats_test.cc
+++ b/test/core/debug/stats_test.cc
@@ -49,9 +49,8 @@
   for (int i = 0; i < GRPC_STATS_COUNTER_COUNT; i++) {
     Snapshot snapshot;
 
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    GRPC_STATS_INC_COUNTER(&exec_ctx, (grpc_stats_counters)i);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    GRPC_STATS_INC_COUNTER((grpc_stats_counters)i);
 
     EXPECT_EQ(snapshot.delta().counters[i], 1);
   }
@@ -60,9 +59,8 @@
 TEST(StatsTest, IncSpecificCounter) {
   Snapshot snapshot;
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GRPC_STATS_INC_SYSCALL_POLL(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GRPC_STATS_INC_SYSCALL_POLL();
 
   EXPECT_EQ(snapshot.delta().counters[GRPC_STATS_COUNTER_SYSCALL_POLL], 1);
 }
@@ -94,9 +92,8 @@
     for (auto j : test_values) {
       Snapshot snapshot;
 
-      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-      grpc_stats_inc_histogram[kHistogram](&exec_ctx, j);
-      grpc_exec_ctx_finish(&exec_ctx);
+      grpc_core::ExecCtx exec_ctx;
+      grpc_stats_inc_histogram[kHistogram](j);
 
       auto delta = snapshot.delta();
 
diff --git a/test/core/end2end/bad_server_response_test.cc b/test/core/end2end/bad_server_response_test.cc
index b0c6ae9..93809ac 100644
--- a/test/core/end2end/bad_server_response_test.cc
+++ b/test/core/end2end/bad_server_response_test.cc
@@ -91,22 +91,22 @@
 
 static void* tag(intptr_t t) { return (void*)t; }
 
-static void done_write(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void done_write(void* arg, grpc_error* error) {
   GPR_ASSERT(error == GRPC_ERROR_NONE);
 
   gpr_atm_rel_store(&state.done_atm, 1);
 }
 
-static void handle_write(grpc_exec_ctx* exec_ctx) {
+static void handle_write() {
   grpc_slice slice = grpc_slice_from_copied_buffer(
       state.response_payload, state.response_payload_length);
 
   grpc_slice_buffer_reset_and_unref(&state.outgoing_buffer);
   grpc_slice_buffer_add(&state.outgoing_buffer, slice);
-  grpc_endpoint_write(exec_ctx, state.tcp, &state.outgoing_buffer, &on_write);
+  grpc_endpoint_write(state.tcp, &state.outgoing_buffer, &on_write);
 }
 
-static void handle_read(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void handle_read(void* arg, grpc_error* error) {
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   state.incoming_data_length += state.temp_incoming_buffer.length;
 
@@ -123,14 +123,13 @@
           SERVER_INCOMING_DATA_LENGTH_LOWER_THRESHOLD);
   if (state.incoming_data_length >=
       SERVER_INCOMING_DATA_LENGTH_LOWER_THRESHOLD) {
-    handle_write(exec_ctx);
+    handle_write();
   } else {
-    grpc_endpoint_read(exec_ctx, state.tcp, &state.temp_incoming_buffer,
-                       &on_read);
+    grpc_endpoint_read(state.tcp, &state.temp_incoming_buffer, &on_read);
   }
 }
 
-static void on_connect(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* tcp,
+static void on_connect(void* arg, grpc_endpoint* tcp,
                        grpc_pollset* accepting_pollset,
                        grpc_tcp_server_acceptor* acceptor) {
   gpr_free(acceptor);
@@ -141,8 +140,8 @@
   grpc_slice_buffer_init(&state.outgoing_buffer);
   state.tcp = tcp;
   state.incoming_data_length = 0;
-  grpc_endpoint_add_to_pollset(exec_ctx, tcp, server->pollset);
-  grpc_endpoint_read(exec_ctx, tcp, &state.temp_incoming_buffer, &on_read);
+  grpc_endpoint_add_to_pollset(tcp, server->pollset);
+  grpc_endpoint_read(tcp, &state.temp_incoming_buffer, &on_read);
 }
 
 static gpr_timespec n_sec_deadline(int seconds) {
@@ -217,10 +216,10 @@
   cq_verifier_destroy(cqv);
 }
 
-static void cleanup_rpc(grpc_exec_ctx* exec_ctx) {
+static void cleanup_rpc() {
   grpc_event ev;
-  grpc_slice_buffer_destroy_internal(exec_ctx, &state.temp_incoming_buffer);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &state.outgoing_buffer);
+  grpc_slice_buffer_destroy_internal(&state.temp_incoming_buffer);
+  grpc_slice_buffer_destroy_internal(&state.outgoing_buffer);
   grpc_call_unref(state.call);
   grpc_completion_queue_shutdown(state.cq);
   do {
@@ -262,7 +261,7 @@
   poll_args* pa = (poll_args*)gpr_malloc(sizeof(*pa));
   pa->server = server;
   pa->signal_when_done = signal_when_done;
-  gpr_thd_new(&id, actually_poll_server, pa, nullptr);
+  gpr_thd_new(&id, "grpc_poll_server", actually_poll_server, pa, nullptr);
 }
 
 static void run_test(const char* response_payload,
@@ -270,7 +269,7 @@
                      grpc_status_code expected_status,
                      const char* expected_detail) {
   test_tcp_server test_server;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_event ev;
 
   grpc_init();
@@ -287,11 +286,11 @@
   gpr_event_wait(&ev, gpr_inf_future(GPR_CLOCK_REALTIME));
 
   /* clean up */
-  grpc_endpoint_shutdown(&exec_ctx, state.tcp,
+  grpc_endpoint_shutdown(state.tcp,
                          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
-  grpc_endpoint_destroy(&exec_ctx, state.tcp);
-  cleanup_rpc(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_endpoint_destroy(state.tcp);
+  cleanup_rpc();
+  grpc_core::ExecCtx::Get()->Flush();
   test_tcp_server_destroy(&test_server);
 
   grpc_shutdown();
@@ -299,6 +298,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   /* status defined in hpack static table */
   run_test(HTTP2_RESP(204), sizeof(HTTP2_RESP(204)) - 1, GRPC_STATUS_CANCELLED,
@@ -337,5 +337,6 @@
   run_test(HTTP1_RESP, sizeof(HTTP1_RESP) - 1, GRPC_STATUS_UNAVAILABLE,
            HTTP1_DETAIL_MSG);
 
+  grpc_shutdown();
   return 0;
 }
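One incidental change above is the extra argument to gpr_thd_new, which now takes a thread name ("grpc_poll_server"). The sketch below shows the general idea of carrying a name with a spawned thread so it can be attached to log output; it is a hypothetical wrapper over std::thread, not the gpr_thd_new implementation.

// Hypothetical named-thread wrapper for illustration only.
#include <cstdio>
#include <string>
#include <thread>

static void spawn_named(const std::string& name, void (*fn)(void*), void* arg) {
  std::thread t([name, fn, arg] {
    std::printf("[%s] thread started\n", name.c_str());  // name tags the logs
    fn(arg);
  });
  t.join();  // the real helper hands back a thread id instead of joining here
}

static void poll_server(void* /*arg*/) { /* poll loop would run here */ }

int main() {
  spawn_named("grpc_poll_server", poll_server, nullptr);
  return 0;
}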
diff --git a/test/core/end2end/connection_refused_test.cc b/test/core/end2end/connection_refused_test.cc
index f3f2dda..ca6d17e 100644
--- a/test/core/end2end/connection_refused_test.cc
+++ b/test/core/end2end/connection_refused_test.cc
@@ -133,9 +133,8 @@
   grpc_metadata_array_destroy(&trailing_metadata_recv);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    if (args != nullptr) grpc_channel_args_destroy(&exec_ctx, args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    if (args != nullptr) grpc_channel_args_destroy(args);
   }
 
   grpc_shutdown();
diff --git a/test/core/end2end/cq_verifier.h b/test/core/end2end/cq_verifier.h
index 0b3b3fb..959f849 100644
--- a/test/core/end2end/cq_verifier.h
+++ b/test/core/end2end/cq_verifier.h
@@ -24,10 +24,6 @@
 #include <grpc/grpc.h>
 #include "test/core/util/test_config.h"
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 /* A cq_verifier can verify that expected events arrive in a timely fashion
    on a single completion queue */
 
@@ -63,8 +59,4 @@
 int contains_metadata_slices(grpc_metadata_array* array, grpc_slice key,
                              grpc_slice value);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_TEST_CORE_END2END_CQ_VERIFIER_H */
diff --git a/test/core/end2end/data/client_certs.cc b/test/core/end2end/data/client_certs.cc
index 7e0b10d..46fc139 100644
--- a/test/core/end2end/data/client_certs.cc
+++ b/test/core/end2end/data/client_certs.cc
@@ -16,7 +16,7 @@
  *
  */
 
-extern "C" const char test_self_signed_client_cert[] = {
+extern const char test_self_signed_client_cert[] = {
     0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x43,
     0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d,
     0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43, 0x6f, 0x44, 0x43, 0x43,
@@ -100,7 +100,7 @@
     0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d,
     0x0a, 0x00};
 
-extern "C" const char test_self_signed_client_key[] = {
+extern const char test_self_signed_client_key[] = {
     0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x50,
     0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x20, 0x4b, 0x45, 0x59, 0x2d, 0x2d,
     0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43, 0x64, 0x77, 0x49, 0x42,
@@ -179,7 +179,7 @@
     0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x20, 0x4b, 0x45, 0x59, 0x2d, 0x2d,
     0x2d, 0x2d, 0x2d, 0x0a, 0x00};
 
-extern "C" const char test_signed_client_cert[] = {
+extern const char test_signed_client_cert[] = {
     0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x43,
     0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d,
     0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43, 0x48, 0x7a, 0x43, 0x43,
@@ -248,7 +248,7 @@
     0x20, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45,
     0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x00};
 
-extern "C" const char test_signed_client_key[] = {
+extern const char test_signed_client_key[] = {
     0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x50,
     0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x20, 0x4b, 0x45, 0x59, 0x2d, 0x2d,
     0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43, 0x65, 0x51, 0x49, 0x42,
diff --git a/test/core/end2end/data/server1_cert.cc b/test/core/end2end/data/server1_cert.cc
index dd09810..0943244 100644
--- a/test/core/end2end/data/server1_cert.cc
+++ b/test/core/end2end/data/server1_cert.cc
@@ -16,7 +16,7 @@
  *
  */
 
-extern "C" const char test_server1_cert[] = {
+extern const char test_server1_cert[] = {
     0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x43,
     0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d,
     0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43, 0x6e, 0x44, 0x43, 0x43,
diff --git a/test/core/end2end/data/server1_key.cc b/test/core/end2end/data/server1_key.cc
index 59dcaf6..8f3ad15 100644
--- a/test/core/end2end/data/server1_key.cc
+++ b/test/core/end2end/data/server1_key.cc
@@ -16,7 +16,7 @@
  *
  */
 
-extern "C" const char test_server1_key[] = {
+extern const char test_server1_key[] = {
     0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x52,
     0x53, 0x41, 0x20, 0x50, 0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x20, 0x4b,
     0x45, 0x59, 0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43,
diff --git a/test/core/end2end/data/ssl_test_data.h b/test/core/end2end/data/ssl_test_data.h
index e9c7dbc..303f3a6 100644
--- a/test/core/end2end/data/ssl_test_data.h
+++ b/test/core/end2end/data/ssl_test_data.h
@@ -19,10 +19,6 @@
 #ifndef GRPC_TEST_CORE_END2END_DATA_SSL_TEST_DATA_H
 #define GRPC_TEST_CORE_END2END_DATA_SSL_TEST_DATA_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern const char test_root_cert[];
 extern const char test_server1_cert[];
 extern const char test_server1_key[];
@@ -31,8 +27,4 @@
 extern const char test_signed_client_cert[];
 extern const char test_signed_client_key[];
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_TEST_CORE_END2END_DATA_SSL_TEST_DATA_H */
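With the extern "C" block gone from ssl_test_data.h, the cert and key arrays it declares get C++ linkage, so the definitions in the .cc files above drop their extern "C" as well; otherwise the symbols would not match at link time. A small self-contained illustration, where example_cert is a hypothetical name standing in for test_server1_cert and friends:

// Declaration and definition must agree on language linkage; both use plain
// C++ linkage here (extern keeps the const array externally visible).
#include <cstdio>

extern const char example_cert[];                     // what the header now declares
extern const char example_cert[] = {'O', 'K', '\0'};  // what the .cc file now defines

int main() {
  std::printf("%s\n", example_cert);
  return 0;
}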
diff --git a/test/core/end2end/data/test_root_cert.cc b/test/core/end2end/data/test_root_cert.cc
index 36fca2e..b4771b2 100644
--- a/test/core/end2end/data/test_root_cert.cc
+++ b/test/core/end2end/data/test_root_cert.cc
@@ -16,7 +16,7 @@
  *
  */
 
-extern "C" const char test_root_cert[] = {
+extern const char test_root_cert[] = {
     0x2d, 0x2d, 0x2d, 0x2d, 0x2d, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x20, 0x43,
     0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x2d, 0x2d,
     0x2d, 0x2d, 0x2d, 0x0a, 0x4d, 0x49, 0x49, 0x43, 0x49, 0x7a, 0x43, 0x43,
diff --git a/test/core/end2end/dualstack_socket_test.cc b/test/core/end2end/dualstack_socket_test.cc
index ad2b24f..2ba1c17 100644
--- a/test/core/end2end/dualstack_socket_test.cc
+++ b/test/core/end2end/dualstack_socket_test.cc
@@ -29,7 +29,9 @@
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
 
+#include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/iomgr/resolve_address.h"
+#include "src/core/lib/iomgr/sockaddr_utils.h"
 #include "src/core/lib/iomgr/socket_utils_posix.h"
 #include "src/core/lib/slice/slice_string_helpers.h"
 #include "src/core/lib/support/string.h"
@@ -54,6 +56,21 @@
 
 static void do_nothing(void* ignored) {}
 
+static void log_resolved_addrs(const char* label, const char* hostname) {
+  grpc_resolved_addresses* res = nullptr;
+  grpc_error* error = grpc_blocking_resolve_address(hostname, "80", &res);
+  if (error != GRPC_ERROR_NONE || res == nullptr) {
+    GRPC_LOG_IF_ERROR(hostname, error);
+    return;
+  }
+  for (size_t i = 0; i < res->naddrs; ++i) {
+    char* addr_str = grpc_sockaddr_to_uri(&res->addrs[i]);
+    gpr_log(GPR_INFO, "%s: %s", label, addr_str);
+    gpr_free(addr_str);
+  }
+  grpc_resolved_addresses_destroy(res);
+}
+
 void test_connect(const char* server_host, const char* client_host, int port,
                   int expect_ok) {
   char* client_hostport;
@@ -140,6 +157,8 @@
 
   gpr_log(GPR_INFO, "Testing with server=%s client=%s (expecting %s)",
           server_hostport, client_hostport, expect_ok ? "success" : "failure");
+  log_resolved_addrs("server resolved addr", server_host);
+  log_resolved_addrs("client resolved addr", client_host);
 
   gpr_free(client_hostport);
   gpr_free(server_hostport);
@@ -236,6 +255,8 @@
     CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
     cq_verify(cqv);
 
+    gpr_log(GPR_INFO, "status: %d (expected: %d)", status,
+            GRPC_STATUS_UNAVAILABLE);
     GPR_ASSERT(status == GRPC_STATUS_UNAVAILABLE);
   }
 
diff --git a/test/core/end2end/end2end_nosec_tests.cc b/test/core/end2end/end2end_nosec_tests.cc
index 3236fee..6318550 100644
--- a/test/core/end2end/end2end_nosec_tests.cc
+++ b/test/core/end2end/end2end_nosec_tests.cc
@@ -68,6 +68,8 @@
 extern void filter_causes_close_pre_init(void);
 extern void filter_latency(grpc_end2end_test_config config);
 extern void filter_latency_pre_init(void);
+extern void filter_status_code(grpc_end2end_test_config config);
+extern void filter_status_code_pre_init(void);
 extern void graceful_server_shutdown(grpc_end2end_test_config config);
 extern void graceful_server_shutdown_pre_init(void);
 extern void high_initial_seqno(grpc_end2end_test_config config);
@@ -170,6 +172,7 @@
   filter_call_init_fails_pre_init();
   filter_causes_close_pre_init();
   filter_latency_pre_init();
+  filter_status_code_pre_init();
   graceful_server_shutdown_pre_init();
   high_initial_seqno_pre_init();
   hpack_size_pre_init();
@@ -237,6 +240,7 @@
     filter_call_init_fails(config);
     filter_causes_close(config);
     filter_latency(config);
+    filter_status_code(config);
     graceful_server_shutdown(config);
     high_initial_seqno(config);
     hpack_size(config);
@@ -356,6 +360,10 @@
       filter_latency(config);
       continue;
     }
+    if (0 == strcmp("filter_status_code", argv[i])) {
+      filter_status_code(config);
+      continue;
+    }
     if (0 == strcmp("graceful_server_shutdown", argv[i])) {
       graceful_server_shutdown(config);
       continue;
diff --git a/test/core/end2end/end2end_tests.cc b/test/core/end2end/end2end_tests.cc
index ca9443b..9d8dfd6 100644
--- a/test/core/end2end/end2end_tests.cc
+++ b/test/core/end2end/end2end_tests.cc
@@ -70,6 +70,8 @@
 extern void filter_causes_close_pre_init(void);
 extern void filter_latency(grpc_end2end_test_config config);
 extern void filter_latency_pre_init(void);
+extern void filter_status_code(grpc_end2end_test_config config);
+extern void filter_status_code_pre_init(void);
 extern void graceful_server_shutdown(grpc_end2end_test_config config);
 extern void graceful_server_shutdown_pre_init(void);
 extern void high_initial_seqno(grpc_end2end_test_config config);
@@ -173,6 +175,7 @@
   filter_call_init_fails_pre_init();
   filter_causes_close_pre_init();
   filter_latency_pre_init();
+  filter_status_code_pre_init();
   graceful_server_shutdown_pre_init();
   high_initial_seqno_pre_init();
   hpack_size_pre_init();
@@ -241,6 +244,7 @@
     filter_call_init_fails(config);
     filter_causes_close(config);
     filter_latency(config);
+    filter_status_code(config);
     graceful_server_shutdown(config);
     high_initial_seqno(config);
     hpack_size(config);
@@ -364,6 +368,10 @@
       filter_latency(config);
       continue;
     }
+    if (0 == strcmp("filter_status_code", argv[i])) {
+      filter_status_code(config);
+      continue;
+    }
     if (0 == strcmp("graceful_server_shutdown", argv[i])) {
       graceful_server_shutdown(config);
       continue;
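The filter_status_code additions above follow the fixed registration pattern of these drivers: a *_pre_init() hook, an unconditional call in the run-everything path, and a strcmp-based dispatch entry for running the test by name. A stripped-down sketch of that pattern with only the new test wired in (simplified; the real files are generated and register dozens of tests with a config argument):

// Minimal sketch of the pre-init / run-all / run-by-name driver pattern.
#include <cstdio>
#include <cstring>

static void filter_status_code_pre_init() { std::puts("pre-init filter_status_code"); }
static void filter_status_code() { std::puts("run filter_status_code"); }

int main(int argc, char** argv) {
  filter_status_code_pre_init();  // 1. one-time setup for every known test
  if (argc <= 1) {
    filter_status_code();         // 2. no arguments: run the full suite
    return 0;
  }
  for (int i = 1; i < argc; i++) {  // 3. arguments: run only the named tests
    if (0 == strcmp("filter_status_code", argv[i])) {
      filter_status_code();
      continue;
    }
    std::printf("unknown test '%s'\n", argv[i]);
  }
  return 0;
}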
diff --git a/test/core/end2end/end2end_tests.h b/test/core/end2end/end2end_tests.h
index 33943a7..b42d90b 100644
--- a/test/core/end2end/end2end_tests.h
+++ b/test/core/end2end/end2end_tests.h
@@ -21,10 +21,6 @@
 
 #include <grpc/grpc.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_end2end_test_fixture grpc_end2end_test_fixture;
 typedef struct grpc_end2end_test_config grpc_end2end_test_config;
 
@@ -78,8 +74,4 @@
 void validate_host_override_string(const char* pattern, grpc_slice str,
                                    grpc_end2end_test_config config);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_TEST_CORE_END2END_END2END_TESTS_H */
diff --git a/test/core/end2end/fixtures/h2_census.cc b/test/core/end2end/fixtures/h2_census.cc
index fed8ead..75c80aa 100644
--- a/test/core/end2end/fixtures/h2_census.cc
+++ b/test/core/end2end/fixtures/h2_census.cc
@@ -75,9 +75,8 @@
       grpc_insecure_channel_create(ffd->localaddr, client_args, nullptr);
   GPR_ASSERT(f->client);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, client_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(client_args);
   }
 }
 
@@ -92,9 +91,8 @@
   server_args = grpc_channel_args_copy_and_add(server_args, &arg, 1);
   f->server = grpc_server_create(server_args, nullptr);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(server_args);
   }
   grpc_server_register_completion_queue(f->server, f->cq, nullptr);
   GPR_ASSERT(grpc_server_add_insecure_http2_port(f->server, ffd->localaddr));
diff --git a/test/core/end2end/fixtures/h2_compress.cc b/test/core/end2end/fixtures/h2_compress.cc
index ea8990f..5b91815 100644
--- a/test/core/end2end/fixtures/h2_compress.cc
+++ b/test/core/end2end/fixtures/h2_compress.cc
@@ -66,9 +66,8 @@
   fullstack_compression_fixture_data* ffd =
       static_cast<fullstack_compression_fixture_data*>(f->fixture_data);
   if (ffd->client_args_compression != nullptr) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, ffd->client_args_compression);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(ffd->client_args_compression);
   }
   ffd->client_args_compression = grpc_channel_args_set_compression_algorithm(
       client_args, GRPC_COMPRESS_GZIP);
@@ -81,9 +80,8 @@
   fullstack_compression_fixture_data* ffd =
       static_cast<fullstack_compression_fixture_data*>(f->fixture_data);
   if (ffd->server_args_compression != nullptr) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, ffd->server_args_compression);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(ffd->server_args_compression);
   }
   ffd->server_args_compression = grpc_channel_args_set_compression_algorithm(
       server_args, GRPC_COMPRESS_GZIP);
@@ -97,14 +95,13 @@
 }
 
 void chttp2_tear_down_fullstack_compression(grpc_end2end_test_fixture* f) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   fullstack_compression_fixture_data* ffd =
       static_cast<fullstack_compression_fixture_data*>(f->fixture_data);
-  grpc_channel_args_destroy(&exec_ctx, ffd->client_args_compression);
-  grpc_channel_args_destroy(&exec_ctx, ffd->server_args_compression);
+  grpc_channel_args_destroy(ffd->client_args_compression);
+  grpc_channel_args_destroy(ffd->server_args_compression);
   gpr_free(ffd->localaddr);
   gpr_free(ffd);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 /* All test configurations */
diff --git a/test/core/end2end/fixtures/h2_fd.cc b/test/core/end2end/fixtures/h2_fd.cc
index 97f4b71..9157ab0 100644
--- a/test/core/end2end/fixtures/h2_fd.cc
+++ b/test/core/end2end/fixtures/h2_fd.cc
@@ -68,20 +68,18 @@
 
 static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
                                           grpc_channel_args* client_args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   sp_fixture_data* sfd = static_cast<sp_fixture_data*>(f->fixture_data);
 
   GPR_ASSERT(!f->client);
   f->client = grpc_insecure_channel_create_from_fd(
       "fixture_client", sfd->fd_pair[0], client_args);
   GPR_ASSERT(f->client);
-
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void chttp2_init_server_socketpair(grpc_end2end_test_fixture* f,
                                           grpc_channel_args* server_args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   sp_fixture_data* sfd = static_cast<sp_fixture_data*>(f->fixture_data);
   GPR_ASSERT(!f->server);
   f->server = grpc_server_create(server_args, nullptr);
@@ -90,8 +88,6 @@
   grpc_server_start(f->server);
 
   grpc_server_add_insecure_channel_from_fd(f->server, nullptr, sfd->fd_pair[1]);
-
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void chttp2_tear_down_socketpair(grpc_end2end_test_fixture* f) {
diff --git a/test/core/end2end/fixtures/h2_full+workarounds.cc b/test/core/end2end/fixtures/h2_full+workarounds.cc
index 71a497d..237841d 100644
--- a/test/core/end2end/fixtures/h2_full+workarounds.cc
+++ b/test/core/end2end/fixtures/h2_full+workarounds.cc
@@ -72,7 +72,7 @@
 void chttp2_init_server_fullstack(grpc_end2end_test_fixture* f,
                                   grpc_channel_args* server_args) {
   int i;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   fullstack_fixture_data* ffd =
       static_cast<fullstack_fixture_data*>(f->fixture_data);
   grpc_arg args[GRPC_MAX_WORKAROUND_ID];
@@ -90,8 +90,7 @@
   grpc_server_register_completion_queue(f->server, f->cq, nullptr);
   GPR_ASSERT(grpc_server_add_insecure_http2_port(f->server, ffd->localaddr));
   grpc_server_start(f->server);
-  grpc_channel_args_destroy(&exec_ctx, server_args_new);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_channel_args_destroy(server_args_new);
 }
 
 void chttp2_tear_down_fullstack(grpc_end2end_test_fixture* f) {
diff --git a/test/core/end2end/fixtures/h2_load_reporting.cc b/test/core/end2end/fixtures/h2_load_reporting.cc
index 7486b6a..fda5f4b 100644
--- a/test/core/end2end/fixtures/h2_load_reporting.cc
+++ b/test/core/end2end/fixtures/h2_load_reporting.cc
@@ -78,9 +78,8 @@
   server_args = grpc_channel_args_copy_and_add(server_args, &arg, 1);
   f->server = grpc_server_create(server_args, nullptr);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(server_args);
   }
   grpc_server_register_completion_queue(f->server, f->cq, nullptr);
   GPR_ASSERT(grpc_server_add_insecure_http2_port(f->server, ffd->localaddr));
diff --git a/test/core/end2end/fixtures/h2_oauth2.cc b/test/core/end2end/fixtures/h2_oauth2.cc
index 1642cb0..5fed443 100644
--- a/test/core/end2end/fixtures/h2_oauth2.cc
+++ b/test/core/end2end/fixtures/h2_oauth2.cc
@@ -143,11 +143,11 @@
 
 static void chttp2_init_client_simple_ssl_with_oauth2_secure_fullstack(
     grpc_end2end_test_fixture* f, grpc_channel_args* client_args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_channel_credentials* ssl_creds =
       grpc_ssl_credentials_create(test_root_cert, nullptr, nullptr);
   grpc_call_credentials* oauth2_creds = grpc_md_only_test_credentials_create(
-      &exec_ctx, "authorization", oauth2_md, true /* is_async */);
+      "authorization", oauth2_md, true /* is_async */);
   grpc_channel_credentials* ssl_oauth2_creds =
       grpc_composite_channel_credentials_create(ssl_creds, oauth2_creds,
                                                 nullptr);
@@ -158,10 +158,9 @@
   grpc_channel_args* new_client_args =
       grpc_channel_args_copy_and_add(client_args, &ssl_name_override, 1);
   chttp2_init_client_secure_fullstack(f, new_client_args, ssl_oauth2_creds);
-  grpc_channel_args_destroy(&exec_ctx, new_client_args);
+  grpc_channel_args_destroy(new_client_args);
   grpc_channel_credentials_release(ssl_creds);
   grpc_call_credentials_release(oauth2_creds);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static int fail_server_auth_check(grpc_channel_args* server_args) {
diff --git a/test/core/end2end/fixtures/h2_sockpair+trace.cc b/test/core/end2end/fixtures/h2_sockpair+trace.cc
index 9319c40..9807e92 100644
--- a/test/core/end2end/fixtures/h2_sockpair+trace.cc
+++ b/test/core/end2end/fixtures/h2_sockpair+trace.cc
@@ -50,12 +50,11 @@
 
 static void server_setup_transport(void* ts, grpc_transport* transport) {
   grpc_end2end_test_fixture* f = static_cast<grpc_end2end_test_fixture*>(ts);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
-  grpc_endpoint_add_to_pollset(&exec_ctx, sfd->server, grpc_cq_pollset(f->cq));
-  grpc_server_setup_transport(&exec_ctx, f->server, transport, nullptr,
+  grpc_endpoint_add_to_pollset(sfd->server, grpc_cq_pollset(f->cq));
+  grpc_server_setup_transport(f->server, transport, nullptr,
                               grpc_server_get_channel_args(f->server));
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 typedef struct {
@@ -63,13 +62,11 @@
   grpc_channel_args* client_args;
 } sp_client_setup;
 
-static void client_setup_transport(grpc_exec_ctx* exec_ctx, void* ts,
-                                   grpc_transport* transport) {
+static void client_setup_transport(void* ts, grpc_transport* transport) {
   sp_client_setup* cs = static_cast<sp_client_setup*>(ts);
 
-  cs->f->client =
-      grpc_channel_create(exec_ctx, "socketpair-target", cs->client_args,
-                          GRPC_CLIENT_DIRECT_CHANNEL, transport);
+  cs->f->client = grpc_channel_create("socketpair-target", cs->client_args,
+                                      GRPC_CLIENT_DIRECT_CHANNEL, transport);
 }
 
 static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
@@ -90,34 +87,30 @@
 
 static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
                                           grpc_channel_args* client_args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
   grpc_transport* transport;
   sp_client_setup cs;
   cs.client_args = client_args;
   cs.f = f;
-  transport =
-      grpc_create_chttp2_transport(&exec_ctx, client_args, sfd->client, true);
-  client_setup_transport(&exec_ctx, &cs, transport);
+  transport = grpc_create_chttp2_transport(client_args, sfd->client, true);
+  client_setup_transport(&cs, transport);
   GPR_ASSERT(f->client);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 }
 
 static void chttp2_init_server_socketpair(grpc_end2end_test_fixture* f,
                                           grpc_channel_args* server_args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
   grpc_transport* transport;
   GPR_ASSERT(!f->server);
   f->server = grpc_server_create(server_args, nullptr);
   grpc_server_register_completion_queue(f->server, f->cq, nullptr);
   grpc_server_start(f->server);
-  transport =
-      grpc_create_chttp2_transport(&exec_ctx, server_args, sfd->server, false);
+  transport = grpc_create_chttp2_transport(server_args, sfd->server, false);
   server_setup_transport(f, transport);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 }
 
 static void chttp2_tear_down_socketpair(grpc_end2end_test_fixture* f) {
@@ -133,7 +126,6 @@
 
 int main(int argc, char** argv) {
   size_t i;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
 
   /* force tracing on, with a value to force many
      code paths in trace.c to be taken */
@@ -147,7 +139,6 @@
   grpc_test_init(argc, argv);
   grpc_end2end_tests_pre_init();
   grpc_init();
-  grpc_exec_ctx_finish(&exec_ctx);
 
   GPR_ASSERT(0 == grpc_tracer_set_enabled("also-doesnt-exist", 0));
   GPR_ASSERT(1 == grpc_tracer_set_enabled("http", 1));
diff --git a/test/core/end2end/fixtures/h2_sockpair.cc b/test/core/end2end/fixtures/h2_sockpair.cc
index 03566aa..b68279f 100644
--- a/test/core/end2end/fixtures/h2_sockpair.cc
+++ b/test/core/end2end/fixtures/h2_sockpair.cc
@@ -44,12 +44,11 @@
 
 static void server_setup_transport(void* ts, grpc_transport* transport) {
   grpc_end2end_test_fixture* f = static_cast<grpc_end2end_test_fixture*>(ts);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
-  grpc_endpoint_add_to_pollset(&exec_ctx, sfd->server, grpc_cq_pollset(f->cq));
-  grpc_server_setup_transport(&exec_ctx, f->server, transport, nullptr,
+  grpc_endpoint_add_to_pollset(sfd->server, grpc_cq_pollset(f->cq));
+  grpc_server_setup_transport(f->server, transport, nullptr,
                               grpc_server_get_channel_args(f->server));
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 typedef struct {
@@ -57,13 +56,11 @@
   grpc_channel_args* client_args;
 } sp_client_setup;
 
-static void client_setup_transport(grpc_exec_ctx* exec_ctx, void* ts,
-                                   grpc_transport* transport) {
+static void client_setup_transport(void* ts, grpc_transport* transport) {
   sp_client_setup* cs = static_cast<sp_client_setup*>(ts);
 
-  cs->f->client =
-      grpc_channel_create(exec_ctx, "socketpair-target", cs->client_args,
-                          GRPC_CLIENT_DIRECT_CHANNEL, transport);
+  cs->f->client = grpc_channel_create("socketpair-target", cs->client_args,
+                                      GRPC_CLIENT_DIRECT_CHANNEL, transport);
 }
 
 static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
@@ -84,34 +81,30 @@
 
 static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
                                           grpc_channel_args* client_args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
   grpc_transport* transport;
   sp_client_setup cs;
   cs.client_args = client_args;
   cs.f = f;
-  transport =
-      grpc_create_chttp2_transport(&exec_ctx, client_args, sfd->client, true);
-  client_setup_transport(&exec_ctx, &cs, transport);
+  transport = grpc_create_chttp2_transport(client_args, sfd->client, true);
+  client_setup_transport(&cs, transport);
   GPR_ASSERT(f->client);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 }
 
 static void chttp2_init_server_socketpair(grpc_end2end_test_fixture* f,
                                           grpc_channel_args* server_args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
   grpc_transport* transport;
   GPR_ASSERT(!f->server);
   f->server = grpc_server_create(server_args, nullptr);
   grpc_server_register_completion_queue(f->server, f->cq, nullptr);
   grpc_server_start(f->server);
-  transport =
-      grpc_create_chttp2_transport(&exec_ctx, server_args, sfd->server, false);
+  transport = grpc_create_chttp2_transport(server_args, sfd->server, false);
   server_setup_transport(f, transport);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 }
 
 static void chttp2_tear_down_socketpair(grpc_end2end_test_fixture* f) {
diff --git a/test/core/end2end/fixtures/h2_sockpair_1byte.cc b/test/core/end2end/fixtures/h2_sockpair_1byte.cc
index 9adba00..350be13 100644
--- a/test/core/end2end/fixtures/h2_sockpair_1byte.cc
+++ b/test/core/end2end/fixtures/h2_sockpair_1byte.cc
@@ -44,12 +44,11 @@
 
 static void server_setup_transport(void* ts, grpc_transport* transport) {
   grpc_end2end_test_fixture* f = static_cast<grpc_end2end_test_fixture*>(ts);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
-  grpc_endpoint_add_to_pollset(&exec_ctx, sfd->server, grpc_cq_pollset(f->cq));
-  grpc_server_setup_transport(&exec_ctx, f->server, transport, nullptr,
+  grpc_endpoint_add_to_pollset(sfd->server, grpc_cq_pollset(f->cq));
+  grpc_server_setup_transport(f->server, transport, nullptr,
                               grpc_server_get_channel_args(f->server));
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 typedef struct {
@@ -57,13 +56,11 @@
   grpc_channel_args* client_args;
 } sp_client_setup;
 
-static void client_setup_transport(grpc_exec_ctx* exec_ctx, void* ts,
-                                   grpc_transport* transport) {
+static void client_setup_transport(void* ts, grpc_transport* transport) {
   sp_client_setup* cs = static_cast<sp_client_setup*>(ts);
 
-  cs->f->client =
-      grpc_channel_create(exec_ctx, "socketpair-target", cs->client_args,
-                          GRPC_CLIENT_DIRECT_CHANNEL, transport);
+  cs->f->client = grpc_channel_create("socketpair-target", cs->client_args,
+                                      GRPC_CLIENT_DIRECT_CHANNEL, transport);
 }
 
 static grpc_end2end_test_fixture chttp2_create_fixture_socketpair(
@@ -95,34 +92,30 @@
 
 static void chttp2_init_client_socketpair(grpc_end2end_test_fixture* f,
                                           grpc_channel_args* client_args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
   grpc_transport* transport;
   sp_client_setup cs;
   cs.client_args = client_args;
   cs.f = f;
-  transport =
-      grpc_create_chttp2_transport(&exec_ctx, client_args, sfd->client, true);
-  client_setup_transport(&exec_ctx, &cs, transport);
+  transport = grpc_create_chttp2_transport(client_args, sfd->client, true);
+  client_setup_transport(&cs, transport);
   GPR_ASSERT(f->client);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 }
 
 static void chttp2_init_server_socketpair(grpc_end2end_test_fixture* f,
                                           grpc_channel_args* server_args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_pair* sfd = static_cast<grpc_endpoint_pair*>(f->fixture_data);
   grpc_transport* transport;
   GPR_ASSERT(!f->server);
   f->server = grpc_server_create(server_args, nullptr);
   grpc_server_register_completion_queue(f->server, f->cq, nullptr);
   grpc_server_start(f->server);
-  transport =
-      grpc_create_chttp2_transport(&exec_ctx, server_args, sfd->server, false);
+  transport = grpc_create_chttp2_transport(server_args, sfd->server, false);
   server_setup_transport(f, transport);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 }
 
 static void chttp2_tear_down_socketpair(grpc_end2end_test_fixture* f) {
diff --git a/test/core/end2end/fixtures/h2_ssl.cc b/test/core/end2end/fixtures/h2_ssl.cc
index 3d7e2e3..9a0680c 100644
--- a/test/core/end2end/fixtures/h2_ssl.cc
+++ b/test/core/end2end/fixtures/h2_ssl.cc
@@ -110,9 +110,8 @@
       grpc_channel_args_copy_and_add(client_args, &ssl_name_override, 1);
   chttp2_init_client_secure_fullstack(f, new_client_args, ssl_creds);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, new_client_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(new_client_args);
   }
 }
 
diff --git a/test/core/end2end/fixtures/h2_ssl_proxy.cc b/test/core/end2end/fixtures/h2_ssl_proxy.cc
index f8d5a69..5ddbdef 100644
--- a/test/core/end2end/fixtures/h2_ssl_proxy.cc
+++ b/test/core/end2end/fixtures/h2_ssl_proxy.cc
@@ -66,9 +66,8 @@
       grpc_secure_channel_create(ssl_creds, target, new_client_args, nullptr);
   grpc_channel_credentials_release(ssl_creds);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, new_client_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(new_client_args);
   }
   return channel;
 }
@@ -148,9 +147,8 @@
       grpc_channel_args_copy_and_add(client_args, &ssl_name_override, 1);
   chttp2_init_client_secure_fullstack(f, new_client_args, ssl_creds);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, new_client_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(new_client_args);
   }
 }
 
diff --git a/test/core/end2end/fixtures/http_proxy_fixture.cc b/test/core/end2end/fixtures/http_proxy_fixture.cc
index ac0c953..137f7c9 100644
--- a/test/core/end2end/fixtures/http_proxy_fixture.cc
+++ b/test/core/end2end/fixtures/http_proxy_fixture.cc
@@ -68,6 +68,9 @@
 // Connection handling
 //
 
+// The proxy_connection structure is only accessed from closures, all of which
+// are scheduled under the same combiner lock, so there is no need for a mutex
+// to protect this structure.
 typedef struct proxy_connection {
   grpc_end2end_http_proxy* proxy;
 
@@ -78,6 +81,8 @@
 
   grpc_pollset_set* pollset_set;
 
+  // NOTE: All of these closures execute under the proxy->combiner lock, which
+  // means there are no data races between them.
   grpc_closure on_read_request_done;
   grpc_closure on_server_connect_done;
   grpc_closure on_write_response_done;
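The two comments above record the synchronization model for proxy_connection: every closure on the struct is bound to the proxy's combiner, so invocations are serialized and no mutex is required. The binding happens at initialization time, as in this sketch taken from the on_accept hunk later in this file:

// Scheduling the closure on the proxy's combiner serializes it with every
// other proxy_connection closure, so the struct needs no separate lock.
GRPC_CLOSURE_INIT(&conn->on_read_request_done, on_read_request_done, conn,
                  grpc_combiner_scheduler(conn->proxy->combiner));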
@@ -86,6 +91,13 @@
   grpc_closure on_server_read_done;
   grpc_closure on_server_write_done;
 
+  bool client_read_failed : 1;
+  bool client_write_failed : 1;
+  bool client_shutdown : 1;
+  bool server_read_failed : 1;
+  bool server_write_failed : 1;
+  bool server_shutdown : 1;
+
   grpc_slice_buffer client_read_buffer;
   grpc_slice_buffer client_deferred_write_buffer;
   bool client_is_writing;
@@ -104,24 +116,21 @@
 }
 
 // Helper function to destroy the proxy connection.
-static void proxy_connection_unref(grpc_exec_ctx* exec_ctx,
-                                   proxy_connection* conn, const char* reason) {
+static void proxy_connection_unref(proxy_connection* conn, const char* reason) {
   if (gpr_unref(&conn->refcount)) {
     gpr_log(GPR_DEBUG, "endpoints: %p %p", conn->client_endpoint,
             conn->server_endpoint);
-    grpc_endpoint_destroy(exec_ctx, conn->client_endpoint);
+    grpc_endpoint_destroy(conn->client_endpoint);
     if (conn->server_endpoint != nullptr) {
-      grpc_endpoint_destroy(exec_ctx, conn->server_endpoint);
+      grpc_endpoint_destroy(conn->server_endpoint);
     }
-    grpc_pollset_set_destroy(exec_ctx, conn->pollset_set);
-    grpc_slice_buffer_destroy_internal(exec_ctx, &conn->client_read_buffer);
-    grpc_slice_buffer_destroy_internal(exec_ctx,
-                                       &conn->client_deferred_write_buffer);
-    grpc_slice_buffer_destroy_internal(exec_ctx, &conn->client_write_buffer);
-    grpc_slice_buffer_destroy_internal(exec_ctx, &conn->server_read_buffer);
-    grpc_slice_buffer_destroy_internal(exec_ctx,
-                                       &conn->server_deferred_write_buffer);
-    grpc_slice_buffer_destroy_internal(exec_ctx, &conn->server_write_buffer);
+    grpc_pollset_set_destroy(conn->pollset_set);
+    grpc_slice_buffer_destroy_internal(&conn->client_read_buffer);
+    grpc_slice_buffer_destroy_internal(&conn->client_deferred_write_buffer);
+    grpc_slice_buffer_destroy_internal(&conn->client_write_buffer);
+    grpc_slice_buffer_destroy_internal(&conn->server_read_buffer);
+    grpc_slice_buffer_destroy_internal(&conn->server_deferred_write_buffer);
+    grpc_slice_buffer_destroy_internal(&conn->server_write_buffer);
     grpc_http_parser_destroy(&conn->http_parser);
     grpc_http_request_destroy(&conn->http_request);
     gpr_unref(&conn->proxy->users);
@@ -129,31 +138,59 @@
   }
 }
 
-// Helper function to shut down the proxy connection.
-// Does NOT take ownership of a reference to error.
-static void proxy_connection_failed(grpc_exec_ctx* exec_ctx,
-                                    proxy_connection* conn, bool is_client,
-                                    const char* prefix, grpc_error* error) {
-  const char* msg = grpc_error_string(error);
-  gpr_log(GPR_INFO, "%s: %s", prefix, msg);
+enum failure_type {
+  SETUP_FAILED,  // To be used before we start proxying.
+  CLIENT_READ_FAILED,
+  CLIENT_WRITE_FAILED,
+  SERVER_READ_FAILED,
+  SERVER_WRITE_FAILED,
+};
 
-  grpc_endpoint_shutdown(exec_ctx, conn->client_endpoint,
-                         GRPC_ERROR_REF(error));
-  if (conn->server_endpoint != nullptr) {
-    grpc_endpoint_shutdown(exec_ctx, conn->server_endpoint,
-                           GRPC_ERROR_REF(error));
+// Helper function to shut down the proxy connection.
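+// Takes ownership of a reference to error.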
+static void proxy_connection_failed(proxy_connection* conn,
+                                    failure_type failure, const char* prefix,
+                                    grpc_error* error) {
+  gpr_log(GPR_INFO, "%s: %s", prefix, grpc_error_string(error));
+  // Decide whether we should shut down the client and server.
+  bool shutdown_client = false;
+  bool shutdown_server = false;
+  if (failure == SETUP_FAILED) {
+    shutdown_client = true;
+    shutdown_server = true;
+  } else {
+    if ((failure == CLIENT_READ_FAILED && conn->client_write_failed) ||
+        (failure == CLIENT_WRITE_FAILED && conn->client_read_failed) ||
+        (failure == SERVER_READ_FAILED && !conn->client_is_writing)) {
+      shutdown_client = true;
+    }
+    if ((failure == SERVER_READ_FAILED && conn->server_write_failed) ||
+        (failure == SERVER_WRITE_FAILED && conn->server_read_failed) ||
+        (failure == CLIENT_READ_FAILED && !conn->server_is_writing)) {
+      shutdown_server = true;
+    }
   }
-  proxy_connection_unref(exec_ctx, conn, "conn_failed");
+  // If we decided to shut down either one and have not yet done so, do so.
+  if (shutdown_client && !conn->client_shutdown) {
+    grpc_endpoint_shutdown(conn->client_endpoint, GRPC_ERROR_REF(error));
+    conn->client_shutdown = true;
+  }
+  if (shutdown_server && !conn->server_shutdown &&
+      (conn->server_endpoint != nullptr)) {
+    grpc_endpoint_shutdown(conn->server_endpoint, GRPC_ERROR_REF(error));
+    conn->server_shutdown = true;
+  }
+  // Unref the connection.
+  proxy_connection_unref(conn, "conn_failed");
+  GRPC_ERROR_UNREF(error);
 }
 
 // Callback for writing proxy data to the client.
-static void on_client_write_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
+static void on_client_write_done(void* arg, grpc_error* error) {
   proxy_connection* conn = (proxy_connection*)arg;
   conn->client_is_writing = false;
   if (error != GRPC_ERROR_NONE) {
-    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
-                            "HTTP proxy client write", error);
+    proxy_connection_failed(conn, CLIENT_WRITE_FAILED,
+                            "HTTP proxy client write", GRPC_ERROR_REF(error));
     return;
   }
   // Clear write buffer (the data we just wrote).
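The rewritten proxy_connection_failed above derives which side to shut down from the failure type plus the per-connection flags. As a reading aid, here is the same rule restated as a standalone helper; decide_shutdown and shutdown_decision are hypothetical names used only for illustration and are not part of this change:

// Hypothetical restatement of the decision rule in proxy_connection_failed.
struct shutdown_decision { bool client; bool server; };

static shutdown_decision decide_shutdown(failure_type failure,
                                         const proxy_connection* conn) {
  shutdown_decision d = {false, false};
  if (failure == SETUP_FAILED) {
    // Before proxying starts, any failure drops both sides.
    d.client = d.server = true;
  } else {
    // The client is shut down once both of its directions have failed, or
    // when the server read fails while nothing is being written to the client.
    d.client = (failure == CLIENT_READ_FAILED && conn->client_write_failed) ||
               (failure == CLIENT_WRITE_FAILED && conn->client_read_failed) ||
               (failure == SERVER_READ_FAILED && !conn->client_is_writing);
    // Symmetrically for the server side.
    d.server = (failure == SERVER_READ_FAILED && conn->server_write_failed) ||
               (failure == SERVER_WRITE_FAILED && conn->server_read_failed) ||
               (failure == CLIENT_READ_FAILED && !conn->server_is_writing);
  }
  return d;
}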
@@ -164,23 +201,21 @@
     grpc_slice_buffer_move_into(&conn->client_deferred_write_buffer,
                                 &conn->client_write_buffer);
     conn->client_is_writing = true;
-    grpc_endpoint_write(exec_ctx, conn->client_endpoint,
-                        &conn->client_write_buffer,
+    grpc_endpoint_write(conn->client_endpoint, &conn->client_write_buffer,
                         &conn->on_client_write_done);
   } else {
     // No more writes.  Unref the connection.
-    proxy_connection_unref(exec_ctx, conn, "write_done");
+    proxy_connection_unref(conn, "write_done");
   }
 }
 
 // Callback for writing proxy data to the backend server.
-static void on_server_write_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
+static void on_server_write_done(void* arg, grpc_error* error) {
   proxy_connection* conn = (proxy_connection*)arg;
   conn->server_is_writing = false;
   if (error != GRPC_ERROR_NONE) {
-    proxy_connection_failed(exec_ctx, conn, false /* is_client */,
-                            "HTTP proxy server write", error);
+    proxy_connection_failed(conn, SERVER_WRITE_FAILED,
+                            "HTTP proxy server write", GRPC_ERROR_REF(error));
     return;
   }
   // Clear write buffer (the data we just wrote).
@@ -191,23 +226,21 @@
     grpc_slice_buffer_move_into(&conn->server_deferred_write_buffer,
                                 &conn->server_write_buffer);
     conn->server_is_writing = true;
-    grpc_endpoint_write(exec_ctx, conn->server_endpoint,
-                        &conn->server_write_buffer,
+    grpc_endpoint_write(conn->server_endpoint, &conn->server_write_buffer,
                         &conn->on_server_write_done);
   } else {
     // No more writes.  Unref the connection.
-    proxy_connection_unref(exec_ctx, conn, "server_write");
+    proxy_connection_unref(conn, "server_write");
   }
 }
 
 // Callback for reading data from the client, which will be proxied to
 // the backend server.
-static void on_client_read_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                grpc_error* error) {
+static void on_client_read_done(void* arg, grpc_error* error) {
   proxy_connection* conn = (proxy_connection*)arg;
   if (error != GRPC_ERROR_NONE) {
-    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
-                            "HTTP proxy client read", error);
+    proxy_connection_failed(conn, CLIENT_READ_FAILED, "HTTP proxy client read",
+                            GRPC_ERROR_REF(error));
     return;
   }
   // If there is already a pending write (i.e., server_write_buffer is
@@ -224,23 +257,21 @@
                                 &conn->server_write_buffer);
     proxy_connection_ref(conn, "client_read");
     conn->server_is_writing = true;
-    grpc_endpoint_write(exec_ctx, conn->server_endpoint,
-                        &conn->server_write_buffer,
+    grpc_endpoint_write(conn->server_endpoint, &conn->server_write_buffer,
                         &conn->on_server_write_done);
   }
   // Read more data.
-  grpc_endpoint_read(exec_ctx, conn->client_endpoint, &conn->client_read_buffer,
+  grpc_endpoint_read(conn->client_endpoint, &conn->client_read_buffer,
                      &conn->on_client_read_done);
 }
 
 // Callback for reading data from the backend server, which will be
 // proxied to the client.
-static void on_server_read_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                grpc_error* error) {
+static void on_server_read_done(void* arg, grpc_error* error) {
   proxy_connection* conn = (proxy_connection*)arg;
   if (error != GRPC_ERROR_NONE) {
-    proxy_connection_failed(exec_ctx, conn, false /* is_client */,
-                            "HTTP proxy server read", error);
+    proxy_connection_failed(conn, SERVER_READ_FAILED, "HTTP proxy server read",
+                            GRPC_ERROR_REF(error));
     return;
   }
   // If there is already a pending write (i.e., client_write_buffer is
@@ -257,23 +288,21 @@
                                 &conn->client_write_buffer);
     proxy_connection_ref(conn, "server_read");
     conn->client_is_writing = true;
-    grpc_endpoint_write(exec_ctx, conn->client_endpoint,
-                        &conn->client_write_buffer,
+    grpc_endpoint_write(conn->client_endpoint, &conn->client_write_buffer,
                         &conn->on_client_write_done);
   }
   // Read more data.
-  grpc_endpoint_read(exec_ctx, conn->server_endpoint, &conn->server_read_buffer,
+  grpc_endpoint_read(conn->server_endpoint, &conn->server_read_buffer,
                      &conn->on_server_read_done);
 }
 
 // Callback to write the HTTP response for the CONNECT request.
-static void on_write_response_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_error* error) {
+static void on_write_response_done(void* arg, grpc_error* error) {
   proxy_connection* conn = (proxy_connection*)arg;
   conn->client_is_writing = false;
   if (error != GRPC_ERROR_NONE) {
-    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
-                            "HTTP proxy write response", error);
+    proxy_connection_failed(conn, SETUP_FAILED, "HTTP proxy write response",
+                            GRPC_ERROR_REF(error));
     return;
   }
   // Clear write buffer.
@@ -283,17 +312,16 @@
   // for the other one.
   proxy_connection_ref(conn, "client_read");
   proxy_connection_ref(conn, "server_read");
-  proxy_connection_unref(exec_ctx, conn, "write_response");
-  grpc_endpoint_read(exec_ctx, conn->client_endpoint, &conn->client_read_buffer,
+  proxy_connection_unref(conn, "write_response");
+  grpc_endpoint_read(conn->client_endpoint, &conn->client_read_buffer,
                      &conn->on_client_read_done);
-  grpc_endpoint_read(exec_ctx, conn->server_endpoint, &conn->server_read_buffer,
+  grpc_endpoint_read(conn->server_endpoint, &conn->server_read_buffer,
                      &conn->on_server_read_done);
 }
 
 // Callback to connect to the backend server specified by the HTTP
 // CONNECT request.
-static void on_server_connect_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_error* error) {
+static void on_server_connect_done(void* arg, grpc_error* error) {
   proxy_connection* conn = (proxy_connection*)arg;
   if (error != GRPC_ERROR_NONE) {
     // TODO(roth): Technically, in this case, we should handle the error
@@ -301,8 +329,8 @@
     // connection failed.  However, for the purposes of this test code,
     // it's fine to pretend this is a client-side error, which will
     // cause the client connection to be dropped.
-    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
-                            "HTTP proxy server connect", error);
+    proxy_connection_failed(conn, SETUP_FAILED, "HTTP proxy server connect",
+                            GRPC_ERROR_REF(error));
     return;
   }
   // We've established a connection, so send back a 200 response code to
@@ -312,8 +340,7 @@
       grpc_slice_from_copied_string("HTTP/1.0 200 connected\r\n\r\n");
   grpc_slice_buffer_add(&conn->client_write_buffer, slice);
   conn->client_is_writing = true;
-  grpc_endpoint_write(exec_ctx, conn->client_endpoint,
-                      &conn->client_write_buffer,
+  grpc_endpoint_write(conn->client_endpoint, &conn->client_write_buffer,
                       &conn->on_write_response_done);
 }
 
@@ -322,8 +349,7 @@
  * Basic <base64_encoded_expected_cred>
  * Returns true if it matches, false otherwise
  */
-static bool proxy_auth_header_matches(grpc_exec_ctx* exec_ctx,
-                                      char* proxy_auth_header_val,
+static bool proxy_auth_header_matches(char* proxy_auth_header_val,
                                       char* expected_cred) {
   GPR_ASSERT(proxy_auth_header_val != nullptr);
   GPR_ASSERT(expected_cred != nullptr);
@@ -331,11 +357,10 @@
     return false;
   }
   proxy_auth_header_val += 6;
-  grpc_slice decoded_slice =
-      grpc_base64_decode(exec_ctx, proxy_auth_header_val, 0);
+  grpc_slice decoded_slice = grpc_base64_decode(proxy_auth_header_val, 0);
   const bool header_matches =
       grpc_slice_str_cmp(decoded_slice, expected_cred) == 0;
-  grpc_slice_unref_internal(exec_ctx, decoded_slice);
+  grpc_slice_unref_internal(decoded_slice);
   return header_matches;
 }
 
@@ -345,14 +370,13 @@
 // the client indicating that the request failed.  However, for the purposes
 // of this test code, it's fine to pretend this is a client-side error,
 // which will cause the client connection to be dropped.
-static void on_read_request_done(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
+static void on_read_request_done(void* arg, grpc_error* error) {
   proxy_connection* conn = (proxy_connection*)arg;
   gpr_log(GPR_DEBUG, "on_read_request_done: %p %s", conn,
           grpc_error_string(error));
   if (error != GRPC_ERROR_NONE) {
-    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
-                            "HTTP proxy read request", error);
+    proxy_connection_failed(conn, SETUP_FAILED, "HTTP proxy read request",
+                            GRPC_ERROR_REF(error));
     return;
   }
   // Read request and feed it to the parser.
@@ -361,8 +385,8 @@
       error = grpc_http_parser_parse(
           &conn->http_parser, conn->client_read_buffer.slices[i], nullptr);
       if (error != GRPC_ERROR_NONE) {
-        proxy_connection_failed(exec_ctx, conn, true /* is_client */,
-                                "HTTP proxy request parse", error);
+        proxy_connection_failed(conn, SETUP_FAILED, "HTTP proxy request parse",
+                                GRPC_ERROR_REF(error));
         GRPC_ERROR_UNREF(error);
         return;
       }
@@ -371,8 +395,8 @@
   grpc_slice_buffer_reset_and_unref(&conn->client_read_buffer);
   // If we're not done reading the request, read more data.
   if (conn->http_parser.state != GRPC_HTTP_BODY) {
-    grpc_endpoint_read(exec_ctx, conn->client_endpoint,
-                       &conn->client_read_buffer, &conn->on_read_request_done);
+    grpc_endpoint_read(conn->client_endpoint, &conn->client_read_buffer,
+                       &conn->on_read_request_done);
     return;
   }
   // Make sure we got a CONNECT request.
@@ -382,8 +406,8 @@
                  conn->http_request.method);
     error = GRPC_ERROR_CREATE_FROM_COPIED_STRING(msg);
     gpr_free(msg);
-    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
-                            "HTTP proxy read request", error);
+    proxy_connection_failed(conn, SETUP_FAILED, "HTTP proxy read request",
+                            GRPC_ERROR_REF(error));
     GRPC_ERROR_UNREF(error);
     return;
   }
@@ -395,16 +419,15 @@
     for (size_t i = 0; i < conn->http_request.hdr_count; i++) {
       if (strcmp(conn->http_request.hdrs[i].key, "Proxy-Authorization") == 0) {
         client_authenticated = proxy_auth_header_matches(
-            exec_ctx, conn->http_request.hdrs[i].value,
-            proxy_auth_arg->value.string);
+            conn->http_request.hdrs[i].value, proxy_auth_arg->value.string);
         break;
       }
     }
     if (!client_authenticated) {
       const char* msg = "HTTP Connect could not verify authentication";
       error = GRPC_ERROR_CREATE_FROM_STATIC_STRING(msg);
-      proxy_connection_failed(exec_ctx, conn, true /* is_client */,
-                              "HTTP proxy read request", error);
+      proxy_connection_failed(conn, SETUP_FAILED, "HTTP proxy read request",
+                              GRPC_ERROR_REF(error));
       GRPC_ERROR_UNREF(error);
       return;
     }
@@ -414,8 +437,8 @@
   error = grpc_blocking_resolve_address(conn->http_request.path, "80",
                                         &resolved_addresses);
   if (error != GRPC_ERROR_NONE) {
-    proxy_connection_failed(exec_ctx, conn, true /* is_client */,
-                            "HTTP proxy DNS lookup", error);
+    proxy_connection_failed(conn, SETUP_FAILED, "HTTP proxy DNS lookup",
+                            GRPC_ERROR_REF(error));
     GRPC_ERROR_UNREF(error);
     return;
   }
@@ -423,15 +446,15 @@
   // Connect to requested address.
   // The connection callback inherits our reference to conn.
   const grpc_millis deadline =
-      grpc_exec_ctx_now(exec_ctx) + 10 * GPR_MS_PER_SEC;
-  grpc_tcp_client_connect(exec_ctx, &conn->on_server_connect_done,
-                          &conn->server_endpoint, conn->pollset_set, nullptr,
+      grpc_core::ExecCtx::Get()->Now() + 10 * GPR_MS_PER_SEC;
+  grpc_tcp_client_connect(&conn->on_server_connect_done, &conn->server_endpoint,
+                          conn->pollset_set, nullptr,
                           &resolved_addresses->addrs[0], deadline);
   grpc_resolved_addresses_destroy(resolved_addresses);
 }
 
-static void on_accept(grpc_exec_ctx* exec_ctx, void* arg,
-                      grpc_endpoint* endpoint, grpc_pollset* accepting_pollset,
+static void on_accept(void* arg, grpc_endpoint* endpoint,
+                      grpc_pollset* accepting_pollset,
                       grpc_tcp_server_acceptor* acceptor) {
   gpr_free(acceptor);
   grpc_end2end_http_proxy* proxy = (grpc_end2end_http_proxy*)arg;
@@ -442,8 +465,8 @@
   conn->proxy = proxy;
   gpr_ref_init(&conn->refcount, 1);
   conn->pollset_set = grpc_pollset_set_create();
-  grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset);
-  grpc_endpoint_add_to_pollset_set(exec_ctx, endpoint, conn->pollset_set);
+  grpc_pollset_set_add_pollset(conn->pollset_set, proxy->pollset);
+  grpc_endpoint_add_to_pollset_set(endpoint, conn->pollset_set);
   GRPC_CLOSURE_INIT(&conn->on_read_request_done, on_read_request_done, conn,
                     grpc_combiner_scheduler(conn->proxy->combiner));
   GRPC_CLOSURE_INIT(&conn->on_server_connect_done, on_server_connect_done, conn,
@@ -468,7 +491,7 @@
   grpc_slice_buffer_init(&conn->server_write_buffer);
   grpc_http_parser_init(&conn->http_parser, GRPC_HTTP_REQUEST,
                         &conn->http_request);
-  grpc_endpoint_read(exec_ctx, conn->client_endpoint, &conn->client_read_buffer,
+  grpc_endpoint_read(conn->client_endpoint, &conn->client_read_buffer,
                      &conn->on_read_request_done);
 }
 
@@ -478,24 +501,23 @@
 
 static void thread_main(void* arg) {
   grpc_end2end_http_proxy* proxy = (grpc_end2end_http_proxy*)arg;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   do {
     gpr_ref(&proxy->users);
     grpc_pollset_worker* worker = nullptr;
     gpr_mu_lock(proxy->mu);
     GRPC_LOG_IF_ERROR(
         "grpc_pollset_work",
-        grpc_pollset_work(&exec_ctx, proxy->pollset, &worker,
-                          grpc_exec_ctx_now(&exec_ctx) + GPR_MS_PER_SEC));
+        grpc_pollset_work(proxy->pollset, &worker,
+                          grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC));
     gpr_mu_unlock(proxy->mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
   } while (!gpr_unref(&proxy->users));
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 grpc_end2end_http_proxy* grpc_end2end_http_proxy_create(
     grpc_channel_args* args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_end2end_http_proxy* proxy =
       (grpc_end2end_http_proxy*)gpr_malloc(sizeof(*proxy));
   memset(proxy, 0, sizeof(*proxy));
@@ -507,8 +529,8 @@
   gpr_log(GPR_INFO, "Proxy address: %s", proxy->proxy_name);
   // Create TCP server.
   proxy->channel_args = grpc_channel_args_copy(args);
-  grpc_error* error = grpc_tcp_server_create(
-      &exec_ctx, nullptr, proxy->channel_args, &proxy->server);
+  grpc_error* error =
+      grpc_tcp_server_create(nullptr, proxy->channel_args, &proxy->server);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   // Bind to port.
   grpc_resolved_address resolved_addr;
@@ -523,37 +545,35 @@
   // Start server.
   proxy->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
   grpc_pollset_init(proxy->pollset, &proxy->mu);
-  grpc_tcp_server_start(&exec_ctx, proxy->server, &proxy->pollset, 1, on_accept,
-                        proxy);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_start(proxy->server, &proxy->pollset, 1, on_accept, proxy);
+
   // Start proxy thread.
   gpr_thd_options opt = gpr_thd_options_default();
   gpr_thd_options_set_joinable(&opt);
-  GPR_ASSERT(gpr_thd_new(&proxy->thd, thread_main, proxy, &opt));
+  GPR_ASSERT(
+      gpr_thd_new(&proxy->thd, "grpc_http_proxy", thread_main, proxy, &opt));
   return proxy;
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+static void destroy_pollset(void* arg, grpc_error* error) {
   grpc_pollset* pollset = (grpc_pollset*)arg;
-  grpc_pollset_destroy(exec_ctx, pollset);
+  grpc_pollset_destroy(pollset);
   gpr_free(pollset);
 }
 
 void grpc_end2end_http_proxy_destroy(grpc_end2end_http_proxy* proxy) {
   gpr_unref(&proxy->users);  // Signal proxy thread to shutdown.
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_thd_join(proxy->thd);
-  grpc_tcp_server_shutdown_listeners(&exec_ctx, proxy->server);
-  grpc_tcp_server_unref(&exec_ctx, proxy->server);
+  grpc_tcp_server_shutdown_listeners(proxy->server);
+  grpc_tcp_server_unref(proxy->server);
   gpr_free(proxy->proxy_name);
-  grpc_channel_args_destroy(&exec_ctx, proxy->channel_args);
-  grpc_pollset_shutdown(&exec_ctx, proxy->pollset,
+  grpc_channel_args_destroy(proxy->channel_args);
+  grpc_pollset_shutdown(proxy->pollset,
                         GRPC_CLOSURE_CREATE(destroy_pollset, proxy->pollset,
                                             grpc_schedule_on_exec_ctx));
-  GRPC_COMBINER_UNREF(&exec_ctx, proxy->combiner, "test");
+  GRPC_COMBINER_UNREF(proxy->combiner, "test");
   gpr_free(proxy);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 const char* grpc_end2end_http_proxy_get_proxy_name(
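Both this file and proxy.cc below also pick up the new gpr_thd_new signature, which takes a descriptive thread name as its second argument. A minimal sketch of the call shape, where "my_test_thread", thread_main, and arg are placeholders rather than names from the patch:

gpr_thd_id thd;
gpr_thd_options opt = gpr_thd_options_default();
gpr_thd_options_set_joinable(&opt);
// The thread name is new; the remaining arguments are unchanged.
GPR_ASSERT(gpr_thd_new(&thd, "my_test_thread", thread_main, arg, &opt));
// ... later, since the thread was created joinable ...
gpr_thd_join(thd);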
diff --git a/test/core/end2end/fixtures/proxy.cc b/test/core/end2end/fixtures/proxy.cc
index 2fab62b..b1698c8 100644
--- a/test/core/end2end/fixtures/proxy.cc
+++ b/test/core/end2end/fixtures/proxy.cc
@@ -98,7 +98,8 @@
 
   grpc_call_details_init(&proxy->new_call_details);
   gpr_thd_options_set_joinable(&opt);
-  GPR_ASSERT(gpr_thd_new(&proxy->thd, thread_main, proxy, &opt));
+  GPR_ASSERT(
+      gpr_thd_new(&proxy->thd, "grpc_end2end_proxy", thread_main, proxy, &opt));
 
   request_call(proxy);
 
diff --git a/test/core/end2end/fuzzers/api_fuzzer.cc b/test/core/end2end/fuzzers/api_fuzzer.cc
index d84f8a3..967a6d5 100644
--- a/test/core/end2end/fuzzers/api_fuzzer.cc
+++ b/test/core/end2end/fuzzers/api_fuzzer.cc
@@ -56,7 +56,7 @@
 static grpc_channel* g_channel;
 static grpc_resource_quota* g_resource_quota;
 
-extern "C" gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
+extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
 
 static gpr_timespec now_impl(gpr_clock_type clock_type) {
   GPR_ASSERT(clock_type != GPR_TIMESPAN);
@@ -376,8 +376,7 @@
   grpc_lb_addresses** lb_addrs;
 } addr_req;
 
-static void finish_resolve(grpc_exec_ctx* exec_ctx, void* arg,
-                           grpc_error* error) {
+static void finish_resolve(void* arg, grpc_error* error) {
   addr_req* r = static_cast<addr_req*>(arg);
 
   if (error == GRPC_ERROR_NONE && 0 == strcmp(r->addr, "server")) {
@@ -395,9 +394,9 @@
                                     nullptr);
       *r->lb_addrs = lb_addrs;
     }
-    GRPC_CLOSURE_SCHED(exec_ctx, r->on_done, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(r->on_done, GRPC_ERROR_NONE);
   } else {
-    GRPC_CLOSURE_SCHED(exec_ctx, r->on_done,
+    GRPC_CLOSURE_SCHED(r->on_done,
                        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                            "Resolution failed", &error, 1));
   }
@@ -406,8 +405,7 @@
   gpr_free(r);
 }
 
-void my_resolve_address(grpc_exec_ctx* exec_ctx, const char* addr,
-                        const char* default_port,
+void my_resolve_address(const char* addr, const char* default_port,
                         grpc_pollset_set* interested_parties,
                         grpc_closure* on_done,
                         grpc_resolved_addresses** addresses) {
@@ -417,22 +415,24 @@
   r->addrs = addresses;
   r->lb_addrs = nullptr;
   grpc_timer_init(
-      exec_ctx, &r->timer, GPR_MS_PER_SEC + grpc_exec_ctx_now(exec_ctx),
+      &r->timer, GPR_MS_PER_SEC + grpc_core::ExecCtx::Get()->Now(),
       GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx));
 }
 
-grpc_ares_request* my_dns_lookup_ares(
-    grpc_exec_ctx* exec_ctx, const char* dns_server, const char* addr,
-    const char* default_port, grpc_pollset_set* interested_parties,
-    grpc_closure* on_done, grpc_lb_addresses** lb_addrs, bool check_grpclb,
-    char** service_config_json) {
+grpc_ares_request* my_dns_lookup_ares(const char* dns_server, const char* addr,
+                                      const char* default_port,
+                                      grpc_pollset_set* interested_parties,
+                                      grpc_closure* on_done,
+                                      grpc_lb_addresses** lb_addrs,
+                                      bool check_grpclb,
+                                      char** service_config_json) {
   addr_req* r = static_cast<addr_req*>(gpr_malloc(sizeof(*r)));
   r->addr = gpr_strdup(addr);
   r->on_done = on_done;
   r->addrs = nullptr;
   r->lb_addrs = lb_addrs;
   grpc_timer_init(
-      exec_ctx, &r->timer, GPR_MS_PER_SEC + grpc_exec_ctx_now(exec_ctx),
+      &r->timer, GPR_MS_PER_SEC + grpc_core::ExecCtx::Get()->Now(),
       GRPC_CLOSURE_CREATE(finish_resolve, r, grpc_schedule_on_exec_ctx));
   return nullptr;
 }
@@ -441,13 +441,13 @@
 // client connection
 
 // defined in tcp_client_posix.c
-extern "C" void (*grpc_tcp_client_connect_impl)(
-    grpc_exec_ctx* exec_ctx, grpc_closure* closure, grpc_endpoint** ep,
+extern void (*grpc_tcp_client_connect_impl)(
+    grpc_closure* closure, grpc_endpoint** ep,
     grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
     const grpc_resolved_address* addr, grpc_millis deadline);
 
-static void sched_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                          grpc_endpoint** ep, gpr_timespec deadline);
+static void sched_connect(grpc_closure* closure, grpc_endpoint** ep,
+                          gpr_timespec deadline);
 
 typedef struct {
   grpc_timer timer;
@@ -456,11 +456,11 @@
   gpr_timespec deadline;
 } future_connect;
 
-static void do_connect(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void do_connect(void* arg, grpc_error* error) {
   future_connect* fc = static_cast<future_connect*>(arg);
   if (error != GRPC_ERROR_NONE) {
     *fc->ep = nullptr;
-    GRPC_CLOSURE_SCHED(exec_ctx, fc->closure, GRPC_ERROR_REF(error));
+    GRPC_CLOSURE_SCHED(fc->closure, GRPC_ERROR_REF(error));
   } else if (g_server != nullptr) {
     grpc_endpoint* client;
     grpc_endpoint* server;
@@ -468,25 +468,23 @@
     *fc->ep = client;
 
     grpc_transport* transport =
-        grpc_create_chttp2_transport(exec_ctx, nullptr, server, false);
-    grpc_server_setup_transport(exec_ctx, g_server, transport, nullptr,
-                                nullptr);
-    grpc_chttp2_transport_start_reading(exec_ctx, transport, nullptr, nullptr);
+        grpc_create_chttp2_transport(nullptr, server, false);
+    grpc_server_setup_transport(g_server, transport, nullptr, nullptr);
+    grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 
-    GRPC_CLOSURE_SCHED(exec_ctx, fc->closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(fc->closure, GRPC_ERROR_NONE);
   } else {
-    sched_connect(exec_ctx, fc->closure, fc->ep, fc->deadline);
+    sched_connect(fc->closure, fc->ep, fc->deadline);
   }
   gpr_free(fc);
 }
 
-static void sched_connect(grpc_exec_ctx* exec_ctx, grpc_closure* closure,
-                          grpc_endpoint** ep, gpr_timespec deadline) {
+static void sched_connect(grpc_closure* closure, grpc_endpoint** ep,
+                          gpr_timespec deadline) {
   if (gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) < 0) {
     *ep = nullptr;
-    GRPC_CLOSURE_SCHED(
-        exec_ctx, closure,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Connect deadline exceeded"));
+    GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                    "Connect deadline exceeded"));
     return;
   }
 
@@ -495,17 +493,16 @@
   fc->ep = ep;
   fc->deadline = deadline;
   grpc_timer_init(
-      exec_ctx, &fc->timer, GPR_MS_PER_SEC + grpc_exec_ctx_now(exec_ctx),
+      &fc->timer, GPR_MS_PER_SEC + grpc_core::ExecCtx::Get()->Now(),
       GRPC_CLOSURE_CREATE(do_connect, fc, grpc_schedule_on_exec_ctx));
 }
 
-static void my_tcp_client_connect(grpc_exec_ctx* exec_ctx,
-                                  grpc_closure* closure, grpc_endpoint** ep,
+static void my_tcp_client_connect(grpc_closure* closure, grpc_endpoint** ep,
                                   grpc_pollset_set* interested_parties,
                                   const grpc_channel_args* channel_args,
                                   const grpc_resolved_address* addr,
                                   grpc_millis deadline) {
-  sched_connect(exec_ctx, closure, ep,
+  sched_connect(closure, ep,
                 grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC));
 }
 
@@ -751,9 +748,8 @@
   grpc_init();
   grpc_timer_manager_set_threading(false);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_executor_set_threading(&exec_ctx, false);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_executor_set_threading(false);
   }
   grpc_resolve_address = my_resolve_address;
   grpc_dns_lookup_ares = my_dns_lookup_ares;
@@ -846,9 +842,8 @@
           g_channel = grpc_insecure_channel_create(target_uri, args, nullptr);
           GPR_ASSERT(g_channel != nullptr);
           {
-            grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-            grpc_channel_args_destroy(&exec_ctx, args);
-            grpc_exec_ctx_finish(&exec_ctx);
+            grpc_core::ExecCtx exec_ctx;
+            grpc_channel_args_destroy(args);
           }
           gpr_free(target_uri);
           gpr_free(target);
@@ -874,9 +869,8 @@
           g_server = grpc_server_create(args, nullptr);
           GPR_ASSERT(g_server != nullptr);
           {
-            grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-            grpc_channel_args_destroy(&exec_ctx, args);
-            grpc_exec_ctx_finish(&exec_ctx);
+            grpc_core::ExecCtx exec_ctx;
+            grpc_channel_args_destroy(args);
           }
           grpc_server_register_completion_queue(g_server, cq, nullptr);
           grpc_server_start(g_server);
@@ -1205,9 +1199,8 @@
               grpc_secure_channel_create(creds, target_uri, args, nullptr);
           GPR_ASSERT(g_channel != nullptr);
           {
-            grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-            grpc_channel_args_destroy(&exec_ctx, args);
-            grpc_exec_ctx_finish(&exec_ctx);
+            grpc_core::ExecCtx exec_ctx;
+            grpc_channel_args_destroy(args);
           }
           gpr_free(target_uri);
           gpr_free(target);
diff --git a/test/core/end2end/fuzzers/client_fuzzer.cc b/test/core/end2end/fuzzers/client_fuzzer.cc
index 5871f0f..c17d581 100644
--- a/test/core/end2end/fuzzers/client_fuzzer.cc
+++ b/test/core/end2end/fuzzers/client_fuzzer.cc
@@ -43,112 +43,114 @@
   if (squelch) gpr_set_log_function(dont_log);
   if (leak_check) grpc_memory_counters_init();
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_executor_set_threading(&exec_ctx, false);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    grpc_executor_set_threading(false);
 
-  grpc_resource_quota* resource_quota =
-      grpc_resource_quota_create("client_fuzzer");
-  grpc_endpoint* mock_endpoint =
-      grpc_mock_endpoint_create(discard_write, resource_quota);
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
+    grpc_resource_quota* resource_quota =
+        grpc_resource_quota_create("client_fuzzer");
+    grpc_endpoint* mock_endpoint =
+        grpc_mock_endpoint_create(discard_write, resource_quota);
+    grpc_resource_quota_unref_internal(resource_quota);
 
-  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
-  grpc_transport* transport =
-      grpc_create_chttp2_transport(&exec_ctx, nullptr, mock_endpoint, true);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
+    grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
+    grpc_transport* transport =
+        grpc_create_chttp2_transport(nullptr, mock_endpoint, true);
+    grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 
-  grpc_channel* channel = grpc_channel_create(
-      &exec_ctx, "test-target", nullptr, GRPC_CLIENT_DIRECT_CHANNEL, transport);
-  grpc_slice host = grpc_slice_from_static_string("localhost");
-  grpc_call* call = grpc_channel_create_call(
-      channel, nullptr, 0, cq, grpc_slice_from_static_string("/foo"), &host,
-      gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
+    grpc_channel* channel = grpc_channel_create(
+        "test-target", nullptr, GRPC_CLIENT_DIRECT_CHANNEL, transport);
+    grpc_slice host = grpc_slice_from_static_string("localhost");
+    grpc_call* call = grpc_channel_create_call(
+        channel, nullptr, 0, cq, grpc_slice_from_static_string("/foo"), &host,
+        gpr_inf_future(GPR_CLOCK_REALTIME), nullptr);
 
-  grpc_metadata_array initial_metadata_recv;
-  grpc_metadata_array_init(&initial_metadata_recv);
-  grpc_byte_buffer* response_payload_recv = nullptr;
-  grpc_metadata_array trailing_metadata_recv;
-  grpc_metadata_array_init(&trailing_metadata_recv);
-  grpc_status_code status;
-  grpc_slice details = grpc_empty_slice();
+    grpc_metadata_array initial_metadata_recv;
+    grpc_metadata_array_init(&initial_metadata_recv);
+    grpc_byte_buffer* response_payload_recv = nullptr;
+    grpc_metadata_array trailing_metadata_recv;
+    grpc_metadata_array_init(&trailing_metadata_recv);
+    grpc_status_code status;
+    grpc_slice details = grpc_empty_slice();
 
-  grpc_op ops[6];
-  memset(ops, 0, sizeof(ops));
-  grpc_op* op = ops;
-  op->op = GRPC_OP_SEND_INITIAL_METADATA;
-  op->data.send_initial_metadata.count = 0;
-  op->flags = 0;
-  op->reserved = nullptr;
-  op++;
-  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
-  op->flags = 0;
-  op->reserved = nullptr;
-  op++;
-  op->op = GRPC_OP_RECV_INITIAL_METADATA;
-  op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
-  op->flags = 0;
-  op->reserved = nullptr;
-  op++;
-  op->op = GRPC_OP_RECV_MESSAGE;
-  op->data.recv_message.recv_message = &response_payload_recv;
-  op->flags = 0;
-  op->reserved = nullptr;
-  op++;
-  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
-  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
-  op->data.recv_status_on_client.status = &status;
-  op->data.recv_status_on_client.status_details = &details;
-  op->flags = 0;
-  op->reserved = nullptr;
-  op++;
-  grpc_call_error error =
-      grpc_call_start_batch(call, ops, (size_t)(op - ops), tag(1), nullptr);
-  int requested_calls = 1;
-  GPR_ASSERT(GRPC_CALL_OK == error);
+    grpc_op ops[6];
+    memset(ops, 0, sizeof(ops));
+    grpc_op* op = ops;
+    op->op = GRPC_OP_SEND_INITIAL_METADATA;
+    op->data.send_initial_metadata.count = 0;
+    op->flags = 0;
+    op->reserved = nullptr;
+    op++;
+    op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
+    op->flags = 0;
+    op->reserved = nullptr;
+    op++;
+    op->op = GRPC_OP_RECV_INITIAL_METADATA;
+    op->data.recv_initial_metadata.recv_initial_metadata =
+        &initial_metadata_recv;
+    op->flags = 0;
+    op->reserved = nullptr;
+    op++;
+    op->op = GRPC_OP_RECV_MESSAGE;
+    op->data.recv_message.recv_message = &response_payload_recv;
+    op->flags = 0;
+    op->reserved = nullptr;
+    op++;
+    op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+    op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+    op->data.recv_status_on_client.status = &status;
+    op->data.recv_status_on_client.status_details = &details;
+    op->flags = 0;
+    op->reserved = nullptr;
+    op++;
+    grpc_call_error error =
+        grpc_call_start_batch(call, ops, (size_t)(op - ops), tag(1), nullptr);
+    int requested_calls = 1;
+    GPR_ASSERT(GRPC_CALL_OK == error);
 
-  grpc_mock_endpoint_put_read(
-      &exec_ctx, mock_endpoint,
-      grpc_slice_from_copied_buffer((const char*)data, size));
+    grpc_mock_endpoint_put_read(
+        mock_endpoint, grpc_slice_from_copied_buffer((const char*)data, size));
 
-  grpc_event ev;
-  while (1) {
-    grpc_exec_ctx_flush(&exec_ctx);
-    ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
-                                    nullptr);
-    switch (ev.type) {
-      case GRPC_QUEUE_TIMEOUT:
-        goto done;
-      case GRPC_QUEUE_SHUTDOWN:
-        break;
-      case GRPC_OP_COMPLETE:
-        requested_calls--;
-        break;
+    grpc_event ev;
+    while (1) {
+      grpc_core::ExecCtx::Get()->Flush();
+      ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
+                                      nullptr);
+      switch (ev.type) {
+        case GRPC_QUEUE_TIMEOUT:
+          goto done;
+        case GRPC_QUEUE_SHUTDOWN:
+          break;
+        case GRPC_OP_COMPLETE:
+          requested_calls--;
+          break;
+      }
     }
-  }
 
-done:
-  if (requested_calls) {
-    grpc_call_cancel(call, nullptr);
-  }
-  for (int i = 0; i < requested_calls; i++) {
-    ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
-                                    nullptr);
-    GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
-  }
-  grpc_completion_queue_shutdown(cq);
-  for (int i = 0; i < requested_calls; i++) {
-    ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
-                                    nullptr);
-    GPR_ASSERT(ev.type == GRPC_QUEUE_SHUTDOWN);
-  }
-  grpc_call_unref(call);
-  grpc_completion_queue_destroy(cq);
-  grpc_metadata_array_destroy(&initial_metadata_recv);
-  grpc_metadata_array_destroy(&trailing_metadata_recv);
-  grpc_slice_unref(details);
-  grpc_channel_destroy(channel);
-  if (response_payload_recv != nullptr) {
-    grpc_byte_buffer_destroy(response_payload_recv);
+  done:
+    if (requested_calls) {
+      grpc_call_cancel(call, nullptr);
+    }
+    for (int i = 0; i < requested_calls; i++) {
+      ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
+                                      nullptr);
+      GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
+    }
+    grpc_completion_queue_shutdown(cq);
+    for (int i = 0; i < requested_calls; i++) {
+      ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
+                                      nullptr);
+      GPR_ASSERT(ev.type == GRPC_QUEUE_SHUTDOWN);
+    }
+    grpc_call_unref(call);
+    grpc_completion_queue_destroy(cq);
+    grpc_metadata_array_destroy(&initial_metadata_recv);
+    grpc_metadata_array_destroy(&trailing_metadata_recv);
+    grpc_slice_unref(details);
+    grpc_channel_destroy(channel);
+    if (response_payload_recv != nullptr) {
+      grpc_byte_buffer_destroy(response_payload_recv);
+    }
   }
   grpc_shutdown();
   if (leak_check) {
diff --git a/test/core/end2end/fuzzers/server_fuzzer.cc b/test/core/end2end/fuzzers/server_fuzzer.cc
index 67caf4e..61c55e0 100644
--- a/test/core/end2end/fuzzers/server_fuzzer.cc
+++ b/test/core/end2end/fuzzers/server_fuzzer.cc
@@ -41,81 +41,82 @@
   if (squelch) gpr_set_log_function(dont_log);
   if (leak_check) grpc_memory_counters_init();
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_executor_set_threading(&exec_ctx, false);
+  {
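+    // The ExecCtx is scoped to this block so that its destructor flushes any
+    // remaining work before grpc_shutdown() runs below.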
+    grpc_core::ExecCtx exec_ctx;
+    grpc_executor_set_threading(false);
 
-  grpc_resource_quota* resource_quota =
-      grpc_resource_quota_create("server_fuzzer");
-  grpc_endpoint* mock_endpoint =
-      grpc_mock_endpoint_create(discard_write, resource_quota);
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
-  grpc_mock_endpoint_put_read(
-      &exec_ctx, mock_endpoint,
-      grpc_slice_from_copied_buffer((const char*)data, size));
+    grpc_resource_quota* resource_quota =
+        grpc_resource_quota_create("server_fuzzer");
+    grpc_endpoint* mock_endpoint =
+        grpc_mock_endpoint_create(discard_write, resource_quota);
+    grpc_resource_quota_unref_internal(resource_quota);
+    grpc_mock_endpoint_put_read(
+        mock_endpoint, grpc_slice_from_copied_buffer((const char*)data, size));
 
-  grpc_server* server = grpc_server_create(nullptr, nullptr);
-  grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
-  grpc_server_register_completion_queue(server, cq, nullptr);
-  // TODO(ctiller): add registered methods (one for POST, one for PUT)
-  // void *registered_method =
-  //    grpc_server_register_method(server, "/reg", NULL, 0);
-  grpc_server_start(server);
-  grpc_transport* transport =
-      grpc_create_chttp2_transport(&exec_ctx, nullptr, mock_endpoint, false);
-  grpc_server_setup_transport(&exec_ctx, server, transport, nullptr, nullptr);
-  grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr, nullptr);
+    grpc_server* server = grpc_server_create(nullptr, nullptr);
+    grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
+    grpc_server_register_completion_queue(server, cq, nullptr);
+    // TODO(ctiller): add registered methods (one for POST, one for PUT)
+    // void *registered_method =
+    //    grpc_server_register_method(server, "/reg", NULL, 0);
+    grpc_server_start(server);
+    grpc_transport* transport =
+        grpc_create_chttp2_transport(nullptr, mock_endpoint, false);
+    grpc_server_setup_transport(server, transport, nullptr, nullptr);
+    grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 
-  grpc_call* call1 = nullptr;
-  grpc_call_details call_details1;
-  grpc_metadata_array request_metadata1;
-  grpc_call_details_init(&call_details1);
-  grpc_metadata_array_init(&request_metadata1);
-  int requested_calls = 0;
+    grpc_call* call1 = nullptr;
+    grpc_call_details call_details1;
+    grpc_metadata_array request_metadata1;
+    grpc_call_details_init(&call_details1);
+    grpc_metadata_array_init(&request_metadata1);
+    int requested_calls = 0;
 
-  GPR_ASSERT(GRPC_CALL_OK ==
-             grpc_server_request_call(server, &call1, &call_details1,
-                                      &request_metadata1, cq, cq, tag(1)));
-  requested_calls++;
+    GPR_ASSERT(GRPC_CALL_OK ==
+               grpc_server_request_call(server, &call1, &call_details1,
+                                        &request_metadata1, cq, cq, tag(1)));
+    requested_calls++;
 
-  grpc_event ev;
-  while (1) {
-    grpc_exec_ctx_flush(&exec_ctx);
-    ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
-                                    nullptr);
-    switch (ev.type) {
-      case GRPC_QUEUE_TIMEOUT:
-        goto done;
-      case GRPC_QUEUE_SHUTDOWN:
-        break;
-      case GRPC_OP_COMPLETE:
-        switch (detag(ev.tag)) {
-          case 1:
-            requested_calls--;
-            // TODO(ctiller): keep reading that call!
-            break;
-        }
+    grpc_event ev;
+    while (1) {
+      grpc_core::ExecCtx::Get()->Flush();
+      ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
+                                      nullptr);
+      switch (ev.type) {
+        case GRPC_QUEUE_TIMEOUT:
+          goto done;
+        case GRPC_QUEUE_SHUTDOWN:
+          break;
+        case GRPC_OP_COMPLETE:
+          switch (detag(ev.tag)) {
+            case 1:
+              requested_calls--;
+              // TODO(ctiller): keep reading that call!
+              break;
+          }
+      }
     }
-  }
 
-done:
-  if (call1 != nullptr) grpc_call_unref(call1);
-  grpc_call_details_destroy(&call_details1);
-  grpc_metadata_array_destroy(&request_metadata1);
-  grpc_server_shutdown_and_notify(server, cq, tag(0xdead));
-  grpc_server_cancel_all_calls(server);
-  for (int i = 0; i <= requested_calls; i++) {
-    ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
-                                    nullptr);
-    GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
+  done:
+    if (call1 != nullptr) grpc_call_unref(call1);
+    grpc_call_details_destroy(&call_details1);
+    grpc_metadata_array_destroy(&request_metadata1);
+    grpc_server_shutdown_and_notify(server, cq, tag(0xdead));
+    grpc_server_cancel_all_calls(server);
+    for (int i = 0; i <= requested_calls; i++) {
+      ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
+                                      nullptr);
+      GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
+    }
+    grpc_completion_queue_shutdown(cq);
+    for (int i = 0; i <= requested_calls; i++) {
+      ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
+                                      nullptr);
+      GPR_ASSERT(ev.type == GRPC_QUEUE_SHUTDOWN);
+    }
+    grpc_server_destroy(server);
+    grpc_completion_queue_destroy(cq);
   }
-  grpc_completion_queue_shutdown(cq);
-  for (int i = 0; i <= requested_calls; i++) {
-    ev = grpc_completion_queue_next(cq, gpr_inf_past(GPR_CLOCK_REALTIME),
-                                    nullptr);
-    GPR_ASSERT(ev.type == GRPC_QUEUE_SHUTDOWN);
-  }
-  grpc_server_destroy(server);
-  grpc_completion_queue_destroy(cq);
   grpc_shutdown();
   if (leak_check) {
     counters = grpc_memory_counters_snapshot();
diff --git a/test/core/end2end/gen_build_yaml.py b/test/core/end2end/gen_build_yaml.py
index 7c8e7f4..e7cf97b 100755
--- a/test/core/end2end/gen_build_yaml.py
+++ b/test/core/end2end/gen_build_yaml.py
@@ -101,6 +101,7 @@
     'filter_causes_close': default_test_options._replace(cpu_cost=LOWCPU),
     'filter_call_init_fails': default_test_options,
     'filter_latency': default_test_options._replace(cpu_cost=LOWCPU),
+    'filter_status_code': default_test_options._replace(cpu_cost=LOWCPU),
     'graceful_server_shutdown': default_test_options._replace(cpu_cost=LOWCPU,exclude_inproc=True),
     'hpack_size': default_test_options._replace(proxyable=False,
                                                 traceable=False,
diff --git a/test/core/end2end/generate_tests.bzl b/test/core/end2end/generate_tests.bzl
index b9a42bd..1d759e1 100755
--- a/test/core/end2end/generate_tests.bzl
+++ b/test/core/end2end/generate_tests.bzl
@@ -146,6 +146,7 @@
     'trailing_metadata': test_options(),
     'authority_not_supported': test_options(),
     'filter_latency': test_options(),
+    'filter_status_code': test_options(),
     'workaround_cronet_compression': test_options(),
     'write_buffering': test_options(needs_write_buffering=True),
     'write_buffering_at_end': test_options(needs_write_buffering=True),
diff --git a/test/core/end2end/goaway_server_test.cc b/test/core/end2end/goaway_server_test.cc
index 2d0db96..94cfbdd 100644
--- a/test/core/end2end/goaway_server_test.cc
+++ b/test/core/end2end/goaway_server_test.cc
@@ -39,16 +39,15 @@
 
 static gpr_mu g_mu;
 static int g_resolve_port = -1;
-static void (*iomgr_resolve_address)(grpc_exec_ctx* exec_ctx, const char* addr,
-                                     const char* default_port,
+static void (*iomgr_resolve_address)(const char* addr, const char* default_port,
                                      grpc_pollset_set* interested_parties,
                                      grpc_closure* on_done,
                                      grpc_resolved_addresses** addresses);
 
 static grpc_ares_request* (*iomgr_dns_lookup_ares)(
-    grpc_exec_ctx* exec_ctx, const char* dns_server, const char* addr,
-    const char* default_port, grpc_pollset_set* interested_parties,
-    grpc_closure* on_done, grpc_lb_addresses** addresses, bool check_grpclb,
+    const char* dns_server, const char* addr, const char* default_port,
+    grpc_pollset_set* interested_parties, grpc_closure* on_done,
+    grpc_lb_addresses** addresses, bool check_grpclb,
     char** service_config_json);
 
 static void set_resolve_port(int port) {
@@ -57,14 +56,13 @@
   gpr_mu_unlock(&g_mu);
 }
 
-static void my_resolve_address(grpc_exec_ctx* exec_ctx, const char* addr,
-                               const char* default_port,
+static void my_resolve_address(const char* addr, const char* default_port,
                                grpc_pollset_set* interested_parties,
                                grpc_closure* on_done,
                                grpc_resolved_addresses** addrs) {
   if (0 != strcmp(addr, "test")) {
-    iomgr_resolve_address(exec_ctx, addr, default_port, interested_parties,
-                          on_done, addrs);
+    iomgr_resolve_address(addr, default_port, interested_parties, on_done,
+                          addrs);
     return;
   }
 
@@ -86,16 +84,16 @@
     (*addrs)->addrs[0].len = sizeof(*sa);
     gpr_mu_unlock(&g_mu);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(on_done, error);
 }
 
 static grpc_ares_request* my_dns_lookup_ares(
-    grpc_exec_ctx* exec_ctx, const char* dns_server, const char* addr,
-    const char* default_port, grpc_pollset_set* interested_parties,
-    grpc_closure* on_done, grpc_lb_addresses** lb_addrs, bool check_grpclb,
+    const char* dns_server, const char* addr, const char* default_port,
+    grpc_pollset_set* interested_parties, grpc_closure* on_done,
+    grpc_lb_addresses** lb_addrs, bool check_grpclb,
     char** service_config_json) {
   if (0 != strcmp(addr, "test")) {
-    return iomgr_dns_lookup_ares(exec_ctx, dns_server, addr, default_port,
+    return iomgr_dns_lookup_ares(dns_server, addr, default_port,
                                  interested_parties, on_done, lb_addrs,
                                  check_grpclb, service_config_json);
   }
@@ -117,7 +115,7 @@
     gpr_free(sa);
     gpr_mu_unlock(&g_mu);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, error);
+  GRPC_CLOSURE_SCHED(on_done, error);
   return nullptr;
 }
 
diff --git a/test/core/end2end/h2_ssl_cert_test.cc b/test/core/end2end/h2_ssl_cert_test.cc
index 9a98c07..d50d1f4 100644
--- a/test/core/end2end/h2_ssl_cert_test.cc
+++ b/test/core/end2end/h2_ssl_cert_test.cc
@@ -181,9 +181,8 @@
         grpc_channel_args_copy_and_add(client_args, &ssl_name_override, 1);  \
     chttp2_init_client_secure_fullstack(f, new_client_args, ssl_creds);      \
     {                                                                        \
-      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;                           \
-      grpc_channel_args_destroy(&exec_ctx, new_client_args);                 \
-      grpc_exec_ctx_finish(&exec_ctx);                                       \
+      grpc_core::ExecCtx exec_ctx;                                           \
+      grpc_channel_args_destroy(new_client_args);                            \
     }                                                                        \
   }
 
diff --git a/test/core/end2end/tests/cancel_after_accept.cc b/test/core/end2end/tests/cancel_after_accept.cc
index 83439d7..f59caf7 100644
--- a/test/core/end2end/tests/cancel_after_accept.cc
+++ b/test/core/end2end/tests/cancel_after_accept.cc
@@ -245,9 +245,8 @@
   grpc_call_unref(s);
 
   if (args != nullptr) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(args);
   }
 
   cq_verifier_destroy(cqv);
diff --git a/test/core/end2end/tests/cancel_after_round_trip.cc b/test/core/end2end/tests/cancel_after_round_trip.cc
index ddcec67..b10b939 100644
--- a/test/core/end2end/tests/cancel_after_round_trip.cc
+++ b/test/core/end2end/tests/cancel_after_round_trip.cc
@@ -278,9 +278,8 @@
   grpc_call_unref(s);
 
   if (args != nullptr) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(args);
   }
 
   cq_verifier_destroy(cqv);
diff --git a/test/core/end2end/tests/compressed_payload.cc b/test/core/end2end/tests/compressed_payload.cc
index a8ea0ff..944edc7 100644
--- a/test/core/end2end/tests/compressed_payload.cc
+++ b/test/core/end2end/tests/compressed_payload.cc
@@ -129,10 +129,9 @@
   server_args =
       grpc_channel_args_set_compression_algorithm(nullptr, GRPC_COMPRESS_NONE);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
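+    // Callees now pick the ExecCtx up from thread-local storage instead of
+    // taking an explicit grpc_exec_ctx* argument.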
     server_args = grpc_channel_args_compression_algorithm_set_state(
-        &exec_ctx, &server_args, algorithm_to_disable, false);
-    grpc_exec_ctx_finish(&exec_ctx);
+        &server_args, algorithm_to_disable, false);
   }
 
   f = begin_test(config, test_name, client_args, server_args);
@@ -257,10 +256,9 @@
   grpc_byte_buffer_destroy(request_payload_recv);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, client_args);
-    grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(client_args);
+    grpc_channel_args_destroy(server_args);
   }
 
   end_test(&f);
@@ -539,10 +537,9 @@
   cq_verifier_destroy(cqv);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, client_args);
-    grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(client_args);
+    grpc_channel_args_destroy(server_args);
   }
 
   end_test(&f);
diff --git a/test/core/end2end/tests/connectivity.cc b/test/core/end2end/tests/connectivity.cc
index 2ea4ca8..da65080 100644
--- a/test/core/end2end/tests/connectivity.cc
+++ b/test/core/end2end/tests/connectivity.cc
@@ -68,7 +68,8 @@
   ce.cq = f.cq;
   gpr_event_init(&ce.started);
   gpr_thd_options_set_joinable(&thdopt);
-  GPR_ASSERT(gpr_thd_new(&thdid, child_thread, &ce, &thdopt));
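+  // gpr_thd_new now takes a descriptive thread name as its second argument.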
+  GPR_ASSERT(
+      gpr_thd_new(&thdid, "grpc_connectivity", child_thread, &ce, &thdopt));
 
   gpr_event_wait(&ce.started, gpr_inf_future(GPR_CLOCK_MONOTONIC));
 
diff --git a/test/core/end2end/tests/filter_call_init_fails.cc b/test/core/end2end/tests/filter_call_init_fails.cc
index 6eed68a..8f46f0b 100644
--- a/test/core/end2end/tests/filter_call_init_fails.cc
+++ b/test/core/end2end/tests/filter_call_init_fails.cc
@@ -399,26 +399,23 @@
  * Test filter - always fails to initialize a call
  */
 
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   return grpc_error_set_int(
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("access denied"),
       GRPC_ERROR_INT_GRPC_STATUS, GRPC_STATUS_PERMISSION_DENIED);
 }
 
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {}
 
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 static const grpc_channel_filter test_filter = {
     grpc_call_next_op,
@@ -437,8 +434,7 @@
  * Registration
  */
 
-static bool maybe_add_server_channel_filter(grpc_exec_ctx* exec_ctx,
-                                            grpc_channel_stack_builder* builder,
+static bool maybe_add_server_channel_filter(grpc_channel_stack_builder* builder,
                                             void* arg) {
   if (g_enable_server_channel_filter) {
     // Want to add the filter as close to the end as possible, to make
@@ -457,8 +453,7 @@
   }
 }
 
-static bool maybe_add_client_channel_filter(grpc_exec_ctx* exec_ctx,
-                                            grpc_channel_stack_builder* builder,
+static bool maybe_add_client_channel_filter(grpc_channel_stack_builder* builder,
                                             void* arg) {
   if (g_enable_client_channel_filter) {
     // Want to add the filter as close to the end as possible, to make
@@ -478,7 +473,7 @@
 }
 
 static bool maybe_add_client_subchannel_filter(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* arg) {
   if (g_enable_client_subchannel_filter) {
     // Want to add the filter as close to the end as possible, to make
     // sure that all of the filters work well together.  However, we
diff --git a/test/core/end2end/tests/filter_causes_close.cc b/test/core/end2end/tests/filter_causes_close.cc
index 793f590..ec8f9db 100644
--- a/test/core/end2end/tests/filter_causes_close.cc
+++ b/test/core/end2end/tests/filter_causes_close.cc
@@ -197,12 +197,11 @@
   uint8_t unused;
 } channel_data;
 
-static void recv_im_ready(grpc_exec_ctx* exec_ctx, void* arg,
-                          grpc_error* error) {
+static void recv_im_ready(void* arg, grpc_error* error) {
   grpc_call_element* elem = (grpc_call_element*)arg;
   call_data* calld = (call_data*)elem->call_data;
   GRPC_CLOSURE_RUN(
-      exec_ctx, calld->recv_im_ready,
+      calld->recv_im_ready,
       grpc_error_set_int(GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Failure that's not preventable.", &error, 1),
                          GRPC_ERROR_INT_GRPC_STATUS,
@@ -210,8 +209,7 @@
 }
 
 static void start_transport_stream_op_batch(
-    grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
-    grpc_transport_stream_op_batch* op) {
+    grpc_call_element* elem, grpc_transport_stream_op_batch* op) {
   call_data* calld = (call_data*)elem->call_data;
   if (op->recv_initial_metadata) {
     calld->recv_im_ready =
@@ -219,27 +217,24 @@
     op->payload->recv_initial_metadata.recv_initial_metadata_ready =
         GRPC_CLOSURE_CREATE(recv_im_ready, elem, grpc_schedule_on_exec_ctx);
   }
-  grpc_call_next_op(exec_ctx, elem, op);
+  grpc_call_next_op(elem, op);
 }
 
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_call_elem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void destroy_call_elem(grpc_call_element* elem,
                               const grpc_call_final_info* final_info,
                               grpc_closure* ignored) {}
 
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 static const grpc_channel_filter test_filter = {
     start_transport_stream_op_batch,
@@ -258,8 +253,7 @@
  * Registration
  */
 
-static bool maybe_add_filter(grpc_exec_ctx* exec_ctx,
-                             grpc_channel_stack_builder* builder, void* arg) {
+static bool maybe_add_filter(grpc_channel_stack_builder* builder, void* arg) {
   if (g_enable_filter) {
     return grpc_channel_stack_builder_prepend_filter(builder, &test_filter,
                                                      nullptr, nullptr);
diff --git a/test/core/end2end/tests/filter_latency.cc b/test/core/end2end/tests/filter_latency.cc
index c4d96eb..845cbc0 100644
--- a/test/core/end2end/tests/filter_latency.cc
+++ b/test/core/end2end/tests/filter_latency.cc
@@ -247,14 +247,12 @@
  * Test latency filter
  */
 
-static grpc_error* init_call_elem(grpc_exec_ctx* exec_ctx,
-                                  grpc_call_element* elem,
+static grpc_error* init_call_elem(grpc_call_element* elem,
                                   const grpc_call_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void client_destroy_call_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_call_element* elem,
+static void client_destroy_call_elem(grpc_call_element* elem,
                                      const grpc_call_final_info* final_info,
                                      grpc_closure* ignored) {
   gpr_mu_lock(&g_mu);
@@ -262,8 +260,7 @@
   gpr_mu_unlock(&g_mu);
 }
 
-static void server_destroy_call_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_call_element* elem,
+static void server_destroy_call_elem(grpc_call_element* elem,
                                      const grpc_call_final_info* final_info,
                                      grpc_closure* ignored) {
   gpr_mu_lock(&g_mu);
@@ -271,14 +268,12 @@
   gpr_mu_unlock(&g_mu);
 }
 
-static grpc_error* init_channel_elem(grpc_exec_ctx* exec_ctx,
-                                     grpc_channel_element* elem,
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
                                      grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void destroy_channel_elem(grpc_exec_ctx* exec_ctx,
-                                 grpc_channel_element* elem) {}
+static void destroy_channel_elem(grpc_channel_element* elem) {}
 
 static const grpc_channel_filter test_client_filter = {
     grpc_call_next_op,
@@ -310,8 +305,7 @@
  * Registration
  */
 
-static bool maybe_add_filter(grpc_exec_ctx* exec_ctx,
-                             grpc_channel_stack_builder* builder, void* arg) {
+static bool maybe_add_filter(grpc_channel_stack_builder* builder, void* arg) {
   grpc_channel_filter* filter = (grpc_channel_filter*)arg;
   if (g_enable_filter) {
     // Want to add the filter as close to the end as possible, to make
diff --git a/test/core/end2end/tests/filter_status_code.cc b/test/core/end2end/tests/filter_status_code.cc
new file mode 100644
index 0000000..261ddd9
--- /dev/null
+++ b/test/core/end2end/tests/filter_status_code.cc
@@ -0,0 +1,353 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/end2end/end2end_tests.h"
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <grpc/byte_buffer.h>
+#include <grpc/support/alloc.h>
+#include <grpc/support/log.h>
+#include <grpc/support/sync.h>
+#include <grpc/support/time.h>
+#include <grpc/support/useful.h>
+
+#include "src/core/lib/channel/channel_stack_builder.h"
+#include "src/core/lib/surface/channel_init.h"
+#include "test/core/end2end/cq_verifier.h"
+
+static bool g_enable_filter = false;
+static gpr_mu g_mu;
+static bool g_client_code_recv;
+static bool g_server_code_recv;
+static gpr_cv g_client_code_cv;
+static gpr_cv g_server_code_cv;
+static grpc_status_code g_client_status_code;
+static grpc_status_code g_server_status_code;
+
+static void* tag(intptr_t t) { return (void*)t; }
+
+static grpc_end2end_test_fixture begin_test(grpc_end2end_test_config config,
+                                            const char* test_name,
+                                            grpc_channel_args* client_args,
+                                            grpc_channel_args* server_args) {
+  grpc_end2end_test_fixture f;
+  gpr_log(GPR_INFO, "Running test: %s/%s", test_name, config.name);
+  f = config.create_fixture(client_args, server_args);
+  config.init_server(&f, server_args);
+  config.init_client(&f, client_args);
+  return f;
+}
+
+static gpr_timespec n_seconds_from_now(int n) {
+  return grpc_timeout_seconds_to_deadline(n);
+}
+
+static gpr_timespec five_seconds_from_now(void) {
+  return n_seconds_from_now(5);
+}
+
+static void drain_cq(grpc_completion_queue* cq) {
+  grpc_event ev;
+  do {
+    ev = grpc_completion_queue_next(cq, five_seconds_from_now(), nullptr);
+  } while (ev.type != GRPC_QUEUE_SHUTDOWN);
+}
+
+static void shutdown_server(grpc_end2end_test_fixture* f) {
+  if (!f->server) return;
+  grpc_server_shutdown_and_notify(f->server, f->shutdown_cq, tag(1000));
+  GPR_ASSERT(grpc_completion_queue_pluck(f->shutdown_cq, tag(1000),
+                                         grpc_timeout_seconds_to_deadline(5),
+                                         nullptr)
+                 .type == GRPC_OP_COMPLETE);
+  grpc_server_destroy(f->server);
+  f->server = nullptr;
+}
+
+static void shutdown_client(grpc_end2end_test_fixture* f) {
+  if (!f->client) return;
+  grpc_channel_destroy(f->client);
+  f->client = nullptr;
+}
+
+static void end_test(grpc_end2end_test_fixture* f) {
+  shutdown_server(f);
+  shutdown_client(f);
+
+  grpc_completion_queue_shutdown(f->cq);
+  drain_cq(f->cq);
+  grpc_completion_queue_destroy(f->cq);
+  grpc_completion_queue_destroy(f->shutdown_cq);
+}
+
+// Simple request via client and server filters that record the status code
+// reported for the call.
+static void test_request(grpc_end2end_test_config config) {
+  grpc_call* c;
+  grpc_call* s;
+  grpc_end2end_test_fixture f =
+      begin_test(config, "filter_status_code", nullptr, nullptr);
+  cq_verifier* cqv = cq_verifier_create(f.cq);
+  grpc_op ops[6];
+  grpc_op* op;
+  grpc_metadata_array initial_metadata_recv;
+  grpc_metadata_array trailing_metadata_recv;
+  grpc_metadata_array request_metadata_recv;
+  grpc_call_details call_details;
+  grpc_status_code status;
+  grpc_call_error error;
+  grpc_slice details;
+  int was_cancelled = 2;
+
+  gpr_mu_lock(&g_mu);
+  g_client_status_code = GRPC_STATUS_OK;
+  g_server_status_code = GRPC_STATUS_OK;
+  gpr_mu_unlock(&g_mu);
+
+  gpr_timespec deadline = five_seconds_from_now();
+  c = grpc_channel_create_call(
+      f.client, nullptr, GRPC_PROPAGATE_DEFAULTS, f.cq,
+      grpc_slice_from_static_string("/foo"),
+      get_host_override_slice("foo.test.google.fr", config), deadline, nullptr);
+  GPR_ASSERT(c);
+
+  grpc_metadata_array_init(&initial_metadata_recv);
+  grpc_metadata_array_init(&trailing_metadata_recv);
+  grpc_metadata_array_init(&request_metadata_recv);
+  grpc_call_details_init(&call_details);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->data.send_initial_metadata.metadata = nullptr;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_SEND_CLOSE_FROM_CLIENT;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_RECV_INITIAL_METADATA;
+  op->data.recv_initial_metadata.recv_initial_metadata = &initial_metadata_recv;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_RECV_STATUS_ON_CLIENT;
+  op->data.recv_status_on_client.trailing_metadata = &trailing_metadata_recv;
+  op->data.recv_status_on_client.status = &status;
+  op->data.recv_status_on_client.status_details = &details;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  error = grpc_call_start_batch(c, ops, (size_t)(op - ops), tag(1), nullptr);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  error =
+      grpc_server_request_call(f.server, &s, &call_details,
+                               &request_metadata_recv, f.cq, f.cq, tag(101));
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  CQ_EXPECT_COMPLETION(cqv, tag(101), 1);
+  cq_verify(cqv);
+
+  memset(ops, 0, sizeof(ops));
+  op = ops;
+  op->op = GRPC_OP_SEND_INITIAL_METADATA;
+  op->data.send_initial_metadata.count = 0;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_SEND_STATUS_FROM_SERVER;
+  op->data.send_status_from_server.trailing_metadata_count = 0;
+  op->data.send_status_from_server.status = GRPC_STATUS_UNIMPLEMENTED;
+  grpc_slice status_string = grpc_slice_from_static_string("xyz");
+  op->data.send_status_from_server.status_details = &status_string;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  op->op = GRPC_OP_RECV_CLOSE_ON_SERVER;
+  op->data.recv_close_on_server.cancelled = &was_cancelled;
+  op->flags = 0;
+  op->reserved = nullptr;
+  op++;
+  error = grpc_call_start_batch(s, ops, (size_t)(op - ops), tag(102), nullptr);
+  GPR_ASSERT(GRPC_CALL_OK == error);
+
+  CQ_EXPECT_COMPLETION(cqv, tag(102), 1);
+  CQ_EXPECT_COMPLETION(cqv, tag(1), 1);
+  cq_verify(cqv);
+
+  GPR_ASSERT(status == GRPC_STATUS_UNIMPLEMENTED);
+  GPR_ASSERT(0 == grpc_slice_str_cmp(details, "xyz"));
+
+  grpc_slice_unref(details);
+  grpc_metadata_array_destroy(&initial_metadata_recv);
+  grpc_metadata_array_destroy(&trailing_metadata_recv);
+  grpc_metadata_array_destroy(&request_metadata_recv);
+  grpc_call_details_destroy(&call_details);
+
+  grpc_call_unref(s);
+  grpc_call_unref(c);
+
+  cq_verifier_destroy(cqv);
+
+  end_test(&f);
+  config.tear_down_data(&f);
+
+  // Perform the checks after test tear-down. This guards against the case
+  // where there is still outstanding channel-related work on a call when the
+  // status codes are verified.
+  // TODO(https://github.com/grpc/grpc/issues/13915) enable this for windows
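+  // The client and server filters signal g_client_code_cv / g_server_code_cv
+  // from their destroy_call_elem hooks once the final status has been saved.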
+#ifndef GPR_WINDOWS
+  gpr_mu_lock(&g_mu);
+  if (!g_client_code_recv) {
+    GPR_ASSERT(gpr_cv_wait(&g_client_code_cv, &g_mu,
+                           grpc_timeout_seconds_to_deadline(3)) == 0);
+  }
+  if (!g_server_code_recv) {
+    GPR_ASSERT(gpr_cv_wait(&g_server_code_cv, &g_mu,
+                           grpc_timeout_seconds_to_deadline(3)) == 0);
+  }
+  GPR_ASSERT(g_client_status_code == GRPC_STATUS_UNIMPLEMENTED);
+  GPR_ASSERT(g_server_status_code == GRPC_STATUS_UNIMPLEMENTED);
+  gpr_mu_unlock(&g_mu);
+#endif  // GPR_WINDOWS
+}
+
+/*******************************************************************************
+ * Test status_code filter
+ */
+
+static grpc_error* init_call_elem(grpc_call_element* elem,
+                                  const grpc_call_element_args* args) {
+  return GRPC_ERROR_NONE;
+}
+
+static void client_destroy_call_elem(grpc_call_element* elem,
+                                     const grpc_call_final_info* final_info,
+                                     grpc_closure* ignored) {
+  gpr_mu_lock(&g_mu);
+  g_client_status_code = final_info->final_status;
+  g_client_code_recv = true;
+  gpr_cv_signal(&g_client_code_cv);
+  gpr_mu_unlock(&g_mu);
+}
+
+static void server_destroy_call_elem(grpc_call_element* elem,
+                                     const grpc_call_final_info* final_info,
+                                     grpc_closure* ignored) {
+  gpr_mu_lock(&g_mu);
+  g_server_status_code = final_info->final_status;
+  g_server_code_recv = true;
+  gpr_cv_signal(&g_server_code_cv);
+  gpr_mu_unlock(&g_mu);
+}
+
+static grpc_error* init_channel_elem(grpc_channel_element* elem,
+                                     grpc_channel_element_args* args) {
+  return GRPC_ERROR_NONE;
+}
+
+static void destroy_channel_elem(grpc_channel_element* elem) {}
+
+static const grpc_channel_filter test_client_filter = {
+    grpc_call_next_op,
+    grpc_channel_next_op,
+    0,
+    init_call_elem,
+    grpc_call_stack_ignore_set_pollset_or_pollset_set,
+    client_destroy_call_elem,
+    0,
+    init_channel_elem,
+    destroy_channel_elem,
+    grpc_channel_next_get_info,
+    "client_filter_status_code"};
+
+static const grpc_channel_filter test_server_filter = {
+    grpc_call_next_op,
+    grpc_channel_next_op,
+    0,
+    init_call_elem,
+    grpc_call_stack_ignore_set_pollset_or_pollset_set,
+    server_destroy_call_elem,
+    0,
+    init_channel_elem,
+    destroy_channel_elem,
+    grpc_channel_next_get_info,
+    "server_filter_status_code"};
+
+/*******************************************************************************
+ * Registration
+ */
+
+static bool maybe_add_filter(grpc_channel_stack_builder* builder, void* arg) {
+  grpc_channel_filter* filter = (grpc_channel_filter*)arg;
+  if (g_enable_filter) {
+    // Want to add the filter as close to the end as possible, to make
+    // sure that all of the filters work well together.  However, we
+    // can't add it at the very end, because the
+    // connected_channel/client_channel filter must be the last one.
+    // So we add it right before the last one.
+    grpc_channel_stack_builder_iterator* it =
+        grpc_channel_stack_builder_create_iterator_at_last(builder);
+    GPR_ASSERT(grpc_channel_stack_builder_move_prev(it));
+    const bool retval = grpc_channel_stack_builder_add_filter_before(
+        it, filter, nullptr, nullptr);
+    grpc_channel_stack_builder_iterator_destroy(it);
+    return retval;
+  } else {
+    return true;
+  }
+}
+
+static void init_plugin(void) {
+  gpr_mu_init(&g_mu);
+  gpr_cv_init(&g_client_code_cv);
+  gpr_cv_init(&g_server_code_cv);
+  g_client_code_recv = false;
+  g_server_code_recv = false;
+
+  grpc_channel_init_register_stage(GRPC_CLIENT_CHANNEL, INT_MAX,
+                                   maybe_add_filter,
+                                   (void*)&test_client_filter);
+  grpc_channel_init_register_stage(GRPC_CLIENT_DIRECT_CHANNEL, INT_MAX,
+                                   maybe_add_filter,
+                                   (void*)&test_client_filter);
+  grpc_channel_init_register_stage(GRPC_SERVER_CHANNEL, INT_MAX,
+                                   maybe_add_filter,
+                                   (void*)&test_server_filter);
+}
+
+static void destroy_plugin(void) {
+  gpr_cv_destroy(&g_client_code_cv);
+  gpr_cv_destroy(&g_server_code_cv);
+  gpr_mu_destroy(&g_mu);
+}
+
+void filter_status_code(grpc_end2end_test_config config) {
+  g_enable_filter = true;
+  test_request(config);
+  g_enable_filter = false;
+}
+
+void filter_status_code_pre_init(void) {
+  grpc_register_plugin(init_plugin, destroy_plugin);
+}
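+
+/* Note: this test is wired into the end2end suites via the
+ * 'filter_status_code' entries added to gen_build_yaml.py and
+ * generate_tests.bzl in this change. */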
diff --git a/test/core/end2end/tests/load_reporting_hook.cc b/test/core/end2end/tests/load_reporting_hook.cc
index faabec3..e056bd5 100644
--- a/test/core/end2end/tests/load_reporting_hook.cc
+++ b/test/core/end2end/tests/load_reporting_hook.cc
@@ -300,9 +300,8 @@
                                 &trailing_lr_metadata);
   end_test(&f);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, lr_server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(lr_server_args);
   }
   config.tear_down_data(&f);
 }
diff --git a/test/core/end2end/tests/max_message_length.cc b/test/core/end2end/tests/max_message_length.cc
index f1ac27f..e581f1f 100644
--- a/test/core/end2end/tests/max_message_length.cc
+++ b/test/core/end2end/tests/max_message_length.cc
@@ -173,12 +173,9 @@
   f = begin_test(config, "test_max_request_message_length", client_args,
                  server_args);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    if (client_args != nullptr)
-      grpc_channel_args_destroy(&exec_ctx, client_args);
-    if (server_args != nullptr)
-      grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    if (client_args != nullptr) grpc_channel_args_destroy(client_args);
+    if (server_args != nullptr) grpc_channel_args_destroy(server_args);
   }
 
   cqv = cq_verifier_create(f.cq);
@@ -366,12 +363,9 @@
   f = begin_test(config, "test_max_response_message_length", client_args,
                  server_args);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    if (client_args != nullptr)
-      grpc_channel_args_destroy(&exec_ctx, client_args);
-    if (server_args != nullptr)
-      grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    if (client_args != nullptr) grpc_channel_args_destroy(client_args);
+    if (server_args != nullptr) grpc_channel_args_destroy(server_args);
   }
   cqv = cq_verifier_create(f.cq);
 
diff --git a/test/core/end2end/tests/no_logging.cc b/test/core/end2end/tests/no_logging.cc
index 55d211c..bf86512 100644
--- a/test/core/end2end/tests/no_logging.cc
+++ b/test/core/end2end/tests/no_logging.cc
@@ -36,7 +36,7 @@
 
 static void* tag(intptr_t t) { return (void*)t; }
 
-extern "C" void gpr_default_log(gpr_log_func_args* args);
+void gpr_default_log(gpr_log_func_args* args);
 
 static void test_no_log(gpr_log_func_args* args) {
   char* message = nullptr;
diff --git a/test/core/end2end/tests/stream_compression_compressed_payload.cc b/test/core/end2end/tests/stream_compression_compressed_payload.cc
index d733464..ec3050a 100644
--- a/test/core/end2end/tests/stream_compression_compressed_payload.cc
+++ b/test/core/end2end/tests/stream_compression_compressed_payload.cc
@@ -129,10 +129,9 @@
   server_args = grpc_channel_args_set_stream_compression_algorithm(
       nullptr, GRPC_STREAM_COMPRESS_NONE);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     server_args = grpc_channel_args_stream_compression_algorithm_set_state(
-        &exec_ctx, &server_args, algorithm_to_disable, false);
-    grpc_exec_ctx_finish(&exec_ctx);
+        &server_args, algorithm_to_disable, false);
   }
 
   f = begin_test(config, test_name, client_args, server_args);
@@ -258,10 +257,9 @@
   grpc_byte_buffer_destroy(request_payload_recv);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, client_args);
-    grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(client_args);
+    grpc_channel_args_destroy(server_args);
   }
 
   end_test(&f);
@@ -547,10 +545,9 @@
   cq_verifier_destroy(cqv);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, client_args);
-    grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(client_args);
+    grpc_channel_args_destroy(server_args);
   }
 
   end_test(&f);
diff --git a/test/core/end2end/tests/stream_compression_payload.cc b/test/core/end2end/tests/stream_compression_payload.cc
index 924961e..b95e652 100644
--- a/test/core/end2end/tests/stream_compression_payload.cc
+++ b/test/core/end2end/tests/stream_compression_payload.cc
@@ -277,10 +277,9 @@
   end_test(&f);
   config.tear_down_data(&f);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, client_args);
-    grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(client_args);
+    grpc_channel_args_destroy(server_args);
   }
 }
 
diff --git a/test/core/end2end/tests/stream_compression_ping_pong_streaming.cc b/test/core/end2end/tests/stream_compression_ping_pong_streaming.cc
index d3b526f..2a8799e 100644
--- a/test/core/end2end/tests/stream_compression_ping_pong_streaming.cc
+++ b/test/core/end2end/tests/stream_compression_ping_pong_streaming.cc
@@ -275,10 +275,9 @@
   end_test(&f);
   config.tear_down_data(&f);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, client_args);
-    grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(client_args);
+    grpc_channel_args_destroy(server_args);
   }
 }
 
diff --git a/test/core/end2end/tests/workaround_cronet_compression.cc b/test/core/end2end/tests/workaround_cronet_compression.cc
index bc4d507..d4decce 100644
--- a/test/core/end2end/tests/workaround_cronet_compression.cc
+++ b/test/core/end2end/tests/workaround_cronet_compression.cc
@@ -142,15 +142,14 @@
       nullptr, default_server_channel_compression_algorithm);
 
   if (user_agent_override) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_channel_args* client_args_old = client_args;
     grpc_arg arg;
     arg.key = const_cast<char*>(GRPC_ARG_PRIMARY_USER_AGENT_STRING);
     arg.type = GRPC_ARG_STRING;
     arg.value.string = user_agent_override;
     client_args = grpc_channel_args_copy_and_add(client_args_old, &arg, 1);
-    grpc_channel_args_destroy(&exec_ctx, client_args_old);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_channel_args_destroy(client_args_old);
   }
 
   f = begin_test(config, test_name, client_args, server_args);
@@ -351,10 +350,9 @@
   cq_verifier_destroy(cqv);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, client_args);
-    grpc_channel_args_destroy(&exec_ctx, server_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(client_args);
+    grpc_channel_args_destroy(server_args);
   }
 
   end_test(&f);
diff --git a/test/core/fling/client.cc b/test/core/fling/client.cc
index 544b66d..69fb6dc 100644
--- a/test/core/fling/client.cc
+++ b/test/core/fling/client.cc
@@ -22,15 +22,15 @@
 #include <string.h>
 
 #include <grpc/support/cmdline.h>
-#include <grpc/support/histogram.h>
 #include <grpc/support/log.h>
 #include <grpc/support/time.h>
 #include <grpc/support/useful.h>
 #include "src/core/lib/profiling/timers.h"
 #include "test/core/util/grpc_profiler.h"
+#include "test/core/util/histogram.h"
 #include "test/core/util/test_config.h"
 
-static gpr_histogram* histogram;
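+// Histograms now come from the test utility library (grpc_histogram_*)
+// rather than the public gpr API.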
+static grpc_histogram* histogram;
 static grpc_byte_buffer* the_buffer;
 static grpc_channel* channel;
 static grpc_completion_queue* cq;
@@ -195,7 +195,7 @@
   channel = grpc_insecure_channel_create(target, nullptr, nullptr);
   cq = grpc_completion_queue_create_for_next(nullptr);
   the_buffer = grpc_raw_byte_buffer_create(&slice, (size_t)payload_size);
-  histogram = gpr_histogram_create(0.01, 60e9);
+  histogram = grpc_histogram_create(0.01, 60e9);
 
   sc.init();
 
@@ -213,7 +213,7 @@
     start = now();
     sc.do_one_step();
     stop = now();
-    gpr_histogram_add(histogram, stop - start);
+    grpc_histogram_add(histogram, stop - start);
   }
   grpc_profiler_stop();
 
@@ -232,11 +232,11 @@
   grpc_slice_unref(slice);
 
   gpr_log(GPR_INFO, "latency (50/95/99/99.9): %f/%f/%f/%f",
-          gpr_histogram_percentile(histogram, 50),
-          gpr_histogram_percentile(histogram, 95),
-          gpr_histogram_percentile(histogram, 99),
-          gpr_histogram_percentile(histogram, 99.9));
-  gpr_histogram_destroy(histogram);
+          grpc_histogram_percentile(histogram, 50),
+          grpc_histogram_percentile(histogram, 95),
+          grpc_histogram_percentile(histogram, 99),
+          grpc_histogram_percentile(histogram, 99.9));
+  grpc_histogram_destroy(histogram);
 
   grpc_shutdown();
 
diff --git a/test/core/handshake/client_ssl.cc b/test/core/handshake/client_ssl.cc
index 2b149a7..2302e3d 100644
--- a/test/core/handshake/client_ssl.cc
+++ b/test/core/handshake/client_ssl.cc
@@ -231,7 +231,8 @@
   gpr_thd_id thdid;
   gpr_thd_options_set_joinable(&thdopt);
   server_args args = {server_socket, server_alpn_preferred};
-  GPR_ASSERT(gpr_thd_new(&thdid, server_thread, &args, &thdopt));
+  GPR_ASSERT(gpr_thd_new(&thdid, "grpc_client_ssl_test", server_thread, &args,
+                         &thdopt));
 
   // Load key pair and establish client SSL credentials.
   grpc_ssl_pem_key_cert_pair pem_key_cert_pair;
diff --git a/test/core/handshake/readahead_handshaker_server_ssl.cc b/test/core/handshake/readahead_handshaker_server_ssl.cc
index 2810082..599e0e1 100644
--- a/test/core/handshake/readahead_handshaker_server_ssl.cc
+++ b/test/core/handshake/readahead_handshaker_server_ssl.cc
@@ -49,41 +49,37 @@
  * to the security_handshaker). This test is meant to protect code relying on
  * this functionality that lives outside of this repo. */
 
-static void readahead_handshaker_destroy(grpc_exec_ctx* ctx,
-                                         grpc_handshaker* handshaker) {
+static void readahead_handshaker_destroy(grpc_handshaker* handshaker) {
   gpr_free(handshaker);
 }
 
-static void readahead_handshaker_shutdown(grpc_exec_ctx* ctx,
-                                          grpc_handshaker* handshaker,
+static void readahead_handshaker_shutdown(grpc_handshaker* handshaker,
                                           grpc_error* error) {}
 
 static void readahead_handshaker_do_handshake(
-    grpc_exec_ctx* ctx, grpc_handshaker* handshaker,
-    grpc_tcp_server_acceptor* acceptor, grpc_closure* on_handshake_done,
-    grpc_handshaker_args* args) {
-  grpc_endpoint_read(ctx, args->endpoint, args->read_buffer, on_handshake_done);
+    grpc_handshaker* handshaker, grpc_tcp_server_acceptor* acceptor,
+    grpc_closure* on_handshake_done, grpc_handshaker_args* args) {
+  grpc_endpoint_read(args->endpoint, args->read_buffer, on_handshake_done);
 }
 
 const grpc_handshaker_vtable readahead_handshaker_vtable = {
     readahead_handshaker_destroy, readahead_handshaker_shutdown,
     readahead_handshaker_do_handshake};
 
-static grpc_handshaker* readahead_handshaker_create(grpc_exec_ctx* ctx) {
+static grpc_handshaker* readahead_handshaker_create() {
   grpc_handshaker* h = (grpc_handshaker*)gpr_zalloc(sizeof(grpc_handshaker));
   grpc_handshaker_init(&readahead_handshaker_vtable, h);
   return h;
 }
 
 static void readahead_handshaker_factory_add_handshakers(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* hf,
-    const grpc_channel_args* args, grpc_handshake_manager* handshake_mgr) {
-  grpc_handshake_manager_add(handshake_mgr,
-                             readahead_handshaker_create(exec_ctx));
+    grpc_handshaker_factory* hf, const grpc_channel_args* args,
+    grpc_handshake_manager* handshake_mgr) {
+  grpc_handshake_manager_add(handshake_mgr, readahead_handshaker_create());
 }
 
 static void readahead_handshaker_factory_destroy(
-    grpc_exec_ctx* exec_ctx, grpc_handshaker_factory* handshaker_factory) {}
+    grpc_handshaker_factory* handshaker_factory) {}
 
 static const grpc_handshaker_factory_vtable
     readahead_handshaker_factory_vtable = {
diff --git a/test/core/handshake/server_ssl_common.cc b/test/core/handshake/server_ssl_common.cc
index 599b281..0bf453a 100644
--- a/test/core/handshake/server_ssl_common.cc
+++ b/test/core/handshake/server_ssl_common.cc
@@ -137,7 +137,8 @@
   gpr_thd_options thdopt = gpr_thd_options_default();
   gpr_thd_id thdid;
   gpr_thd_options_set_joinable(&thdopt);
-  GPR_ASSERT(gpr_thd_new(&thdid, server_thread, &port, &thdopt));
+  GPR_ASSERT(
+      gpr_thd_new(&thdid, "grpc_ssl_test", server_thread, &port, &thdopt));
 
   SSL_load_error_strings();
   OpenSSL_add_ssl_algorithms();
diff --git a/test/core/http/format_request_test.cc b/test/core/http/format_request_test.cc
index 684738a..353e138 100644
--- a/test/core/http/format_request_test.cc
+++ b/test/core/http/format_request_test.cc
@@ -139,11 +139,13 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   test_format_get_request();
   test_format_post_request();
   test_format_post_request_no_body();
   test_format_post_request_content_type_override();
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/http/httpcli_test.cc b/test/core/http/httpcli_test.cc
index 81e9374..259e3aa 100644
--- a/test/core/http/httpcli_test.cc
+++ b/test/core/http/httpcli_test.cc
@@ -40,7 +40,7 @@
       grpc_timeout_seconds_to_deadline(seconds));
 }
 
-static void on_finish(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_finish(void* arg, grpc_error* error) {
   const char* expect =
       "<html><head><title>Hello world!</title></head>"
       "<body><p>This is a test</p></body></html>";
@@ -53,15 +53,14 @@
   g_done = 1;
   GPR_ASSERT(GRPC_LOG_IF_ERROR(
       "pollset_kick",
-      grpc_pollset_kick(exec_ctx, grpc_polling_entity_pollset(&g_pops),
-                        nullptr)));
+      grpc_pollset_kick(grpc_polling_entity_pollset(&g_pops), nullptr)));
   gpr_mu_unlock(g_mu);
 }
 
 static void test_get(int port) {
   grpc_httpcli_request req;
   char* host;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   g_done = 0;
   gpr_log(GPR_INFO, "test_get");
@@ -78,19 +77,18 @@
   memset(&response, 0, sizeof(response));
   grpc_resource_quota* resource_quota = grpc_resource_quota_create("test_get");
   grpc_httpcli_get(
-      &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
+      &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
       GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
       &response);
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
-                          &worker, n_seconds_time(1))));
+        "pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&g_pops),
+                                          &worker, n_seconds_time(1))));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   gpr_mu_unlock(g_mu);
@@ -101,7 +99,7 @@
 static void test_post(int port) {
   grpc_httpcli_request req;
   char* host;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   g_done = 0;
   gpr_log(GPR_INFO, "test_post");
@@ -118,20 +116,18 @@
   memset(&response, 0, sizeof(response));
   grpc_resource_quota* resource_quota = grpc_resource_quota_create("test_post");
   grpc_httpcli_post(
-      &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
-      n_seconds_time(15),
+      &g_context, &g_pops, resource_quota, &req, "hello", 5, n_seconds_time(15),
       GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
       &response);
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
-                          &worker, n_seconds_time(1))));
+        "pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&g_pops),
+                                          &worker, n_seconds_time(1))));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   gpr_mu_unlock(g_mu);
@@ -139,69 +135,69 @@
   grpc_http_response_destroy(&response);
 }
 
-static void destroy_pops(grpc_exec_ctx* exec_ctx, void* p, grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, grpc_polling_entity_pollset(
-                                     static_cast<grpc_polling_entity*>(p)));
+static void destroy_pops(void* p, grpc_error* error) {
+  grpc_pollset_destroy(
+      grpc_polling_entity_pollset(static_cast<grpc_polling_entity*>(p)));
 }
 
 int main(int argc, char** argv) {
-  grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   gpr_subprocess* server;
-  char* me = argv[0];
-  char* lslash = strrchr(me, '/');
-  char* args[4];
-  int port = grpc_pick_unused_port_or_die();
-  int arg_shift = 0;
-  /* figure out where we are */
-  char* root;
-  if (lslash) {
-    root = static_cast<char*>(gpr_malloc((size_t)(lslash - me + 1)));
-    memcpy(root, me, (size_t)(lslash - me));
-    root[lslash - me] = 0;
-  } else {
-    root = gpr_strdup(".");
-  }
-
-  GPR_ASSERT(argc <= 2);
-  if (argc == 2) {
-    args[0] = gpr_strdup(argv[1]);
-  } else {
-    arg_shift = 1;
-    gpr_asprintf(&args[0], "%s/../../tools/distrib/python_wrapper.sh", root);
-    gpr_asprintf(&args[1], "%s/../../test/core/http/test_server.py", root);
-  }
-
-  /* start the server */
-  args[1 + arg_shift] = const_cast<char*>("--port");
-  gpr_asprintf(&args[2 + arg_shift], "%d", port);
-  server = gpr_subprocess_create(3 + arg_shift, (const char**)args);
-  GPR_ASSERT(server);
-  gpr_free(args[0]);
-  if (arg_shift) gpr_free(args[1]);
-  gpr_free(args[2 + arg_shift]);
-  gpr_free(root);
-
-  gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
-                               gpr_time_from_seconds(5, GPR_TIMESPAN)));
-
   grpc_test_init(argc, argv);
   grpc_init();
-  grpc_httpcli_context_init(&g_context);
-  grpc_pollset* pollset =
-      static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
-  grpc_pollset_init(pollset, &g_mu);
-  g_pops = grpc_polling_entity_create_from_pollset(pollset);
+  {
+    grpc_closure destroyed;
+    grpc_core::ExecCtx exec_ctx;
+    char* me = argv[0];
+    char* lslash = strrchr(me, '/');
+    char* args[4];
+    int port = grpc_pick_unused_port_or_die();
+    int arg_shift = 0;
+    /* figure out where we are */
+    char* root;
+    if (lslash) {
+      root = static_cast<char*>(gpr_malloc((size_t)(lslash - me + 1)));
+      memcpy(root, me, (size_t)(lslash - me));
+      root[lslash - me] = 0;
+    } else {
+      root = gpr_strdup(".");
+    }
 
-  test_get(port);
-  test_post(port);
+    GPR_ASSERT(argc <= 2);
+    if (argc == 2) {
+      args[0] = gpr_strdup(argv[1]);
+    } else {
+      arg_shift = 1;
+      gpr_asprintf(&args[0], "%s/../../tools/distrib/python_wrapper.sh", root);
+      gpr_asprintf(&args[1], "%s/../../test/core/http/test_server.py", root);
+    }
 
-  grpc_httpcli_context_destroy(&exec_ctx, &g_context);
-  GRPC_CLOSURE_INIT(&destroyed, destroy_pops, &g_pops,
-                    grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
-                        &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
+    /* start the server */
+    args[1 + arg_shift] = const_cast<char*>("--port");
+    gpr_asprintf(&args[2 + arg_shift], "%d", port);
+    server = gpr_subprocess_create(3 + arg_shift, (const char**)args);
+    GPR_ASSERT(server);
+    gpr_free(args[0]);
+    if (arg_shift) gpr_free(args[1]);
+    gpr_free(args[2 + arg_shift]);
+    gpr_free(root);
+
+    gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+                                 gpr_time_from_seconds(5, GPR_TIMESPAN)));
+
+    grpc_httpcli_context_init(&g_context);
+    grpc_pollset* pollset =
+        static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
+    grpc_pollset_init(pollset, &g_mu);
+    g_pops = grpc_polling_entity_create_from_pollset(pollset);
+
+    test_get(port);
+    test_post(port);
+
+    grpc_httpcli_context_destroy(&g_context);
+    GRPC_CLOSURE_INIT(&destroyed, destroy_pops, &g_pops,
+                      grpc_schedule_on_exec_ctx);
+    grpc_pollset_shutdown(grpc_polling_entity_pollset(&g_pops), &destroyed);
+  }
   grpc_shutdown();
 
   gpr_free(grpc_polling_entity_pollset(&g_pops));
diff --git a/test/core/http/httpscli_test.cc b/test/core/http/httpscli_test.cc
index da8405c..adf69f1 100644
--- a/test/core/http/httpscli_test.cc
+++ b/test/core/http/httpscli_test.cc
@@ -40,7 +40,7 @@
       grpc_timeout_seconds_to_deadline(seconds));
 }
 
-static void on_finish(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void on_finish(void* arg, grpc_error* error) {
   const char* expect =
       "<html><head><title>Hello world!</title></head>"
       "<body><p>This is a test</p></body></html>";
@@ -53,15 +53,14 @@
   g_done = 1;
   GPR_ASSERT(GRPC_LOG_IF_ERROR(
       "pollset_kick",
-      grpc_pollset_kick(exec_ctx, grpc_polling_entity_pollset(&g_pops),
-                        nullptr)));
+      grpc_pollset_kick(grpc_polling_entity_pollset(&g_pops), nullptr)));
   gpr_mu_unlock(g_mu);
 }
 
 static void test_get(int port) {
   grpc_httpcli_request req;
   char* host;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   g_done = 0;
   gpr_log(GPR_INFO, "test_get");
@@ -79,19 +78,18 @@
   memset(&response, 0, sizeof(response));
   grpc_resource_quota* resource_quota = grpc_resource_quota_create("test_get");
   grpc_httpcli_get(
-      &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
+      &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
       GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
       &response);
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
-                          &worker, n_seconds_time(1))));
+        "pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&g_pops),
+                                          &worker, n_seconds_time(1))));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(g_mu);
   }
   gpr_mu_unlock(g_mu);
@@ -102,7 +100,7 @@
 static void test_post(int port) {
   grpc_httpcli_request req;
   char* host;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   g_done = 0;
   gpr_log(GPR_INFO, "test_post");
@@ -120,20 +118,18 @@
   memset(&response, 0, sizeof(response));
   grpc_resource_quota* resource_quota = grpc_resource_quota_create("test_post");
   grpc_httpcli_post(
-      &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
-      n_seconds_time(15),
+      &g_context, &g_pops, resource_quota, &req, "hello", 5, n_seconds_time(15),
       GRPC_CLOSURE_CREATE(on_finish, &response, grpc_schedule_on_exec_ctx),
       &response);
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
+  grpc_resource_quota_unref_internal(resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
-                          &worker, n_seconds_time(1))));
+        "pollset_work", grpc_pollset_work(grpc_polling_entity_pollset(&g_pops),
+                                          &worker, n_seconds_time(1))));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(g_mu);
   }
   gpr_mu_unlock(g_mu);
@@ -141,14 +137,13 @@
   grpc_http_response_destroy(&response);
 }
 
-static void destroy_pops(grpc_exec_ctx* exec_ctx, void* p, grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, grpc_polling_entity_pollset(
-                                     static_cast<grpc_polling_entity*>(p)));
+static void destroy_pops(void* p, grpc_error* error) {
+  grpc_pollset_destroy(
+      grpc_polling_entity_pollset(static_cast<grpc_polling_entity*>(p)));
 }
 
 int main(int argc, char** argv) {
   grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   gpr_subprocess* server;
   char* me = argv[0];
   char* lslash = strrchr(me, '/');
@@ -199,12 +194,13 @@
   test_get(port);
   test_post(port);
 
-  grpc_httpcli_context_destroy(&exec_ctx, &g_context);
-  GRPC_CLOSURE_INIT(&destroyed, destroy_pops, &g_pops,
-                    grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
-                        &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    grpc_httpcli_context_destroy(&g_context);
+    GRPC_CLOSURE_INIT(&destroyed, destroy_pops, &g_pops,
+                      grpc_schedule_on_exec_ctx);
+    grpc_pollset_shutdown(grpc_polling_entity_pollset(&g_pops), &destroyed);
+  }
   grpc_shutdown();
 
   gpr_free(grpc_polling_entity_pollset(&g_pops));
diff --git a/test/core/http/parser_test.cc b/test/core/http/parser_test.cc
index 0b60e36..18f1985 100644
--- a/test/core/http/parser_test.cc
+++ b/test/core/http/parser_test.cc
@@ -21,6 +21,7 @@
 #include <stdarg.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
@@ -217,6 +218,7 @@
   char *tmp1, *tmp2;
 
   grpc_test_init(argc, argv);
+  grpc_init();
 
   for (i = 0; i < GPR_ARRAY_SIZE(split_modes); i++) {
     test_succeeds(split_modes[i],
@@ -300,5 +302,6 @@
     gpr_free(tmp2);
   }
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/http/request_fuzzer.cc b/test/core/http/request_fuzzer.cc
index 368ac1b..9798cfb 100644
--- a/test/core/http/request_fuzzer.cc
+++ b/test/core/http/request_fuzzer.cc
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 
 #include "src/core/lib/http/parser.h"
@@ -30,6 +31,7 @@
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
   grpc_http_parser parser;
   grpc_http_request request;
+  grpc_init();
   memset(&request, 0, sizeof(request));
   grpc_http_parser_init(&parser, GRPC_HTTP_REQUEST, &request);
   grpc_slice slice = grpc_slice_from_copied_buffer((const char*)data, size);
@@ -38,5 +40,6 @@
   grpc_slice_unref(slice);
   grpc_http_parser_destroy(&parser);
   grpc_http_request_destroy(&request);
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/http/response_fuzzer.cc b/test/core/http/response_fuzzer.cc
index 2a793fd..fc0904b 100644
--- a/test/core/http/response_fuzzer.cc
+++ b/test/core/http/response_fuzzer.cc
@@ -19,6 +19,7 @@
 #include <stdint.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 
 #include "src/core/lib/http/parser.h"
@@ -29,6 +30,7 @@
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
   grpc_http_parser parser;
   grpc_http_response response;
+  grpc_init();
   memset(&response, 0, sizeof(response));
   grpc_http_parser_init(&parser, GRPC_HTTP_RESPONSE, &response);
   grpc_slice slice = grpc_slice_from_copied_buffer((const char*)data, size);
@@ -37,5 +39,6 @@
   grpc_slice_unref(slice);
   grpc_http_parser_destroy(&parser);
   grpc_http_response_destroy(&response);
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/iomgr/combiner_test.cc b/test/core/iomgr/combiner_test.cc
index 146a6bd..891008c 100644
--- a/test/core/iomgr/combiner_test.cc
+++ b/test/core/iomgr/combiner_test.cc
@@ -28,13 +28,11 @@
 
 static void test_no_op(void) {
   gpr_log(GPR_DEBUG, "test_no_op");
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GRPC_COMBINER_UNREF(&exec_ctx, grpc_combiner_create(), "test_no_op");
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GRPC_COMBINER_UNREF(grpc_combiner_create(), "test_no_op");
 }
 
-static void set_event_to_true(grpc_exec_ctx* exec_ctx, void* value,
-                              grpc_error* error) {
+static void set_event_to_true(void* value, grpc_error* error) {
   gpr_event_set(static_cast<gpr_event*>(value), (void*)1);
 }
 
@@ -44,16 +42,14 @@
   grpc_combiner* lock = grpc_combiner_create();
   gpr_event done;
   gpr_event_init(&done);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GRPC_CLOSURE_SCHED(&exec_ctx,
-                     GRPC_CLOSURE_CREATE(set_event_to_true, &done,
+  grpc_core::ExecCtx exec_ctx;
+  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(set_event_to_true, &done,
                                          grpc_combiner_scheduler(lock)),
                      GRPC_ERROR_NONE);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(gpr_event_wait(&done, grpc_timeout_seconds_to_deadline(5)) !=
              nullptr);
-  GRPC_COMBINER_UNREF(&exec_ctx, lock, "test_execute_one");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_COMBINER_UNREF(lock, "test_execute_one");
 }
 
 typedef struct {
@@ -67,7 +63,7 @@
   size_t value;
 } ex_args;
 
-static void check_one(grpc_exec_ctx* exec_ctx, void* a, grpc_error* error) {
+static void check_one(void* a, grpc_error* error) {
   ex_args* args = static_cast<ex_args*>(a);
   GPR_ASSERT(*args->ctr == args->value - 1);
   *args->ctr = args->value;
@@ -76,28 +72,25 @@
 
 static void execute_many_loop(void* a) {
   thd_args* args = static_cast<thd_args*>(a);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   size_t n = 1;
   for (size_t i = 0; i < 10; i++) {
     for (size_t j = 0; j < 10000; j++) {
       ex_args* c = static_cast<ex_args*>(gpr_malloc(sizeof(*c)));
       c->ctr = &args->ctr;
       c->value = n++;
-      GRPC_CLOSURE_SCHED(&exec_ctx,
-                         GRPC_CLOSURE_CREATE(
+      GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(
                              check_one, c, grpc_combiner_scheduler(args->lock)),
                          GRPC_ERROR_NONE);
-      grpc_exec_ctx_flush(&exec_ctx);
+      grpc_core::ExecCtx::Get()->Flush();
     }
     // sleep for a little bit, to test a combiner draining and another thread
     // picking it up
     gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(100));
   }
-  GRPC_CLOSURE_SCHED(&exec_ctx,
-                     GRPC_CLOSURE_CREATE(set_event_to_true, &args->done,
+  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(set_event_to_true, &args->done,
                                          grpc_combiner_scheduler(args->lock)),
                      GRPC_ERROR_NONE);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_execute_many(void) {
@@ -112,27 +105,26 @@
     ta[i].ctr = 0;
     ta[i].lock = lock;
     gpr_event_init(&ta[i].done);
-    GPR_ASSERT(gpr_thd_new(&thds[i], execute_many_loop, &ta[i], &options));
+    GPR_ASSERT(gpr_thd_new(&thds[i], "grpc_execute_many", execute_many_loop,
+                           &ta[i], &options));
   }
   for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
     GPR_ASSERT(gpr_event_wait(&ta[i].done,
                               gpr_inf_future(GPR_CLOCK_REALTIME)) != nullptr);
     gpr_thd_join(thds[i]);
   }
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GRPC_COMBINER_UNREF(&exec_ctx, lock, "test_execute_many");
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GRPC_COMBINER_UNREF(lock, "test_execute_many");
 }
 
 static gpr_event got_in_finally;
 
-static void in_finally(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void in_finally(void* arg, grpc_error* error) {
   gpr_event_set(&got_in_finally, (void*)1);
 }
 
-static void add_finally(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
-  GRPC_CLOSURE_SCHED(exec_ctx,
-                     GRPC_CLOSURE_CREATE(in_finally, arg,
+static void add_finally(void* arg, grpc_error* error) {
+  GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(in_finally, arg,
                                          grpc_combiner_finally_scheduler(
                                              static_cast<grpc_combiner*>(arg))),
                      GRPC_ERROR_NONE);
@@ -142,17 +134,15 @@
   gpr_log(GPR_DEBUG, "test_execute_finally");
 
   grpc_combiner* lock = grpc_combiner_create();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_event_init(&got_in_finally);
   GRPC_CLOSURE_SCHED(
-      &exec_ctx,
       GRPC_CLOSURE_CREATE(add_finally, lock, grpc_combiner_scheduler(lock)),
       GRPC_ERROR_NONE);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(gpr_event_wait(&got_in_finally,
                             grpc_timeout_seconds_to_deadline(5)) != nullptr);
-  GRPC_COMBINER_UNREF(&exec_ctx, lock, "test_execute_finally");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_COMBINER_UNREF(lock, "test_execute_finally");
 }
 
 int main(int argc, char** argv) {
diff --git a/test/core/iomgr/endpoint_pair_test.cc b/test/core/iomgr/endpoint_pair_test.cc
index 30a0cb5..90dd40d 100644
--- a/test/core/iomgr/endpoint_pair_test.cc
+++ b/test/core/iomgr/endpoint_pair_test.cc
@@ -32,7 +32,7 @@
 
 static grpc_endpoint_test_fixture create_fixture_endpoint_pair(
     size_t slice_size) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_endpoint_test_fixture f;
   grpc_arg a[1];
   a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
@@ -43,9 +43,8 @@
 
   f.client_ep = p.client;
   f.server_ep = p.server;
-  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
-  grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_endpoint_add_to_pollset(f.client_ep, g_pollset);
+  grpc_endpoint_add_to_pollset(f.server_ep, g_pollset);
 
   return f;
 }
@@ -54,23 +53,23 @@
     {"tcp/tcp_socketpair", create_fixture_endpoint_pair, clean_up},
 };
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(p));
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
 }
 
 int main(int argc, char** argv) {
   grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_test_init(argc, argv);
   grpc_init();
-  g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
-  grpc_pollset_init(g_pollset, &g_mu);
-  grpc_endpoint_tests(configs[0], g_pollset, g_mu);
-  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
-                    grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
+    grpc_pollset_init(g_pollset, &g_mu);
+    grpc_endpoint_tests(configs[0], g_pollset, g_mu);
+    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
+                      grpc_schedule_on_exec_ctx);
+    grpc_pollset_shutdown(g_pollset, &destroyed);
+  }
   grpc_shutdown();
   gpr_free(g_pollset);
 
diff --git a/test/core/iomgr/endpoint_tests.cc b/test/core/iomgr/endpoint_tests.cc
index 026e341..8ccae52 100644
--- a/test/core/iomgr/endpoint_tests.cc
+++ b/test/core/iomgr/endpoint_tests.cc
@@ -115,8 +115,7 @@
   grpc_closure done_write;
 };
 
-static void read_and_write_test_read_handler(grpc_exec_ctx* exec_ctx,
-                                             void* data, grpc_error* error) {
+static void read_and_write_test_read_handler(void* data, grpc_error* error) {
   struct read_and_write_test_state* state =
       (struct read_and_write_test_state*)data;
 
@@ -126,17 +125,14 @@
     gpr_log(GPR_INFO, "Read handler done");
     gpr_mu_lock(g_mu);
     state->read_done = 1 + (error == GRPC_ERROR_NONE);
-    GRPC_LOG_IF_ERROR("pollset_kick",
-                      grpc_pollset_kick(exec_ctx, g_pollset, nullptr));
+    GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr));
     gpr_mu_unlock(g_mu);
   } else if (error == GRPC_ERROR_NONE) {
-    grpc_endpoint_read(exec_ctx, state->read_ep, &state->incoming,
-                       &state->done_read);
+    grpc_endpoint_read(state->read_ep, &state->incoming, &state->done_read);
   }
 }
 
-static void read_and_write_test_write_handler(grpc_exec_ctx* exec_ctx,
-                                              void* data, grpc_error* error) {
+static void read_and_write_test_write_handler(void* data, grpc_error* error) {
   struct read_and_write_test_state* state =
       (struct read_and_write_test_state*)data;
   grpc_slice* slices = nullptr;
@@ -153,7 +149,7 @@
                                &state->current_write_data);
       grpc_slice_buffer_reset_and_unref(&state->outgoing);
       grpc_slice_buffer_addn(&state->outgoing, slices, nslices);
-      grpc_endpoint_write(exec_ctx, state->write_ep, &state->outgoing,
+      grpc_endpoint_write(state->write_ep, &state->outgoing,
                           &state->done_write);
       gpr_free(slices);
       return;
@@ -163,8 +159,7 @@
   gpr_log(GPR_INFO, "Write handler done");
   gpr_mu_lock(g_mu);
   state->write_done = 1 + (error == GRPC_ERROR_NONE);
-  GRPC_LOG_IF_ERROR("pollset_kick",
-                    grpc_pollset_kick(exec_ctx, g_pollset, nullptr));
+  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr));
   gpr_mu_unlock(g_mu);
 }
 
@@ -178,7 +173,7 @@
   struct read_and_write_test_state state;
   grpc_endpoint_test_fixture f =
       begin_test(config, "read_and_write_test", slice_size);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_millis deadline =
       grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
   gpr_log(GPR_DEBUG,
@@ -217,66 +212,57 @@
      for the first iteration as for later iterations. It does the right thing
      even when bytes_written is unsigned. */
   state.bytes_written -= state.current_write_size;
-  read_and_write_test_write_handler(&exec_ctx, &state, GRPC_ERROR_NONE);
-  grpc_exec_ctx_flush(&exec_ctx);
+  read_and_write_test_write_handler(&state, GRPC_ERROR_NONE);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  grpc_endpoint_read(&exec_ctx, state.read_ep, &state.incoming,
-                     &state.done_read);
+  grpc_endpoint_read(state.read_ep, &state.incoming, &state.done_read);
 
   if (shutdown) {
     gpr_log(GPR_DEBUG, "shutdown read");
     grpc_endpoint_shutdown(
-        &exec_ctx, state.read_ep,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
+        state.read_ep, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
     gpr_log(GPR_DEBUG, "shutdown write");
     grpc_endpoint_shutdown(
-        &exec_ctx, state.write_ep,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
+        state.write_ep, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
   }
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
 
   gpr_mu_lock(g_mu);
   while (!state.read_done || !state.write_done) {
     grpc_pollset_worker* worker = nullptr;
-    GPR_ASSERT(grpc_exec_ctx_now(&exec_ctx) < deadline);
+    GPR_ASSERT(grpc_core::ExecCtx::Get()->Now() < deadline);
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
+        "pollset_work", grpc_pollset_work(g_pollset, &worker, deadline)));
   }
   gpr_mu_unlock(g_mu);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
 
   end_test(config);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.outgoing);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
-  grpc_endpoint_destroy(&exec_ctx, state.read_ep);
-  grpc_endpoint_destroy(&exec_ctx, state.write_ep);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_slice_buffer_destroy_internal(&state.outgoing);
+  grpc_slice_buffer_destroy_internal(&state.incoming);
+  grpc_endpoint_destroy(state.read_ep);
+  grpc_endpoint_destroy(state.write_ep);
 }
 
-static void inc_on_failure(grpc_exec_ctx* exec_ctx, void* arg,
-                           grpc_error* error) {
+static void inc_on_failure(void* arg, grpc_error* error) {
   gpr_mu_lock(g_mu);
   *(int*)arg += (error != GRPC_ERROR_NONE);
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(g_pollset, nullptr)));
   gpr_mu_unlock(g_mu);
 }
 
-static void wait_for_fail_count(grpc_exec_ctx* exec_ctx, int* fail_count,
-                                int want_fail_count) {
-  grpc_exec_ctx_flush(exec_ctx);
+static void wait_for_fail_count(int* fail_count, int want_fail_count) {
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_mu_lock(g_mu);
   grpc_millis deadline =
       grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10));
-  while (grpc_exec_ctx_now(exec_ctx) < deadline &&
+  while (grpc_core::ExecCtx::Get()->Now() < deadline &&
          *fail_count < want_fail_count) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(exec_ctx, g_pollset, &worker, deadline)));
+        "pollset_work", grpc_pollset_work(g_pollset, &worker, deadline)));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(g_mu);
   }
   GPR_ASSERT(*fail_count == want_fail_count);
@@ -291,33 +277,32 @@
   grpc_slice_buffer slice_buffer;
   grpc_slice_buffer_init(&slice_buffer);
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
-  grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
+  grpc_core::ExecCtx exec_ctx;
+  grpc_endpoint_add_to_pollset(f.client_ep, g_pollset);
+  grpc_endpoint_read(f.client_ep, &slice_buffer,
                      GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                          grpc_schedule_on_exec_ctx));
-  wait_for_fail_count(&exec_ctx, &fail_count, 0);
-  grpc_endpoint_shutdown(&exec_ctx, f.client_ep,
+  wait_for_fail_count(&fail_count, 0);
+  grpc_endpoint_shutdown(f.client_ep,
                          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
-  wait_for_fail_count(&exec_ctx, &fail_count, 1);
-  grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
+  wait_for_fail_count(&fail_count, 1);
+  grpc_endpoint_read(f.client_ep, &slice_buffer,
                      GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                          grpc_schedule_on_exec_ctx));
-  wait_for_fail_count(&exec_ctx, &fail_count, 2);
+  wait_for_fail_count(&fail_count, 2);
   grpc_slice_buffer_add(&slice_buffer, grpc_slice_from_copied_string("a"));
-  grpc_endpoint_write(&exec_ctx, f.client_ep, &slice_buffer,
+  grpc_endpoint_write(f.client_ep, &slice_buffer,
                       GRPC_CLOSURE_CREATE(inc_on_failure, &fail_count,
                                           grpc_schedule_on_exec_ctx));
-  wait_for_fail_count(&exec_ctx, &fail_count, 3);
-  grpc_endpoint_shutdown(&exec_ctx, f.client_ep,
+  wait_for_fail_count(&fail_count, 3);
+  grpc_endpoint_shutdown(f.client_ep,
                          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Test Shutdown"));
-  wait_for_fail_count(&exec_ctx, &fail_count, 3);
+  wait_for_fail_count(&fail_count, 3);
 
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &slice_buffer);
+  grpc_slice_buffer_destroy_internal(&slice_buffer);
 
-  grpc_endpoint_destroy(&exec_ctx, f.client_ep);
-  grpc_endpoint_destroy(&exec_ctx, f.server_ep);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_endpoint_destroy(f.client_ep);
+  grpc_endpoint_destroy(f.server_ep);
 }
 
 void grpc_endpoint_tests(grpc_endpoint_test_config config,
diff --git a/test/core/iomgr/ev_epollsig_linux_test.cc b/test/core/iomgr/ev_epollsig_linux_test.cc
index ac8b2f4..e767e01 100644
--- a/test/core/iomgr/ev_epollsig_linux_test.cc
+++ b/test/core/iomgr/ev_epollsig_linux_test.cc
@@ -70,19 +70,18 @@
   }
 }
 
-static void test_fd_cleanup(grpc_exec_ctx* exec_ctx, test_fd* tfds,
-                            int num_fds) {
+static void test_fd_cleanup(test_fd* tfds, int num_fds) {
   int release_fd;
   int i;
 
   for (i = 0; i < num_fds; i++) {
-    grpc_fd_shutdown(exec_ctx, tfds[i].fd,
+    grpc_fd_shutdown(tfds[i].fd,
                      GRPC_ERROR_CREATE_FROM_STATIC_STRING("test_fd_cleanup"));
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
 
-    grpc_fd_orphan(exec_ctx, tfds[i].fd, nullptr, &release_fd,
-                   false /* already_closed */, "test_fd_cleanup");
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_fd_orphan(tfds[i].fd, nullptr, &release_fd, false /* already_closed */,
+                   "test_fd_cleanup");
+    grpc_core::ExecCtx::Get()->Flush();
 
     GPR_ASSERT(release_fd == tfds[i].inner_fd);
     close(tfds[i].inner_fd);
@@ -98,22 +97,20 @@
   }
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, (grpc_pollset*)p);
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy((grpc_pollset*)p);
 }
 
-static void test_pollset_cleanup(grpc_exec_ctx* exec_ctx,
-                                 test_pollset* pollsets, int num_pollsets) {
+static void test_pollset_cleanup(test_pollset* pollsets, int num_pollsets) {
   grpc_closure destroyed;
   int i;
 
   for (i = 0; i < num_pollsets; i++) {
     GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, pollsets[i].pollset,
                       grpc_schedule_on_exec_ctx);
-    grpc_pollset_shutdown(exec_ctx, pollsets[i].pollset, &destroyed);
+    grpc_pollset_shutdown(pollsets[i].pollset, &destroyed);
 
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_free(pollsets[i].pollset);
   }
 }
@@ -133,7 +130,7 @@
 #define NUM_POLLSETS 4
 
 static void test_add_fd_to_pollset() {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   test_fd tfds[NUM_FDS];
   int fds[NUM_FDS];
   test_pollset pollsets[NUM_POLLSETS];
@@ -170,33 +167,33 @@
 
   /* == Step 1 == */
   for (i = 0; i <= 2; i++) {
-    grpc_pollset_add_fd(&exec_ctx, pollsets[0].pollset, tfds[i].fd);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_pollset_add_fd(pollsets[0].pollset, tfds[i].fd);
+    grpc_core::ExecCtx::Get()->Flush();
   }
 
   for (i = 3; i <= 4; i++) {
-    grpc_pollset_add_fd(&exec_ctx, pollsets[1].pollset, tfds[i].fd);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_pollset_add_fd(pollsets[1].pollset, tfds[i].fd);
+    grpc_core::ExecCtx::Get()->Flush();
   }
 
   for (i = 5; i <= 7; i++) {
-    grpc_pollset_add_fd(&exec_ctx, pollsets[2].pollset, tfds[i].fd);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_pollset_add_fd(pollsets[2].pollset, tfds[i].fd);
+    grpc_core::ExecCtx::Get()->Flush();
   }
 
   /* == Step 2 == */
   for (i = 0; i <= 1; i++) {
-    grpc_pollset_add_fd(&exec_ctx, pollsets[3].pollset, tfds[i].fd);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_pollset_add_fd(pollsets[3].pollset, tfds[i].fd);
+    grpc_core::ExecCtx::Get()->Flush();
   }
 
   /* == Step 3 == */
-  grpc_pollset_add_fd(&exec_ctx, pollsets[1].pollset, tfds[0].fd);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_pollset_add_fd(pollsets[1].pollset, tfds[0].fd);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* == Step 4 == */
-  grpc_pollset_add_fd(&exec_ctx, pollsets[2].pollset, tfds[3].fd);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_pollset_add_fd(pollsets[2].pollset, tfds[3].fd);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* All polling islands are merged at this point */
 
@@ -213,9 +210,8 @@
         expected_pi, grpc_pollset_get_polling_island(pollsets[i].pollset)));
   }
 
-  test_fd_cleanup(&exec_ctx, tfds, NUM_FDS);
-  test_pollset_cleanup(&exec_ctx, pollsets, NUM_POLLSETS);
-  grpc_exec_ctx_finish(&exec_ctx);
+  test_fd_cleanup(tfds, NUM_FDS);
+  test_pollset_cleanup(pollsets, NUM_POLLSETS);
 }
 
 #undef NUM_FDS
@@ -235,26 +231,24 @@
 static void test_threading_loop(void* arg) {
   threading_shared* shared = static_cast<threading_shared*>(arg);
   while (thread_wakeups < 1000000) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_pollset_worker* worker;
     gpr_mu_lock(shared->mu);
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work", grpc_pollset_work(&exec_ctx, shared->pollset, &worker,
-                                          GRPC_MILLIS_INF_FUTURE)));
+        "pollset_work",
+        grpc_pollset_work(shared->pollset, &worker, GRPC_MILLIS_INF_FUTURE)));
     gpr_mu_unlock(shared->mu);
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 }
 
-static void test_threading_wakeup(grpc_exec_ctx* exec_ctx, void* arg,
-                                  grpc_error* error) {
+static void test_threading_wakeup(void* arg, grpc_error* error) {
   threading_shared* shared = static_cast<threading_shared*>(arg);
   ++shared->wakeups;
   ++thread_wakeups;
   if (error == GRPC_ERROR_NONE) {
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
         "consume_wakeup", grpc_wakeup_fd_consume_wakeup(shared->wakeup_fd)));
-    grpc_fd_notify_on_read(exec_ctx, shared->wakeup_desc, &shared->on_wakeup);
+    grpc_fd_notify_on_read(shared->wakeup_desc, &shared->on_wakeup);
     GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_next",
                                  grpc_wakeup_fd_wakeup(shared->wakeup_fd)));
   }
@@ -269,7 +263,7 @@
   for (size_t i = 0; i < GPR_ARRAY_SIZE(thds); i++) {
     gpr_thd_options opt = gpr_thd_options_default();
     gpr_thd_options_set_joinable(&opt);
-    gpr_thd_new(&thds[i], test_threading_loop, &shared, &opt);
+    gpr_thd_new(&thds[i], "test_thread", test_threading_loop, &shared, &opt);
   }
   grpc_wakeup_fd fd;
   GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_fd_init", grpc_wakeup_fd_init(&fd)));
@@ -277,13 +271,12 @@
   shared.wakeup_desc = grpc_fd_create(fd.read_fd, "wakeup");
   shared.wakeups = 0;
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_pollset_add_fd(&exec_ctx, shared.pollset, shared.wakeup_desc);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_pollset_add_fd(shared.pollset, shared.wakeup_desc);
     grpc_fd_notify_on_read(
-        &exec_ctx, shared.wakeup_desc,
+        shared.wakeup_desc,
         GRPC_CLOSURE_INIT(&shared.on_wakeup, test_threading_wakeup, &shared,
                           grpc_schedule_on_exec_ctx));
-    grpc_exec_ctx_finish(&exec_ctx);
   }
   GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_first",
                                grpc_wakeup_fd_wakeup(shared.wakeup_fd)));
@@ -293,14 +286,13 @@
   fd.read_fd = 0;
   grpc_wakeup_fd_destroy(&fd);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_fd_shutdown(&exec_ctx, shared.wakeup_desc, GRPC_ERROR_CANCELLED);
-    grpc_fd_orphan(&exec_ctx, shared.wakeup_desc, nullptr, nullptr,
+    grpc_core::ExecCtx exec_ctx;
+    grpc_fd_shutdown(shared.wakeup_desc, GRPC_ERROR_CANCELLED);
+    grpc_fd_orphan(shared.wakeup_desc, nullptr, nullptr,
                    false /* already_closed */, "done");
-    grpc_pollset_shutdown(&exec_ctx, shared.pollset,
+    grpc_pollset_shutdown(shared.pollset,
                           GRPC_CLOSURE_CREATE(destroy_pollset, shared.pollset,
                                               grpc_schedule_on_exec_ctx));
-    grpc_exec_ctx_finish(&exec_ctx);
   }
   gpr_free(shared.pollset);
 }
@@ -309,20 +301,21 @@
   const char* poll_strategy = nullptr;
   grpc_test_init(argc, argv);
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  {
+    grpc_core::ExecCtx exec_ctx;
 
-  poll_strategy = grpc_get_poll_strategy_name();
-  if (poll_strategy != nullptr && strcmp(poll_strategy, "epollsig") == 0) {
-    test_add_fd_to_pollset();
-    test_threading();
-  } else {
-    gpr_log(GPR_INFO,
-            "Skipping the test. The test is only relevant for 'epollsig' "
-            "strategy. and the current strategy is: '%s'",
-            poll_strategy);
+    poll_strategy = grpc_get_poll_strategy_name();
+    if (poll_strategy != nullptr && strcmp(poll_strategy, "epollsig") == 0) {
+      test_add_fd_to_pollset();
+      test_threading();
+    } else {
+      gpr_log(GPR_INFO,
+              "Skipping the test. The test is only relevant for 'epollsig' "
+              "strategy, and the current strategy is: '%s'",
+              poll_strategy);
+    }
   }
 
-  grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
   return 0;
 }
diff --git a/test/core/iomgr/fd_conservation_posix_test.cc b/test/core/iomgr/fd_conservation_posix_test.cc
index f46430c..aaa1401 100644
--- a/test/core/iomgr/fd_conservation_posix_test.cc
+++ b/test/core/iomgr/fd_conservation_posix_test.cc
@@ -31,26 +31,27 @@
 
   grpc_test_init(argc, argv);
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  {
+    grpc_core::ExecCtx exec_ctx;
 
-  /* set max # of file descriptors to a low value, and
-     verify we can create and destroy many more than this number
-     of descriptors */
-  rlim.rlim_cur = rlim.rlim_max = 10;
-  GPR_ASSERT(0 == setrlimit(RLIMIT_NOFILE, &rlim));
-  grpc_resource_quota* resource_quota =
-      grpc_resource_quota_create("fd_conservation_posix_test");
+    /* set max # of file descriptors to a low value, and
+       verify we can create and destroy many more than this number
+       of descriptors */
+    rlim.rlim_cur = rlim.rlim_max = 10;
+    GPR_ASSERT(0 == setrlimit(RLIMIT_NOFILE, &rlim));
+    grpc_resource_quota* resource_quota =
+        grpc_resource_quota_create("fd_conservation_posix_test");
 
-  for (i = 0; i < 100; i++) {
-    p = grpc_iomgr_create_endpoint_pair("test", nullptr);
-    grpc_endpoint_destroy(&exec_ctx, p.client);
-    grpc_endpoint_destroy(&exec_ctx, p.server);
-    grpc_exec_ctx_flush(&exec_ctx);
+    for (i = 0; i < 100; i++) {
+      p = grpc_iomgr_create_endpoint_pair("test", nullptr);
+      grpc_endpoint_destroy(p.client);
+      grpc_endpoint_destroy(p.server);
+      grpc_core::ExecCtx::Get()->Flush();
+    }
+
+    grpc_resource_quota_unref(resource_quota);
   }
 
-  grpc_resource_quota_unref(resource_quota);
-
-  grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
   return 0;
 }
diff --git a/test/core/iomgr/fd_posix_test.cc b/test/core/iomgr/fd_posix_test.cc
index a03d841..cf75517 100644
--- a/test/core/iomgr/fd_posix_test.cc
+++ b/test/core/iomgr/fd_posix_test.cc
@@ -111,20 +111,19 @@
 
 /* Called when an upload session can be safely shutdown.
    Close session FD and start to shutdown listen FD. */
-static void session_shutdown_cb(grpc_exec_ctx* exec_ctx, void* arg, /*session */
+static void session_shutdown_cb(void* arg, /*session */
                                 bool success) {
   session* se = static_cast<session*>(arg);
   server* sv = se->sv;
-  grpc_fd_orphan(exec_ctx, se->em_fd, nullptr, nullptr,
-                 false /* already_closed */, "a");
+  grpc_fd_orphan(se->em_fd, nullptr, nullptr, false /* already_closed */, "a");
   gpr_free(se);
   /* Start to shutdown listen fd. */
-  grpc_fd_shutdown(exec_ctx, sv->em_fd,
+  grpc_fd_shutdown(sv->em_fd,
                    GRPC_ERROR_CREATE_FROM_STATIC_STRING("session_shutdown_cb"));
 }
 
 /* Called when data become readable in a session. */
-static void session_read_cb(grpc_exec_ctx* exec_ctx, void* arg, /*session */
+static void session_read_cb(void* arg, /*session */
                             grpc_error* error) {
   session* se = static_cast<session*>(arg);
   int fd = grpc_fd_wrapped_fd(se->em_fd);
@@ -133,7 +132,7 @@
   ssize_t read_total = 0;
 
   if (error != GRPC_ERROR_NONE) {
-    session_shutdown_cb(exec_ctx, arg, 1);
+    session_shutdown_cb(arg, 1);
     return;
   }
 
@@ -148,7 +147,7 @@
      It is possible to read nothing due to spurious edge event or data has
      been drained, In such a case, read() returns -1 and set errno to EAGAIN. */
   if (read_once == 0) {
-    session_shutdown_cb(exec_ctx, arg, 1);
+    session_shutdown_cb(arg, 1);
   } else if (read_once == -1) {
     if (errno == EAGAIN) {
       /* An edge triggered event is cached in the kernel until next poll.
@@ -159,7 +158,7 @@
          TODO(chenw): in multi-threaded version, callback and polling can be
          run in different threads. polling may catch a persist read edge event
          before notify_on_read is called.  */
-      grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);
+      grpc_fd_notify_on_read(se->em_fd, &se->session_read_closure);
     } else {
       gpr_log(GPR_ERROR, "Unhandled read error %s", strerror(errno));
       abort();
@@ -169,22 +168,20 @@
 
 /* Called when the listen FD can be safely shutdown.
    Close listen FD and signal that server can be shutdown. */
-static void listen_shutdown_cb(grpc_exec_ctx* exec_ctx, void* arg /*server */,
-                               int success) {
+static void listen_shutdown_cb(void* arg /*server */, int success) {
   server* sv = static_cast<server*>(arg);
 
-  grpc_fd_orphan(exec_ctx, sv->em_fd, nullptr, nullptr,
-                 false /* already_closed */, "b");
+  grpc_fd_orphan(sv->em_fd, nullptr, nullptr, false /* already_closed */, "b");
 
   gpr_mu_lock(g_mu);
   sv->done = 1;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
   gpr_mu_unlock(g_mu);
 }
 
 /* Called when a new TCP connection request arrives in the listening port. */
-static void listen_cb(grpc_exec_ctx* exec_ctx, void* arg, /*=sv_arg*/
+static void listen_cb(void* arg, /*=sv_arg*/
                       grpc_error* error) {
   server* sv = static_cast<server*>(arg);
   int fd;
@@ -195,7 +192,7 @@
   grpc_fd* listen_em_fd = sv->em_fd;
 
   if (error != GRPC_ERROR_NONE) {
-    listen_shutdown_cb(exec_ctx, arg, 1);
+    listen_shutdown_cb(arg, 1);
     return;
   }
 
@@ -207,12 +204,12 @@
   se = static_cast<session*>(gpr_malloc(sizeof(*se)));
   se->sv = sv;
   se->em_fd = grpc_fd_create(fd, "listener");
-  grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
+  grpc_pollset_add_fd(g_pollset, se->em_fd);
   GRPC_CLOSURE_INIT(&se->session_read_closure, session_read_cb, se,
                     grpc_schedule_on_exec_ctx);
-  grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);
+  grpc_fd_notify_on_read(se->em_fd, &se->session_read_closure);
 
-  grpc_fd_notify_on_read(exec_ctx, listen_em_fd, &sv->listen_closure);
+  grpc_fd_notify_on_read(listen_em_fd, &sv->listen_closure);
 }
 
 /* Max number of connections pending to be accepted by listen(). */
@@ -222,7 +219,7 @@
    listen_cb() is registered to be interested in reading from listen_fd.
    When connection request arrives, listen_cb() is called to accept the
    connection request. */
-static int server_start(grpc_exec_ctx* exec_ctx, server* sv) {
+static int server_start(server* sv) {
   int port = 0;
   int fd;
   struct sockaddr_in sin;
@@ -236,11 +233,11 @@
   GPR_ASSERT(listen(fd, MAX_NUM_FD) == 0);
 
   sv->em_fd = grpc_fd_create(fd, "server");
-  grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd);
+  grpc_pollset_add_fd(g_pollset, sv->em_fd);
   /* Register to be interested in reading from listen_fd. */
   GRPC_CLOSURE_INIT(&sv->listen_closure, listen_cb, sv,
                     grpc_schedule_on_exec_ctx);
-  grpc_fd_notify_on_read(exec_ctx, sv->em_fd, &sv->listen_closure);
+  grpc_fd_notify_on_read(sv->em_fd, &sv->listen_closure);
 
   return port;
 }
@@ -249,13 +246,13 @@
 static void server_wait_and_shutdown(server* sv) {
   gpr_mu_lock(g_mu);
   while (!sv->done) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
-                                          GRPC_MILLIS_INF_FUTURE)));
+        "pollset_work",
+        grpc_pollset_work(g_pollset, &worker, GRPC_MILLIS_INF_FUTURE)));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   gpr_mu_unlock(g_mu);
@@ -289,18 +286,16 @@
 }
 
 /* Called when a client upload session is ready to shutdown. */
-static void client_session_shutdown_cb(grpc_exec_ctx* exec_ctx,
-                                       void* arg /*client */, int success) {
+static void client_session_shutdown_cb(void* arg /*client */, int success) {
   client* cl = static_cast<client*>(arg);
-  grpc_fd_orphan(exec_ctx, cl->em_fd, nullptr, nullptr,
-                 false /* already_closed */, "c");
+  grpc_fd_orphan(cl->em_fd, nullptr, nullptr, false /* already_closed */, "c");
   cl->done = 1;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
 }
 
 /* Write as much as possible, then register notify_on_write. */
-static void client_session_write(grpc_exec_ctx* exec_ctx, void* arg, /*client */
+static void client_session_write(void* arg, /*client */
                                  grpc_error* error) {
   client* cl = static_cast<client*>(arg);
   int fd = grpc_fd_wrapped_fd(cl->em_fd);
@@ -308,7 +303,7 @@
 
   if (error != GRPC_ERROR_NONE) {
     gpr_mu_lock(g_mu);
-    client_session_shutdown_cb(exec_ctx, arg, 1);
+    client_session_shutdown_cb(arg, 1);
     gpr_mu_unlock(g_mu);
     return;
   }
@@ -323,10 +318,10 @@
     if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
       GRPC_CLOSURE_INIT(&cl->write_closure, client_session_write, cl,
                         grpc_schedule_on_exec_ctx);
-      grpc_fd_notify_on_write(exec_ctx, cl->em_fd, &cl->write_closure);
+      grpc_fd_notify_on_write(cl->em_fd, &cl->write_closure);
       cl->client_write_cnt++;
     } else {
-      client_session_shutdown_cb(exec_ctx, arg, 1);
+      client_session_shutdown_cb(arg, 1);
     }
     gpr_mu_unlock(g_mu);
   } else {
@@ -336,7 +331,7 @@
 }
 
 /* Start a client to send a stream of bytes. */
-static void client_start(grpc_exec_ctx* exec_ctx, client* cl, int port) {
+static void client_start(client* cl, int port) {
   int fd;
   struct sockaddr_in sin;
   create_test_socket(port, &fd, &sin);
@@ -357,9 +352,9 @@
   }
 
   cl->em_fd = grpc_fd_create(fd, "client");
-  grpc_pollset_add_fd(exec_ctx, g_pollset, cl->em_fd);
+  grpc_pollset_add_fd(g_pollset, cl->em_fd);
 
-  client_session_write(exec_ctx, cl, GRPC_ERROR_NONE);
+  client_session_write(cl, GRPC_ERROR_NONE);
 }
 
 /* Wait for the signal to shutdown a client. */
@@ -367,12 +362,12 @@
   gpr_mu_lock(g_mu);
   while (!cl->done) {
     grpc_pollset_worker* worker = nullptr;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
-                                          GRPC_MILLIS_INF_FUTURE)));
+        "pollset_work",
+        grpc_pollset_work(g_pollset, &worker, GRPC_MILLIS_INF_FUTURE)));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   gpr_mu_unlock(g_mu);
@@ -385,13 +380,13 @@
   server sv;
   client cl;
   int port;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   server_init(&sv);
-  port = server_start(&exec_ctx, &sv);
+  port = server_start(&sv);
   client_init(&cl);
-  client_start(&exec_ctx, &cl, port);
-  grpc_exec_ctx_finish(&exec_ctx);
+  client_start(&cl, port);
+
   client_wait_and_shutdown(&cl);
   server_wait_and_shutdown(&sv);
   GPR_ASSERT(sv.read_bytes_total == cl.write_bytes_total);
@@ -406,27 +401,25 @@
 
 void destroy_change_data(fd_change_data* fdc) {}
 
-static void first_read_callback(grpc_exec_ctx* exec_ctx,
-                                void* arg /* fd_change_data */,
+static void first_read_callback(void* arg /* fd_change_data */,
                                 grpc_error* error) {
   fd_change_data* fdc = static_cast<fd_change_data*>(arg);
 
   gpr_mu_lock(g_mu);
   fdc->cb_that_ran = first_read_callback;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
   gpr_mu_unlock(g_mu);
 }
 
-static void second_read_callback(grpc_exec_ctx* exec_ctx,
-                                 void* arg /* fd_change_data */,
+static void second_read_callback(void* arg /* fd_change_data */,
                                  grpc_error* error) {
   fd_change_data* fdc = static_cast<fd_change_data*>(arg);
 
   gpr_mu_lock(g_mu);
   fdc->cb_that_ran = second_read_callback;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
   gpr_mu_unlock(g_mu);
 }
 
@@ -443,7 +436,7 @@
   ssize_t result;
   grpc_closure first_closure;
   grpc_closure second_closure;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   GRPC_CLOSURE_INIT(&first_closure, first_read_callback, &a,
                     grpc_schedule_on_exec_ctx);
@@ -460,10 +453,10 @@
   GPR_ASSERT(fcntl(sv[1], F_SETFL, flags | O_NONBLOCK) == 0);
 
   em_fd = grpc_fd_create(sv[0], "test_grpc_fd_change");
-  grpc_pollset_add_fd(&exec_ctx, g_pollset, em_fd);
+  grpc_pollset_add_fd(g_pollset, em_fd);
 
   /* Register the first callback, then make its FD readable */
-  grpc_fd_notify_on_read(&exec_ctx, em_fd, &first_closure);
+  grpc_fd_notify_on_read(em_fd, &first_closure);
   data = 0;
   result = write(sv[1], &data, 1);
   GPR_ASSERT(result == 1);
@@ -473,10 +466,10 @@
   while (a.cb_that_ran == nullptr) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
-                                          GRPC_MILLIS_INF_FUTURE)));
+        "pollset_work",
+        grpc_pollset_work(g_pollset, &worker, GRPC_MILLIS_INF_FUTURE)));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   GPR_ASSERT(a.cb_that_ran == first_read_callback);
@@ -488,7 +481,7 @@
 
   /* Now register a second callback with distinct change data, and do the same
      thing again. */
-  grpc_fd_notify_on_read(&exec_ctx, em_fd, &second_closure);
+  grpc_fd_notify_on_read(em_fd, &second_closure);
   data = 0;
   result = write(sv[1], &data, 1);
   GPR_ASSERT(result == 1);
@@ -497,44 +490,43 @@
   while (b.cb_that_ran == nullptr) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
-                                          GRPC_MILLIS_INF_FUTURE)));
+        "pollset_work",
+        grpc_pollset_work(g_pollset, &worker, GRPC_MILLIS_INF_FUTURE)));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   /* Except now we verify that second_read_callback ran instead */
   GPR_ASSERT(b.cb_that_ran == second_read_callback);
   gpr_mu_unlock(g_mu);
 
-  grpc_fd_orphan(&exec_ctx, em_fd, nullptr, nullptr, false /* already_closed */,
-                 "d");
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_fd_orphan(em_fd, nullptr, nullptr, false /* already_closed */, "d");
+
   destroy_change_data(&a);
   destroy_change_data(&b);
   close(sv[1]);
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(p));
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
 }
 
 int main(int argc, char** argv) {
   grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_test_init(argc, argv);
   grpc_init();
-  g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
-  grpc_pollset_init(g_pollset, &g_mu);
-  test_grpc_fd();
-  test_grpc_fd_change();
-  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
-                    grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
-  grpc_exec_ctx_flush(&exec_ctx);
-  gpr_free(g_pollset);
-  grpc_exec_ctx_finish(&exec_ctx);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
+    grpc_pollset_init(g_pollset, &g_mu);
+    test_grpc_fd();
+    test_grpc_fd_change();
+    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
+                      grpc_schedule_on_exec_ctx);
+    grpc_pollset_shutdown(g_pollset, &destroyed);
+    grpc_core::ExecCtx::Get()->Flush();
+    gpr_free(g_pollset);
+  }
   grpc_shutdown();
   return 0;
 }
diff --git a/test/core/iomgr/load_file_test.cc b/test/core/iomgr/load_file_test.cc
index 9f360ba..797d0ef 100644
--- a/test/core/iomgr/load_file_test.cc
+++ b/test/core/iomgr/load_file_test.cc
@@ -19,6 +19,7 @@
 #include <stdio.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/slice.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
@@ -152,9 +153,11 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   test_load_empty_file();
   test_load_failure();
   test_load_small_file();
   test_load_big_file();
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/iomgr/pollset_set_test.cc b/test/core/iomgr/pollset_set_test.cc
index 719eab9..f270791 100644
--- a/test/core/iomgr/pollset_set_test.cc
+++ b/test/core/iomgr/pollset_set_test.cc
@@ -47,11 +47,10 @@
   }
 }
 
-void cleanup_test_pollset_sets(grpc_exec_ctx* exec_ctx,
-                               test_pollset_set* pollset_sets,
+void cleanup_test_pollset_sets(test_pollset_set* pollset_sets,
                                const int num_pss) {
   for (int i = 0; i < num_pss; i++) {
-    grpc_pollset_set_destroy(exec_ctx, pollset_sets[i].pss);
+    grpc_pollset_set_destroy(pollset_sets[i].pss);
     pollset_sets[i].pss = nullptr;
   }
 }
@@ -73,21 +72,19 @@
   }
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(p));
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
 }
 
-static void cleanup_test_pollsets(grpc_exec_ctx* exec_ctx,
-                                  test_pollset* pollsets,
+static void cleanup_test_pollsets(test_pollset* pollsets,
                                   const int num_pollsets) {
   grpc_closure destroyed;
   for (int i = 0; i < num_pollsets; i++) {
     GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, pollsets[i].ps,
                       grpc_schedule_on_exec_ctx);
-    grpc_pollset_shutdown(exec_ctx, pollsets[i].ps, &destroyed);
+    grpc_pollset_shutdown(pollsets[i].ps, &destroyed);
 
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_free(pollsets[i].ps);
     pollsets[i].ps = nullptr;
   }
@@ -105,45 +102,43 @@
   grpc_closure on_readable;   /* Closure to call when this fd is readable */
 } test_fd;
 
-void on_readable(grpc_exec_ctx* exec_ctx, void* tfd, grpc_error* error) {
+void on_readable(void* tfd, grpc_error* error) {
   ((test_fd*)tfd)->is_on_readable_called = true;
 }
 
-static void reset_test_fd(grpc_exec_ctx* exec_ctx, test_fd* tfd) {
+static void reset_test_fd(test_fd* tfd) {
   tfd->is_on_readable_called = false;
 
   GRPC_CLOSURE_INIT(&tfd->on_readable, on_readable, tfd,
                     grpc_schedule_on_exec_ctx);
-  grpc_fd_notify_on_read(exec_ctx, tfd->fd, &tfd->on_readable);
+  grpc_fd_notify_on_read(tfd->fd, &tfd->on_readable);
 }
 
-static void init_test_fds(grpc_exec_ctx* exec_ctx, test_fd* tfds,
-                          const int num_fds) {
+static void init_test_fds(test_fd* tfds, const int num_fds) {
   for (int i = 0; i < num_fds; i++) {
     GPR_ASSERT(GRPC_ERROR_NONE == grpc_wakeup_fd_init(&tfds[i].wakeup_fd));
     tfds[i].fd = grpc_fd_create(GRPC_WAKEUP_FD_GET_READ_FD(&tfds[i].wakeup_fd),
                                 "test_fd");
-    reset_test_fd(exec_ctx, &tfds[i]);
+    reset_test_fd(&tfds[i]);
   }
 }
 
-static void cleanup_test_fds(grpc_exec_ctx* exec_ctx, test_fd* tfds,
-                             const int num_fds) {
+static void cleanup_test_fds(test_fd* tfds, const int num_fds) {
   int release_fd;
 
   for (int i = 0; i < num_fds; i++) {
-    grpc_fd_shutdown(exec_ctx, tfds[i].fd,
+    grpc_fd_shutdown(tfds[i].fd,
                      GRPC_ERROR_CREATE_FROM_STATIC_STRING("fd cleanup"));
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
 
     /* grpc_fd_orphan frees the memory allocated for grpc_fd. Normally it also
      * calls close() on the underlying fd. In our case, we are using
      * grpc_wakeup_fd and we would like to destroy it ourselves (by calling
      * grpc_wakeup_fd_destroy). To prevent grpc_fd from calling close() on the
      * underlying fd, call it with a non-NULL 'release_fd' parameter */
-    grpc_fd_orphan(exec_ctx, tfds[i].fd, nullptr, &release_fd,
-                   false /* already_closed */, "test_fd_cleanup");
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_fd_orphan(tfds[i].fd, nullptr, &release_fd, false /* already_closed */,
+                   "test_fd_cleanup");
+    grpc_core::ExecCtx::Get()->Flush();
 
     grpc_wakeup_fd_destroy(&tfds[i].wakeup_fd);
   }
@@ -155,8 +150,7 @@
   }
 }
 
-static void verify_readable_and_reset(grpc_exec_ctx* exec_ctx, test_fd* tfds,
-                                      const int num_fds) {
+static void verify_readable_and_reset(test_fd* tfds, const int num_fds) {
   for (int i = 0; i < num_fds; i++) {
     /* Verify that the on_readable callback was called */
     GPR_ASSERT(tfds[i].is_on_readable_called);
@@ -164,7 +158,7 @@
     /* Reset the tfd[i] structure */
     GPR_ASSERT(GRPC_ERROR_NONE ==
                grpc_wakeup_fd_consume_wakeup(&tfds[i].wakeup_fd));
-    reset_test_fd(exec_ctx, &tfds[i]);
+    reset_test_fd(&tfds[i]);
   }
 }
 
@@ -205,7 +199,7 @@
    *                    |
    *                    +---> FD9 (Added after PS2 is added to PSS0)
    */
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_pollset_worker* worker;
   grpc_millis deadline;
 
@@ -216,34 +210,33 @@
   const int num_ps = GPR_ARRAY_SIZE(pollsets);
   const int num_pss = GPR_ARRAY_SIZE(pollset_sets);
 
-  init_test_fds(&exec_ctx, tfds, num_fds);
+  init_test_fds(tfds, num_fds);
   init_test_pollsets(pollsets, num_ps);
   init_test_pollset_sets(pollset_sets, num_pss);
 
   /* Construct the pollset_set/pollset/fd tree (see diagram above) */
 
-  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
-  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);
+  grpc_pollset_set_add_fd(pollset_sets[0].pss, tfds[0].fd);
+  grpc_pollset_set_add_fd(pollset_sets[1].pss, tfds[1].fd);
 
-  grpc_pollset_add_fd(&exec_ctx, pollsets[0].ps, tfds[2].fd);
-  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[3].fd);
-  grpc_pollset_add_fd(&exec_ctx, pollsets[2].ps, tfds[4].fd);
+  grpc_pollset_add_fd(pollsets[0].ps, tfds[2].fd);
+  grpc_pollset_add_fd(pollsets[1].ps, tfds[3].fd);
+  grpc_pollset_add_fd(pollsets[2].ps, tfds[4].fd);
 
-  grpc_pollset_set_add_pollset_set(&exec_ctx, pollset_sets[0].pss,
-                                   pollset_sets[1].pss);
+  grpc_pollset_set_add_pollset_set(pollset_sets[0].pss, pollset_sets[1].pss);
 
-  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[1].pss, pollsets[0].ps);
-  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[1].ps);
-  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[2].ps);
+  grpc_pollset_set_add_pollset(pollset_sets[1].pss, pollsets[0].ps);
+  grpc_pollset_set_add_pollset(pollset_sets[0].pss, pollsets[1].ps);
+  grpc_pollset_set_add_pollset(pollset_sets[0].pss, pollsets[2].ps);
 
-  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[5].fd);
-  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[6].fd);
+  grpc_pollset_set_add_fd(pollset_sets[0].pss, tfds[5].fd);
+  grpc_pollset_set_add_fd(pollset_sets[1].pss, tfds[6].fd);
 
-  grpc_pollset_add_fd(&exec_ctx, pollsets[0].ps, tfds[7].fd);
-  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[8].fd);
-  grpc_pollset_add_fd(&exec_ctx, pollsets[2].ps, tfds[9].fd);
+  grpc_pollset_add_fd(pollsets[0].ps, tfds[7].fd);
+  grpc_pollset_add_fd(pollsets[1].ps, tfds[8].fd);
+  grpc_pollset_add_fd(pollsets[2].ps, tfds[9].fd);
 
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Test that if any FD in the above structure is readable, it is observable by
    * doing grpc_pollset_work on any pollset
@@ -263,34 +256,32 @@
     deadline = grpc_timespec_to_millis_round_up(
         grpc_timeout_milliseconds_to_deadline(2));
     GPR_ASSERT(GRPC_ERROR_NONE ==
-               grpc_pollset_work(&exec_ctx, pollsets[i].ps, &worker, deadline));
+               grpc_pollset_work(pollsets[i].ps, &worker, deadline));
     gpr_mu_unlock(pollsets[i].mu);
 
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
 
-    verify_readable_and_reset(&exec_ctx, tfds, num_fds);
-    grpc_exec_ctx_flush(&exec_ctx);
+    verify_readable_and_reset(tfds, num_fds);
+    grpc_core::ExecCtx::Get()->Flush();
   }
 
   /* Test tear down */
-  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
-  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[5].fd);
-  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);
-  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[6].fd);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_pollset_set_del_fd(pollset_sets[0].pss, tfds[0].fd);
+  grpc_pollset_set_del_fd(pollset_sets[0].pss, tfds[5].fd);
+  grpc_pollset_set_del_fd(pollset_sets[1].pss, tfds[1].fd);
+  grpc_pollset_set_del_fd(pollset_sets[1].pss, tfds[6].fd);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[1].pss, pollsets[0].ps);
-  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[1].ps);
-  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[0].pss, pollsets[2].ps);
+  grpc_pollset_set_del_pollset(pollset_sets[1].pss, pollsets[0].ps);
+  grpc_pollset_set_del_pollset(pollset_sets[0].pss, pollsets[1].ps);
+  grpc_pollset_set_del_pollset(pollset_sets[0].pss, pollsets[2].ps);
 
-  grpc_pollset_set_del_pollset_set(&exec_ctx, pollset_sets[0].pss,
-                                   pollset_sets[1].pss);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_pollset_set_del_pollset_set(pollset_sets[0].pss, pollset_sets[1].pss);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  cleanup_test_fds(&exec_ctx, tfds, num_fds);
-  cleanup_test_pollsets(&exec_ctx, pollsets, num_ps);
-  cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss);
-  grpc_exec_ctx_finish(&exec_ctx);
+  cleanup_test_fds(tfds, num_fds);
+  cleanup_test_pollsets(pollsets, num_ps);
+  cleanup_test_pollset_sets(pollset_sets, num_pss);
 }
 
 /* Same FD added multiple times to the pollset_set tree */
@@ -310,7 +301,7 @@
    *                    |           +--> FD2
    *                    +---> FD1
    */
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_pollset_worker* worker;
   grpc_millis deadline;
 
@@ -321,21 +312,20 @@
   const int num_ps = 1;
   const int num_pss = GPR_ARRAY_SIZE(pollset_sets);
 
-  init_test_fds(&exec_ctx, tfds, num_fds);
+  init_test_fds(tfds, num_fds);
   init_test_pollsets(&pollset, num_ps);
   init_test_pollset_sets(pollset_sets, num_pss);
 
   /* Construct the structure */
-  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
-  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[0].fd);
-  grpc_pollset_set_add_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);
+  grpc_pollset_set_add_fd(pollset_sets[0].pss, tfds[0].fd);
+  grpc_pollset_set_add_fd(pollset_sets[1].pss, tfds[0].fd);
+  grpc_pollset_set_add_fd(pollset_sets[1].pss, tfds[1].fd);
 
-  grpc_pollset_add_fd(&exec_ctx, pollset.ps, tfds[1].fd);
-  grpc_pollset_add_fd(&exec_ctx, pollset.ps, tfds[2].fd);
+  grpc_pollset_add_fd(pollset.ps, tfds[1].fd);
+  grpc_pollset_add_fd(pollset.ps, tfds[2].fd);
 
-  grpc_pollset_set_add_pollset(&exec_ctx, pollset_sets[1].pss, pollset.ps);
-  grpc_pollset_set_add_pollset_set(&exec_ctx, pollset_sets[0].pss,
-                                   pollset_sets[1].pss);
+  grpc_pollset_set_add_pollset(pollset_sets[1].pss, pollset.ps);
+  grpc_pollset_set_add_pollset_set(pollset_sets[0].pss, pollset_sets[1].pss);
 
   /* Test. Make all FDs readable and make sure that can be observed by doing a
    * grpc_pollset_work on the pollset 'PS' */
@@ -345,27 +335,25 @@
   deadline = grpc_timespec_to_millis_round_up(
       grpc_timeout_milliseconds_to_deadline(2));
   GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_pollset_work(&exec_ctx, pollset.ps, &worker, deadline));
+             grpc_pollset_work(pollset.ps, &worker, deadline));
   gpr_mu_unlock(pollset.mu);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  verify_readable_and_reset(&exec_ctx, tfds, num_fds);
-  grpc_exec_ctx_flush(&exec_ctx);
+  verify_readable_and_reset(tfds, num_fds);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Tear down */
-  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[0].pss, tfds[0].fd);
-  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[0].fd);
-  grpc_pollset_set_del_fd(&exec_ctx, pollset_sets[1].pss, tfds[1].fd);
+  grpc_pollset_set_del_fd(pollset_sets[0].pss, tfds[0].fd);
+  grpc_pollset_set_del_fd(pollset_sets[1].pss, tfds[0].fd);
+  grpc_pollset_set_del_fd(pollset_sets[1].pss, tfds[1].fd);
 
-  grpc_pollset_set_del_pollset(&exec_ctx, pollset_sets[1].pss, pollset.ps);
-  grpc_pollset_set_del_pollset_set(&exec_ctx, pollset_sets[0].pss,
-                                   pollset_sets[1].pss);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_pollset_set_del_pollset(pollset_sets[1].pss, pollset.ps);
+  grpc_pollset_set_del_pollset_set(pollset_sets[0].pss, pollset_sets[1].pss);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  cleanup_test_fds(&exec_ctx, tfds, num_fds);
-  cleanup_test_pollsets(&exec_ctx, &pollset, num_ps);
-  cleanup_test_pollset_sets(&exec_ctx, pollset_sets, num_pss);
-  grpc_exec_ctx_finish(&exec_ctx);
+  cleanup_test_fds(tfds, num_fds);
+  cleanup_test_pollsets(&pollset, num_ps);
+  cleanup_test_pollset_sets(pollset_sets, num_pss);
 }
 
 /* Pollset_set with an empty pollset */
@@ -383,7 +371,7 @@
    *                   |
    *                   +---> FD2
    */
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_pollset_worker* worker;
   grpc_millis deadline;
 
@@ -394,17 +382,17 @@
   const int num_ps = GPR_ARRAY_SIZE(pollsets);
   const int num_pss = 1;
 
-  init_test_fds(&exec_ctx, tfds, num_fds);
+  init_test_fds(tfds, num_fds);
   init_test_pollsets(pollsets, num_ps);
   init_test_pollset_sets(&pollset_set, num_pss);
 
   /* Construct the structure */
-  grpc_pollset_set_add_fd(&exec_ctx, pollset_set.pss, tfds[0].fd);
-  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[1].fd);
-  grpc_pollset_add_fd(&exec_ctx, pollsets[1].ps, tfds[2].fd);
+  grpc_pollset_set_add_fd(pollset_set.pss, tfds[0].fd);
+  grpc_pollset_add_fd(pollsets[1].ps, tfds[1].fd);
+  grpc_pollset_add_fd(pollsets[1].ps, tfds[2].fd);
 
-  grpc_pollset_set_add_pollset(&exec_ctx, pollset_set.pss, pollsets[0].ps);
-  grpc_pollset_set_add_pollset(&exec_ctx, pollset_set.pss, pollsets[1].ps);
+  grpc_pollset_set_add_pollset(pollset_set.pss, pollsets[0].ps);
+  grpc_pollset_set_add_pollset(pollset_set.pss, pollsets[1].ps);
 
   /* Test. Make all FDs readable and make sure that can be observed by doing
    * grpc_pollset_work on the empty pollset 'PS0' */
@@ -414,45 +402,44 @@
   deadline = grpc_timespec_to_millis_round_up(
       grpc_timeout_milliseconds_to_deadline(2));
   GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_pollset_work(&exec_ctx, pollsets[0].ps, &worker, deadline));
+             grpc_pollset_work(pollsets[0].ps, &worker, deadline));
   gpr_mu_unlock(pollsets[0].mu);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  verify_readable_and_reset(&exec_ctx, tfds, num_fds);
-  grpc_exec_ctx_flush(&exec_ctx);
+  verify_readable_and_reset(tfds, num_fds);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Tear down */
-  grpc_pollset_set_del_fd(&exec_ctx, pollset_set.pss, tfds[0].fd);
-  grpc_pollset_set_del_pollset(&exec_ctx, pollset_set.pss, pollsets[0].ps);
-  grpc_pollset_set_del_pollset(&exec_ctx, pollset_set.pss, pollsets[1].ps);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_pollset_set_del_fd(pollset_set.pss, tfds[0].fd);
+  grpc_pollset_set_del_pollset(pollset_set.pss, pollsets[0].ps);
+  grpc_pollset_set_del_pollset(pollset_set.pss, pollsets[1].ps);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  cleanup_test_fds(&exec_ctx, tfds, num_fds);
-  cleanup_test_pollsets(&exec_ctx, pollsets, num_ps);
-  cleanup_test_pollset_sets(&exec_ctx, &pollset_set, num_pss);
-  grpc_exec_ctx_finish(&exec_ctx);
+  cleanup_test_fds(tfds, num_fds);
+  cleanup_test_pollsets(pollsets, num_ps);
+  cleanup_test_pollset_sets(&pollset_set, num_pss);
 }
 
 int main(int argc, char** argv) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_test_init(argc, argv);
   grpc_init();
-  const char* poll_strategy = grpc_get_poll_strategy_name();
+  {
+    grpc_core::ExecCtx exec_ctx;
+    const char* poll_strategy = grpc_get_poll_strategy_name();
 
-  if (poll_strategy != nullptr &&
-      (strcmp(poll_strategy, "epollsig") == 0 ||
-       strcmp(poll_strategy, "epoll-threadpool") == 0)) {
-    pollset_set_test_basic();
-    pollset_set_test_dup_fds();
-    pollset_set_test_empty_pollset();
-  } else {
-    gpr_log(GPR_INFO,
-            "Skipping the test. The test is only relevant for 'epoll' "
-            "strategy. and the current strategy is: '%s'",
-            poll_strategy);
+    if (poll_strategy != nullptr &&
+        (strcmp(poll_strategy, "epollsig") == 0 ||
+         strcmp(poll_strategy, "epoll-threadpool") == 0)) {
+      pollset_set_test_basic();
+      pollset_set_test_dup_fds();
+      pollset_set_test_empty_pollset();
+    } else {
+      gpr_log(GPR_INFO,
+              "Skipping the test. The test is only relevant for 'epoll' "
+              "strategy, and the current strategy is: '%s'",
+              poll_strategy);
+    }
   }
-
-  grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
   return 0;
 }
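
For readers tracking this refactor: the hunks above drop the explicit grpc_exec_ctx plumbing in favor of a stack-scoped grpc_core::ExecCtx. A minimal sketch of that pattern (not part of the patch; the header path is an assumption based on the paths appearing in this diff):

// Sketch only; header path assumed.
#include <grpc/grpc.h>

#include "src/core/lib/iomgr/exec_ctx.h"

int main() {
  grpc_init();
  {
    grpc_core::ExecCtx exec_ctx;  // replaces GRPC_EXEC_CTX_INIT
    // iomgr calls now pick up the thread-local ExecCtx instead of taking an
    // explicit grpc_exec_ctx* argument.
    grpc_core::ExecCtx::Get()->Flush();  // replaces grpc_exec_ctx_flush()
  }  // the destructor drains pending work; replaces grpc_exec_ctx_finish()
  grpc_shutdown();
  return 0;
}

Scoping the ExecCtx in a block that closes before grpc_shutdown() mirrors the updated main() functions in these tests.
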
diff --git a/test/core/iomgr/resolve_address_posix_test.cc b/test/core/iomgr/resolve_address_posix_test.cc
index 1a5eb9a..e363153 100644
--- a/test/core/iomgr/resolve_address_posix_test.cc
+++ b/test/core/iomgr/resolve_address_posix_test.cc
@@ -46,29 +46,29 @@
   grpc_pollset_set* pollset_set;
 } args_struct;
 
-static void do_nothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+static void do_nothing(void* arg, grpc_error* error) {}
 
-void args_init(grpc_exec_ctx* exec_ctx, args_struct* args) {
+void args_init(args_struct* args) {
   gpr_event_init(&args->ev);
   args->pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
   grpc_pollset_init(args->pollset, &args->mu);
   args->pollset_set = grpc_pollset_set_create();
-  grpc_pollset_set_add_pollset(exec_ctx, args->pollset_set, args->pollset);
+  grpc_pollset_set_add_pollset(args->pollset_set, args->pollset);
   args->addrs = nullptr;
 }
 
-void args_finish(grpc_exec_ctx* exec_ctx, args_struct* args) {
+void args_finish(args_struct* args) {
   GPR_ASSERT(gpr_event_wait(&args->ev, test_deadline()));
   grpc_resolved_addresses_destroy(args->addrs);
-  grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset);
-  grpc_pollset_set_destroy(exec_ctx, args->pollset_set);
+  grpc_pollset_set_del_pollset(args->pollset_set, args->pollset);
+  grpc_pollset_set_destroy(args->pollset_set);
   grpc_closure do_nothing_cb;
   GRPC_CLOSURE_INIT(&do_nothing_cb, do_nothing, nullptr,
                     grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(exec_ctx, args->pollset, &do_nothing_cb);
+  grpc_pollset_shutdown(args->pollset, &do_nothing_cb);
   // exec_ctx needs to be flushed before calling grpc_pollset_destroy()
-  grpc_exec_ctx_flush(exec_ctx);
-  grpc_pollset_destroy(exec_ctx, args->pollset);
+  grpc_core::ExecCtx::Get()->Flush();
+  grpc_pollset_destroy(args->pollset);
   gpr_free(args->pollset);
 }
 
@@ -79,36 +79,33 @@
 
 static void actually_poll(void* argsp) {
   args_struct* args = static_cast<args_struct*>(argsp);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_millis deadline = n_sec_deadline(10);
   while (true) {
+    grpc_core::ExecCtx exec_ctx;
     bool done = gpr_atm_acq_load(&args->done_atm) != 0;
     if (done) {
       break;
     }
-    grpc_millis time_left = deadline - grpc_exec_ctx_now(&exec_ctx);
+    grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now();
     gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
     GPR_ASSERT(time_left >= 0);
     grpc_pollset_worker* worker = nullptr;
     gpr_mu_lock(args->mu);
-    GRPC_LOG_IF_ERROR("pollset_work",
-                      grpc_pollset_work(&exec_ctx, args->pollset, &worker,
-                                        n_sec_deadline(1)));
+    GRPC_LOG_IF_ERROR("pollset_work", grpc_pollset_work(args->pollset, &worker,
+                                                        n_sec_deadline(1)));
     gpr_mu_unlock(args->mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
   }
   gpr_event_set(&args->ev, (void*)1);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void poll_pollset_until_request_done(args_struct* args) {
   gpr_atm_rel_store(&args->done_atm, 0);
   gpr_thd_id id;
-  gpr_thd_new(&id, actually_poll, args, nullptr);
+  gpr_thd_new(&id, "grpc_poll_pollset", actually_poll, args, nullptr);
 }
 
-static void must_succeed(grpc_exec_ctx* exec_ctx, void* argsp,
-                         grpc_error* err) {
+static void must_succeed(void* argsp, grpc_error* err) {
   args_struct* args = static_cast<args_struct*>(argsp);
   GPR_ASSERT(err == GRPC_ERROR_NONE);
   GPR_ASSERT(args->addrs != nullptr);
@@ -116,29 +113,28 @@
   gpr_atm_rel_store(&args->done_atm, 1);
 }
 
-static void must_fail(grpc_exec_ctx* exec_ctx, void* argsp, grpc_error* err) {
+static void must_fail(void* argsp, grpc_error* err) {
   args_struct* args = static_cast<args_struct*>(argsp);
   GPR_ASSERT(err != GRPC_ERROR_NONE);
   gpr_atm_rel_store(&args->done_atm, 1);
 }
 
 static void test_unix_socket(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   args_struct args;
-  args_init(&exec_ctx, &args);
+  args_init(&args);
   poll_pollset_until_request_done(&args);
   grpc_resolve_address(
-      &exec_ctx, "unix:/path/name", nullptr, args.pollset_set,
+      "unix:/path/name", nullptr, args.pollset_set,
       GRPC_CLOSURE_CREATE(must_succeed, &args, grpc_schedule_on_exec_ctx),
       &args.addrs);
-  args_finish(&exec_ctx, &args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  args_finish(&args);
 }
 
 static void test_unix_socket_path_name_too_long(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   args_struct args;
-  args_init(&exec_ctx, &args);
+  args_init(&args);
   const char prefix[] = "unix:/path/name";
   size_t path_name_length =
       GPR_ARRAY_SIZE(((struct sockaddr_un*)nullptr)->sun_path) + 6;
@@ -150,22 +146,23 @@
 
   poll_pollset_until_request_done(&args);
   grpc_resolve_address(
-      &exec_ctx, path_name, nullptr, args.pollset_set,
+      path_name, nullptr, args.pollset_set,
       GRPC_CLOSURE_CREATE(must_fail, &args, grpc_schedule_on_exec_ctx),
       &args.addrs);
   gpr_free(path_name);
-  args_finish(&exec_ctx, &args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  args_finish(&args);
 }
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  test_unix_socket();
-  test_unix_socket_path_name_too_long();
-  grpc_executor_shutdown(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+
+  {
+    grpc_core::ExecCtx exec_ctx;
+    test_unix_socket();
+    test_unix_socket_path_name_too_long();
+  }
+
   grpc_shutdown();
   return 0;
 }
diff --git a/test/core/iomgr/resolve_address_test.cc b/test/core/iomgr/resolve_address_test.cc
index 1c5aa38..a0dc484 100644
--- a/test/core/iomgr/resolve_address_test.cc
+++ b/test/core/iomgr/resolve_address_test.cc
@@ -39,32 +39,32 @@
   grpc_pollset_set* pollset_set;
 } args_struct;
 
-static void do_nothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+static void do_nothing(void* arg, grpc_error* error) {}
 
-void args_init(grpc_exec_ctx* exec_ctx, args_struct* args) {
+void args_init(args_struct* args) {
   gpr_event_init(&args->ev);
   args->pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
   grpc_pollset_init(args->pollset, &args->mu);
   args->pollset_set = grpc_pollset_set_create();
-  grpc_pollset_set_add_pollset(exec_ctx, args->pollset_set, args->pollset);
+  grpc_pollset_set_add_pollset(args->pollset_set, args->pollset);
   args->addrs = nullptr;
   gpr_atm_rel_store(&args->done_atm, 0);
 }
 
-void args_finish(grpc_exec_ctx* exec_ctx, args_struct* args) {
+void args_finish(args_struct* args) {
   GPR_ASSERT(gpr_event_wait(&args->ev, test_deadline()));
   grpc_resolved_addresses_destroy(args->addrs);
-  grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset);
-  grpc_pollset_set_destroy(exec_ctx, args->pollset_set);
+  grpc_pollset_set_del_pollset(args->pollset_set, args->pollset);
+  grpc_pollset_set_destroy(args->pollset_set);
   grpc_closure do_nothing_cb;
   GRPC_CLOSURE_INIT(&do_nothing_cb, do_nothing, nullptr,
                     grpc_schedule_on_exec_ctx);
   gpr_mu_lock(args->mu);
-  grpc_pollset_shutdown(exec_ctx, args->pollset, &do_nothing_cb);
+  grpc_pollset_shutdown(args->pollset, &do_nothing_cb);
   gpr_mu_unlock(args->mu);
   // exec_ctx needs to be flushed before calling grpc_pollset_destroy()
-  grpc_exec_ctx_flush(exec_ctx);
-  grpc_pollset_destroy(exec_ctx, args->pollset);
+  grpc_core::ExecCtx::Get()->Flush();
+  grpc_pollset_destroy(args->pollset);
   gpr_free(args->pollset);
 }
 
@@ -74,119 +74,109 @@
 }
 
 static void poll_pollset_until_request_done(args_struct* args) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_millis deadline = n_sec_deadline(10);
   while (true) {
     bool done = gpr_atm_acq_load(&args->done_atm) != 0;
     if (done) {
       break;
     }
-    grpc_millis time_left = deadline - grpc_exec_ctx_now(&exec_ctx);
+    grpc_millis time_left = deadline - grpc_core::ExecCtx::Get()->Now();
     gpr_log(GPR_DEBUG, "done=%d, time_left=%" PRIdPTR, done, time_left);
     GPR_ASSERT(time_left >= 0);
     grpc_pollset_worker* worker = nullptr;
     gpr_mu_lock(args->mu);
-    GRPC_LOG_IF_ERROR("pollset_work",
-                      grpc_pollset_work(&exec_ctx, args->pollset, &worker,
-                                        n_sec_deadline(1)));
+    GRPC_LOG_IF_ERROR("pollset_work", grpc_pollset_work(args->pollset, &worker,
+                                                        n_sec_deadline(1)));
     gpr_mu_unlock(args->mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
   }
   gpr_event_set(&args->ev, (void*)1);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void must_succeed(grpc_exec_ctx* exec_ctx, void* argsp,
-                         grpc_error* err) {
+static void must_succeed(void* argsp, grpc_error* err) {
   args_struct* args = static_cast<args_struct*>(argsp);
   GPR_ASSERT(err == GRPC_ERROR_NONE);
   GPR_ASSERT(args->addrs != nullptr);
   GPR_ASSERT(args->addrs->naddrs > 0);
   gpr_atm_rel_store(&args->done_atm, 1);
   gpr_mu_lock(args->mu);
-  GRPC_LOG_IF_ERROR("pollset_kick",
-                    grpc_pollset_kick(exec_ctx, args->pollset, nullptr));
+  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(args->pollset, nullptr));
   gpr_mu_unlock(args->mu);
 }
 
-static void must_fail(grpc_exec_ctx* exec_ctx, void* argsp, grpc_error* err) {
+static void must_fail(void* argsp, grpc_error* err) {
   args_struct* args = static_cast<args_struct*>(argsp);
   GPR_ASSERT(err != GRPC_ERROR_NONE);
   gpr_atm_rel_store(&args->done_atm, 1);
   gpr_mu_lock(args->mu);
-  GRPC_LOG_IF_ERROR("pollset_kick",
-                    grpc_pollset_kick(exec_ctx, args->pollset, nullptr));
+  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(args->pollset, nullptr));
   gpr_mu_unlock(args->mu);
 }
 
 static void test_localhost(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   args_struct args;
-  args_init(&exec_ctx, &args);
+  args_init(&args);
   grpc_resolve_address(
-      &exec_ctx, "localhost:1", nullptr, args.pollset_set,
+      "localhost:1", nullptr, args.pollset_set,
       GRPC_CLOSURE_CREATE(must_succeed, &args, grpc_schedule_on_exec_ctx),
       &args.addrs);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   poll_pollset_until_request_done(&args);
-  args_finish(&exec_ctx, &args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  args_finish(&args);
 }
 
 static void test_default_port(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   args_struct args;
-  args_init(&exec_ctx, &args);
+  args_init(&args);
   grpc_resolve_address(
-      &exec_ctx, "localhost", "1", args.pollset_set,
+      "localhost", "1", args.pollset_set,
       GRPC_CLOSURE_CREATE(must_succeed, &args, grpc_schedule_on_exec_ctx),
       &args.addrs);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   poll_pollset_until_request_done(&args);
-  args_finish(&exec_ctx, &args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  args_finish(&args);
 }
 
 static void test_non_numeric_default_port(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   args_struct args;
-  args_init(&exec_ctx, &args);
+  args_init(&args);
   grpc_resolve_address(
-      &exec_ctx, "localhost", "https", args.pollset_set,
+      "localhost", "https", args.pollset_set,
       GRPC_CLOSURE_CREATE(must_succeed, &args, grpc_schedule_on_exec_ctx),
       &args.addrs);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   poll_pollset_until_request_done(&args);
-  args_finish(&exec_ctx, &args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  args_finish(&args);
 }
 
 static void test_missing_default_port(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   args_struct args;
-  args_init(&exec_ctx, &args);
+  args_init(&args);
   grpc_resolve_address(
-      &exec_ctx, "localhost", nullptr, args.pollset_set,
+      "localhost", nullptr, args.pollset_set,
       GRPC_CLOSURE_CREATE(must_fail, &args, grpc_schedule_on_exec_ctx),
       &args.addrs);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   poll_pollset_until_request_done(&args);
-  args_finish(&exec_ctx, &args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  args_finish(&args);
 }
 
 static void test_ipv6_with_port(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   args_struct args;
-  args_init(&exec_ctx, &args);
+  args_init(&args);
   grpc_resolve_address(
-      &exec_ctx, "[2001:db8::1]:1", nullptr, args.pollset_set,
+      "[2001:db8::1]:1", nullptr, args.pollset_set,
       GRPC_CLOSURE_CREATE(must_succeed, &args, grpc_schedule_on_exec_ctx),
       &args.addrs);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   poll_pollset_until_request_done(&args);
-  args_finish(&exec_ctx, &args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  args_finish(&args);
 }
 
 static void test_ipv6_without_port(void) {
@@ -197,17 +187,16 @@
   };
   unsigned i;
   for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     args_struct args;
-    args_init(&exec_ctx, &args);
+    args_init(&args);
     grpc_resolve_address(
-        &exec_ctx, kCases[i], "80", args.pollset_set,
+        kCases[i], "80", args.pollset_set,
         GRPC_CLOSURE_CREATE(must_succeed, &args, grpc_schedule_on_exec_ctx),
         &args.addrs);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     poll_pollset_until_request_done(&args);
-    args_finish(&exec_ctx, &args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    args_finish(&args);
   }
 }
 
@@ -218,17 +207,16 @@
   };
   unsigned i;
   for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     args_struct args;
-    args_init(&exec_ctx, &args);
+    args_init(&args);
     grpc_resolve_address(
-        &exec_ctx, kCases[i], nullptr, args.pollset_set,
+        kCases[i], nullptr, args.pollset_set,
         GRPC_CLOSURE_CREATE(must_fail, &args, grpc_schedule_on_exec_ctx),
         &args.addrs);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     poll_pollset_until_request_done(&args);
-    args_finish(&exec_ctx, &args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    args_finish(&args);
   }
 }
 
@@ -238,34 +226,35 @@
   };
   unsigned i;
   for (i = 0; i < sizeof(kCases) / sizeof(*kCases); i++) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     args_struct args;
-    args_init(&exec_ctx, &args);
+    args_init(&args);
     grpc_resolve_address(
-        &exec_ctx, kCases[i], "1", args.pollset_set,
+        kCases[i], "1", args.pollset_set,
         GRPC_CLOSURE_CREATE(must_fail, &args, grpc_schedule_on_exec_ctx),
         &args.addrs);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     poll_pollset_until_request_done(&args);
-    args_finish(&exec_ctx, &args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    args_finish(&args);
   }
 }
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  test_localhost();
-  test_default_port();
-  test_non_numeric_default_port();
-  test_missing_default_port();
-  test_ipv6_with_port();
-  test_ipv6_without_port();
-  test_invalid_ip_addresses();
-  test_unparseable_hostports();
-  grpc_executor_shutdown(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    test_localhost();
+    test_default_port();
+    test_non_numeric_default_port();
+    test_missing_default_port();
+    test_ipv6_with_port();
+    test_ipv6_without_port();
+    test_invalid_ip_addresses();
+    test_unparseable_hostports();
+    grpc_executor_shutdown();
+  }
+
   grpc_shutdown();
   return 0;
 }
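
The same migration narrows closure callbacks from three parameters to two, as the must_succeed/must_fail changes above show. A small sketch of creating and running such a closure under an ExecCtx (sketch only; the include paths are assumptions, while the macros and the two-argument GRPC_CLOSURE_RUN form are taken from this diff):

#include <grpc/grpc.h>
#include <grpc/support/log.h>

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

// New-style callback: no grpc_exec_ctx* parameter.
static void on_done(void* arg, grpc_error* error) {
  GPR_ASSERT(error == GRPC_ERROR_NONE);
  *static_cast<bool*>(arg) = true;
}

int main() {
  grpc_init();
  {
    grpc_core::ExecCtx exec_ctx;
    bool done = false;
    grpc_closure* c =
        GRPC_CLOSURE_CREATE(on_done, &done, grpc_schedule_on_exec_ctx);
    GRPC_CLOSURE_RUN(c, GRPC_ERROR_NONE);  // two-argument form, as above
    grpc_core::ExecCtx::Get()->Flush();
    GPR_ASSERT(done);
  }
  grpc_shutdown();
  return 0;
}
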
diff --git a/test/core/iomgr/resource_quota_test.cc b/test/core/iomgr/resource_quota_test.cc
index 6851702..ae26f72 100644
--- a/test/core/iomgr/resource_quota_test.cc
+++ b/test/core/iomgr/resource_quota_test.cc
@@ -27,7 +27,7 @@
 gpr_mu g_mu;
 gpr_cv g_cv;
 
-static void inc_int_cb(grpc_exec_ctx* exec_ctx, void* a, grpc_error* error) {
+static void inc_int_cb(void* a, grpc_error* error) {
   gpr_mu_lock(&g_mu);
   ++*(int*)a;
   gpr_cv_signal(&g_cv);
@@ -43,7 +43,7 @@
   gpr_mu_unlock(&g_mu);
 }
 
-static void set_event_cb(grpc_exec_ctx* exec_ctx, void* a, grpc_error* error) {
+static void set_event_cb(void* a, grpc_error* error) {
   gpr_event_set((gpr_event*)a, (void*)1);
 }
 grpc_closure* set_event(gpr_event* ev) {
@@ -56,13 +56,12 @@
   grpc_closure* then;
 } reclaimer_args;
 
-static void reclaimer_cb(grpc_exec_ctx* exec_ctx, void* args,
-                         grpc_error* error) {
+static void reclaimer_cb(void* args, grpc_error* error) {
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   reclaimer_args* a = static_cast<reclaimer_args*>(args);
-  grpc_resource_user_free(exec_ctx, a->resource_user, a->size);
-  grpc_resource_user_finish_reclamation(exec_ctx, a->resource_user);
-  GRPC_CLOSURE_RUN(exec_ctx, a->then, GRPC_ERROR_NONE);
+  grpc_resource_user_free(a->resource_user, a->size);
+  grpc_resource_user_finish_reclamation(a->resource_user);
+  GRPC_CLOSURE_RUN(a->then, GRPC_ERROR_NONE);
   gpr_free(a);
 }
 
@@ -75,10 +74,9 @@
   return GRPC_CLOSURE_CREATE(reclaimer_cb, a, grpc_schedule_on_exec_ctx);
 }
 
-static void unused_reclaimer_cb(grpc_exec_ctx* exec_ctx, void* arg,
-                                grpc_error* error) {
+static void unused_reclaimer_cb(void* arg, grpc_error* error) {
   GPR_ASSERT(error == GRPC_ERROR_CANCELLED);
-  GRPC_CLOSURE_RUN(exec_ctx, static_cast<grpc_closure*>(arg), GRPC_ERROR_NONE);
+  GRPC_CLOSURE_RUN(static_cast<grpc_closure*>(arg), GRPC_ERROR_NONE);
 }
 grpc_closure* make_unused_reclaimer(grpc_closure* then) {
   return GRPC_CLOSURE_CREATE(unused_reclaimer_cb, then,
@@ -86,9 +84,8 @@
 }
 
 static void destroy_user(grpc_resource_user* usr) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_resource_user_unref(&exec_ctx, usr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_resource_user_unref(usr);
 }
 
 static void test_no_op(void) {
@@ -120,14 +117,12 @@
   grpc_resource_quota_resize(q, 1024 * 1024);
   grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, nullptr);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, nullptr);
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr);
@@ -140,10 +135,9 @@
   grpc_resource_quota_resize(q, 1024 * 1024);
   grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, nullptr);
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, nullptr);
+    grpc_resource_user_free(usr, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr);
@@ -158,16 +152,15 @@
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr);
@@ -182,9 +175,9 @@
   gpr_event ev;
   gpr_event_init(&ev);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(
                    &ev, grpc_timeout_milliseconds_to_deadline(100)) == nullptr);
   }
@@ -193,9 +186,8 @@
              nullptr);
   ;
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr);
@@ -210,32 +202,30 @@
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr1, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
     ;
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr1, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr1, 1024);
   }
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr2, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
     ;
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr2, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr2, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr1);
@@ -251,33 +241,32 @@
   gpr_event ev;
   {
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr1, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
     ;
   }
   {
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr2, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(
                    &ev, grpc_timeout_milliseconds_to_deadline(100)) == nullptr);
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr1, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr1, 1024);
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
     ;
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr2, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr2, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr1);
@@ -293,9 +282,9 @@
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
     ;
@@ -303,18 +292,16 @@
   gpr_event reclaim_done;
   gpr_event_init(&reclaim_done);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_resource_user_post_reclaimer(
-        &exec_ctx, usr, false,
-        make_reclaimer(usr, 1024, set_event(&reclaim_done)));
-    grpc_exec_ctx_finish(&exec_ctx);
+        usr, false, make_reclaimer(usr, 1024, set_event(&reclaim_done)));
   }
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&reclaim_done,
                               grpc_timeout_seconds_to_deadline(5)) != nullptr);
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
@@ -322,9 +309,8 @@
     ;
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr);
@@ -340,9 +326,9 @@
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr1, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr1, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
     ;
@@ -350,18 +336,16 @@
   gpr_event reclaim_done;
   gpr_event_init(&reclaim_done);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_resource_user_post_reclaimer(
-        &exec_ctx, usr1, false,
-        make_reclaimer(usr1, 1024, set_event(&reclaim_done)));
-    grpc_exec_ctx_finish(&exec_ctx);
+        usr1, false, make_reclaimer(usr1, 1024, set_event(&reclaim_done)));
   }
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr2, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr2, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&reclaim_done,
                               grpc_timeout_seconds_to_deadline(5)) != nullptr);
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
@@ -369,9 +353,8 @@
     ;
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr2, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr2, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr1);
@@ -387,9 +370,9 @@
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
     ;
@@ -397,18 +380,16 @@
   gpr_event reclaim_done;
   gpr_event_init(&reclaim_done);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_resource_user_post_reclaimer(
-        &exec_ctx, usr, true,
-        make_reclaimer(usr, 1024, set_event(&reclaim_done)));
-    grpc_exec_ctx_finish(&exec_ctx);
+        usr, true, make_reclaimer(usr, 1024, set_event(&reclaim_done)));
   }
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&reclaim_done,
                               grpc_timeout_seconds_to_deadline(5)) != nullptr);
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
@@ -416,9 +397,8 @@
     ;
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr);
@@ -435,13 +415,12 @@
   gpr_event destructive_done;
   gpr_event_init(&destructive_done);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_resource_user_post_reclaimer(
-        &exec_ctx, usr, false, make_unused_reclaimer(set_event(&benign_done)));
+        usr, false, make_unused_reclaimer(set_event(&benign_done)));
     grpc_resource_user_post_reclaimer(
-        &exec_ctx, usr, true,
-        make_unused_reclaimer(set_event(&destructive_done)));
-    grpc_exec_ctx_finish(&exec_ctx);
+        usr, true, make_unused_reclaimer(set_event(&destructive_done)));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&benign_done,
                               grpc_timeout_milliseconds_to_deadline(100)) ==
                nullptr);
@@ -470,22 +449,20 @@
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
     ;
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_resource_user_post_reclaimer(
-        &exec_ctx, usr, false,
-        make_reclaimer(usr, 1024, set_event(&benign_done)));
+        usr, false, make_reclaimer(usr, 1024, set_event(&benign_done)));
     grpc_resource_user_post_reclaimer(
-        &exec_ctx, usr, true,
-        make_unused_reclaimer(set_event(&destructive_done)));
-    grpc_exec_ctx_finish(&exec_ctx);
+        usr, true, make_unused_reclaimer(set_event(&destructive_done)));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&benign_done,
                               grpc_timeout_milliseconds_to_deadline(100)) ==
                nullptr);
@@ -496,9 +473,9 @@
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&benign_done,
                               grpc_timeout_seconds_to_deadline(5)) != nullptr);
     GPR_ASSERT(gpr_event_wait(&destructive_done,
@@ -508,9 +485,8 @@
                nullptr);
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr);
@@ -533,22 +509,20 @@
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&ev, grpc_timeout_seconds_to_deadline(5)) !=
                nullptr);
     ;
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_resource_user_post_reclaimer(
-        &exec_ctx, usr, false,
-        make_reclaimer(usr, 512, set_event(&benign_done)));
+        usr, false, make_reclaimer(usr, 512, set_event(&benign_done)));
     grpc_resource_user_post_reclaimer(
-        &exec_ctx, usr, true,
-        make_reclaimer(usr, 512, set_event(&destructive_done)));
-    grpc_exec_ctx_finish(&exec_ctx);
+        usr, true, make_reclaimer(usr, 512, set_event(&destructive_done)));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&benign_done,
                               grpc_timeout_milliseconds_to_deadline(100)) ==
                nullptr);
@@ -559,9 +533,9 @@
   {
     gpr_event ev;
     gpr_event_init(&ev);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&ev));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&ev));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&benign_done,
                               grpc_timeout_seconds_to_deadline(5)) != nullptr);
     GPR_ASSERT(gpr_event_wait(&destructive_done,
@@ -571,9 +545,8 @@
     ;
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr, 1024);
   }
   grpc_resource_quota_unref(q);
   destroy_user(usr);
@@ -591,20 +564,17 @@
   grpc_resource_quota_resize(q, 1024 * 1024);
   grpc_resource_user* usr = grpc_resource_user_create(q, "usr");
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, nullptr);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, nullptr);
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_resource_quota_unref(q);
-    grpc_resource_user_unref(&exec_ctx, usr);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_resource_user_unref(usr);
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr, 1024);
   }
 }
 
@@ -624,11 +594,10 @@
     gpr_event reclaimer_cancelled;
     gpr_event_init(&reclaimer_cancelled);
     {
-      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+      grpc_core::ExecCtx exec_ctx;
       grpc_resource_user_post_reclaimer(
-          &exec_ctx, usr, false,
-          make_unused_reclaimer(set_event(&reclaimer_cancelled)));
-      grpc_exec_ctx_finish(&exec_ctx);
+          usr, false, make_unused_reclaimer(set_event(&reclaimer_cancelled)));
+      grpc_core::ExecCtx::Get()->Flush();
       GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
                                 grpc_timeout_milliseconds_to_deadline(100)) ==
                  nullptr);
@@ -636,27 +605,27 @@
     {
       gpr_event allocated;
       gpr_event_init(&allocated);
-      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-      grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&allocated));
-      grpc_exec_ctx_finish(&exec_ctx);
-      GPR_ASSERT(gpr_event_wait(&allocated, grpc_timeout_seconds_to_deadline(
-                                                5)) != nullptr);
+      grpc_core::ExecCtx exec_ctx;
+      grpc_resource_user_alloc(usr, 1024, set_event(&allocated));
+      grpc_core::ExecCtx::Get()->Flush();
+      GPR_ASSERT(gpr_event_wait(&allocated,
+                                grpc_timeout_seconds_to_deadline(5)) != NULL);
       GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
                                 grpc_timeout_milliseconds_to_deadline(100)) ==
                  nullptr);
     }
     {
-      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-      grpc_resource_user_unref(&exec_ctx, usr);
-      grpc_exec_ctx_finish(&exec_ctx);
+      grpc_core::ExecCtx exec_ctx;
+      grpc_resource_user_unref(usr);
+      grpc_core::ExecCtx::Get()->Flush();
       GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
                                 grpc_timeout_milliseconds_to_deadline(100)) ==
                  nullptr);
     }
     {
-      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-      grpc_resource_user_free(&exec_ctx, usr, 1024);
-      grpc_exec_ctx_finish(&exec_ctx);
+      grpc_core::ExecCtx exec_ctx;
+      grpc_resource_user_free(usr, 1024);
+      grpc_core::ExecCtx::Get()->Flush();
       GPR_ASSERT(gpr_event_wait(&reclaimer_cancelled,
                                 grpc_timeout_seconds_to_deadline(5)) !=
                  nullptr);
@@ -674,9 +643,9 @@
   {
     gpr_event allocated;
     gpr_event_init(&allocated);
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&allocated));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc(usr, 1024, set_event(&allocated));
+    grpc_core::ExecCtx::Get()->Flush();
     GPR_ASSERT(gpr_event_wait(&allocated,
                               grpc_timeout_seconds_to_deadline(5)) != nullptr);
   }
@@ -684,11 +653,10 @@
     gpr_event reclaimer_done;
     gpr_event_init(&reclaimer_done);
     {
-      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+      grpc_core::ExecCtx exec_ctx;
       grpc_resource_user_post_reclaimer(
-          &exec_ctx, usr, false,
-          make_reclaimer(usr, 1024, set_event(&reclaimer_done)));
-      grpc_exec_ctx_finish(&exec_ctx);
+          usr, false, make_reclaimer(usr, 1024, set_event(&reclaimer_done)));
+      grpc_core::ExecCtx::Get()->Flush();
       GPR_ASSERT(gpr_event_wait(&reclaimer_done,
                                 grpc_timeout_milliseconds_to_deadline(100)) ==
                  nullptr);
@@ -696,20 +664,19 @@
     {
       gpr_event allocated;
       gpr_event_init(&allocated);
-      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-      grpc_resource_user_alloc(&exec_ctx, usr, 1024, set_event(&allocated));
-      grpc_exec_ctx_finish(&exec_ctx);
-      GPR_ASSERT(gpr_event_wait(&allocated, grpc_timeout_seconds_to_deadline(
-                                                5)) != nullptr);
+      grpc_core::ExecCtx exec_ctx;
+      grpc_resource_user_alloc(usr, 1024, set_event(&allocated));
+      grpc_core::ExecCtx::Get()->Flush();
+      GPR_ASSERT(gpr_event_wait(&allocated,
+                                grpc_timeout_seconds_to_deadline(5)) != NULL);
       GPR_ASSERT(gpr_event_wait(&reclaimer_done,
                                 grpc_timeout_seconds_to_deadline(5)) !=
                  nullptr);
     }
   }
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_free(&exec_ctx, usr, 1024);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_free(usr, 1024);
   }
   destroy_user(usr);
   grpc_resource_quota_unref(q);
@@ -732,16 +699,15 @@
 
   {
     const int start_allocs = num_allocs;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc_slices(&alloc, 1024, 1, &buffer);
+    grpc_core::ExecCtx::Get()->Flush();
     assert_counter_becomes(&num_allocs, start_allocs + 1);
   }
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_slice_buffer_destroy_internal(&buffer);
   }
   destroy_user(usr);
   grpc_resource_quota_unref(q);
@@ -765,23 +731,21 @@
 
   {
     const int start_allocs = num_allocs;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc_slices(&alloc, 1024, 1, &buffer);
+    grpc_core::ExecCtx::Get()->Flush();
     assert_counter_becomes(&num_allocs, start_allocs + 1);
   }
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_unref(&exec_ctx, usr);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_unref(usr);
   }
 
   grpc_resource_quota_unref(q);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_slice_buffer_destroy_internal(&buffer);
   }
 }
 
@@ -809,9 +773,9 @@
 
   {
     const int start_allocs = num_allocs;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_alloc_slices(&exec_ctx, &alloc, 1024, 1, &buffer);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_alloc_slices(&alloc, 1024, 1, &buffer);
+    grpc_core::ExecCtx::Get()->Flush();
     assert_counter_becomes(&num_allocs, start_allocs + 1);
   }
 
@@ -822,16 +786,14 @@
   GPR_ASSERT(grpc_resource_quota_get_memory_pressure(q) > 1 - eps);
 
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_resource_user_unref(&exec_ctx, usr);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_resource_user_unref(usr);
   }
 
   grpc_resource_quota_unref(q);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_slice_buffer_destroy_internal(&buffer);
   }
 }
 
diff --git a/test/core/iomgr/tcp_client_posix_test.cc b/test/core/iomgr/tcp_client_posix_test.cc
index 9fb1a2d..40a050e 100644
--- a/test/core/iomgr/tcp_client_posix_test.cc
+++ b/test/core/iomgr/tcp_client_posix_test.cc
@@ -53,26 +53,24 @@
 static void finish_connection() {
   gpr_mu_lock(g_mu);
   g_connections_complete++;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(&exec_ctx, g_pollset, nullptr)));
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
+
   gpr_mu_unlock(g_mu);
 }
 
-static void must_succeed(grpc_exec_ctx* exec_ctx, void* arg,
-                         grpc_error* error) {
+static void must_succeed(void* arg, grpc_error* error) {
   GPR_ASSERT(g_connecting != nullptr);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
-  grpc_endpoint_shutdown(
-      exec_ctx, g_connecting,
-      GRPC_ERROR_CREATE_FROM_STATIC_STRING("must_succeed called"));
-  grpc_endpoint_destroy(exec_ctx, g_connecting);
+  grpc_endpoint_shutdown(g_connecting, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                           "must_succeed called"));
+  grpc_endpoint_destroy(g_connecting);
   g_connecting = nullptr;
   finish_connection();
 }
 
-static void must_fail(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void must_fail(void* arg, grpc_error* error) {
   GPR_ASSERT(g_connecting == nullptr);
   GPR_ASSERT(error != GRPC_ERROR_NONE);
   finish_connection();
@@ -85,7 +83,7 @@
   int r;
   int connections_complete_before;
   grpc_closure done;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_log(GPR_DEBUG, "test_succeeds");
 
@@ -108,8 +106,8 @@
   GPR_ASSERT(getsockname(svr_fd, (struct sockaddr*)addr,
                          (socklen_t*)&resolved_addr.len) == 0);
   GRPC_CLOSURE_INIT(&done, must_succeed, nullptr, grpc_schedule_on_exec_ctx);
-  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
-                          nullptr, &resolved_addr, GRPC_MILLIS_INF_FUTURE);
+  grpc_tcp_client_connect(&done, &g_connecting, g_pollset_set, nullptr,
+                          &resolved_addr, GRPC_MILLIS_INF_FUTURE);
 
   /* await the connection */
   do {
@@ -125,17 +123,15 @@
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
         "pollset_work",
-        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
+        grpc_pollset_work(g_pollset, &worker,
                           grpc_timespec_to_millis_round_up(
                               grpc_timeout_seconds_to_deadline(5)))));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(g_mu);
   }
 
   gpr_mu_unlock(g_mu);
-
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 void test_fails(void) {
@@ -143,7 +139,7 @@
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
   int connections_complete_before;
   grpc_closure done;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_log(GPR_DEBUG, "test_fails");
 
@@ -157,8 +153,8 @@
 
   /* connect to a broken address */
   GRPC_CLOSURE_INIT(&done, must_fail, nullptr, grpc_schedule_on_exec_ctx);
-  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set,
-                          nullptr, &resolved_addr, GRPC_MILLIS_INF_FUTURE);
+  grpc_tcp_client_connect(&done, &g_connecting, g_pollset_set, nullptr,
+                          &resolved_addr, GRPC_MILLIS_INF_FUTURE);
 
   gpr_mu_lock(g_mu);
 
@@ -166,7 +162,7 @@
   while (g_connections_complete == connections_complete_before) {
     grpc_pollset_worker* worker = nullptr;
     grpc_millis polling_deadline = test_deadline();
-    switch (grpc_timer_check(&exec_ctx, &polling_deadline)) {
+    switch (grpc_timer_check(&polling_deadline)) {
       case GRPC_TIMERS_FIRED:
         break;
       case GRPC_TIMERS_NOT_CHECKED:
@@ -174,42 +170,43 @@
       /* fall through */
       case GRPC_TIMERS_CHECKED_AND_EMPTY:
         GPR_ASSERT(GRPC_LOG_IF_ERROR(
-            "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
-                                              polling_deadline)));
+            "pollset_work",
+            grpc_pollset_work(g_pollset, &worker, polling_deadline)));
         break;
     }
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(g_mu);
   }
 
   gpr_mu_unlock(g_mu);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(p));
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
 }
 
 int main(int argc, char** argv) {
   grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_test_init(argc, argv);
   grpc_init();
-  g_pollset_set = grpc_pollset_set_create();
-  g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
-  grpc_pollset_init(g_pollset, &g_mu);
-  grpc_pollset_set_add_pollset(&exec_ctx, g_pollset_set, g_pollset);
-  grpc_exec_ctx_finish(&exec_ctx);
-  test_succeeds();
-  gpr_log(GPR_ERROR, "End of first test");
-  test_fails();
-  grpc_pollset_set_destroy(&exec_ctx, g_pollset_set);
-  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
-                    grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
+
+  {
+    grpc_core::ExecCtx exec_ctx;
+    g_pollset_set = grpc_pollset_set_create();
+    g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
+    grpc_pollset_init(g_pollset, &g_mu);
+    grpc_pollset_set_add_pollset(g_pollset_set, g_pollset);
+
+    test_succeeds();
+    gpr_log(GPR_ERROR, "End of first test");
+    test_fails();
+    grpc_pollset_set_destroy(g_pollset_set);
+    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
+                      grpc_schedule_on_exec_ctx);
+    grpc_pollset_shutdown(g_pollset, &destroyed);
+  }
+
   grpc_shutdown();
   gpr_free(g_pollset);
   return 0;
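
Note: the hunks above capture the core of this migration. The explicitly threaded grpc_exec_ctx (created with GRPC_EXEC_CTX_INIT, flushed with grpc_exec_ctx_flush, torn down with grpc_exec_ctx_finish) becomes a stack-allocated, thread-local grpc_core::ExecCtx. A minimal sketch of the resulting pattern, using only calls that appear in this diff (the header path is an assumption):

    #include "src/core/lib/iomgr/exec_ctx.h"  // assumed path for grpc_core::ExecCtx

    static void do_iomgr_work() {
      grpc_core::ExecCtx exec_ctx;         // replaces GRPC_EXEC_CTX_INIT
      // ... schedule closures, kick pollsets, start connects, etc. ...
      grpc_core::ExecCtx::Get()->Flush();  // replaces grpc_exec_ctx_flush(&exec_ctx)
    }  // leaving scope takes the place of grpc_exec_ctx_finish(&exec_ctx)

The same idea explains the new inner block in main(): the ExecCtx is scoped so that it is destroyed, and any pending closures run, before grpc_shutdown() is called.
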
diff --git a/test/core/iomgr/tcp_client_uv_test.cc b/test/core/iomgr/tcp_client_uv_test.cc
index 101d7bf..0c6250e 100644
--- a/test/core/iomgr/tcp_client_uv_test.cc
+++ b/test/core/iomgr/tcp_client_uv_test.cc
@@ -46,30 +46,28 @@
   return grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10));
 }
 
-static void finish_connection(grpc_exec_ctx* exec_ctx) {
+static void finish_connection() {
   gpr_mu_lock(g_mu);
   g_connections_complete++;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
-                               grpc_pollset_kick(exec_ctx, g_pollset, NULL)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, NULL)));
   gpr_mu_unlock(g_mu);
 }
 
-static void must_succeed(grpc_exec_ctx* exec_ctx, void* arg,
-                         grpc_error* error) {
+static void must_succeed(void* arg, grpc_error* error) {
   GPR_ASSERT(g_connecting != NULL);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
-  grpc_endpoint_shutdown(
-      exec_ctx, g_connecting,
-      GRPC_ERROR_CREATE_FROM_STATIC_STRING("must_succeed called"));
-  grpc_endpoint_destroy(exec_ctx, g_connecting);
+  grpc_endpoint_shutdown(g_connecting, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                           "must_succeed called"));
+  grpc_endpoint_destroy(g_connecting);
   g_connecting = NULL;
-  finish_connection(exec_ctx);
+  finish_connection();
 }
 
-static void must_fail(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void must_fail(void* arg, grpc_error* error) {
   GPR_ASSERT(g_connecting == NULL);
   GPR_ASSERT(error != GRPC_ERROR_NONE);
-  finish_connection(exec_ctx);
+  finish_connection();
 }
 
 static void close_cb(uv_handle_t* handle) { gpr_free(handle); }
@@ -89,7 +87,7 @@
   uv_tcp_t* svr_handle = static_cast<uv_tcp_t*>(gpr_malloc(sizeof(uv_tcp_t)));
   int connections_complete_before;
   grpc_closure done;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_log(GPR_DEBUG, "test_succeeds");
 
@@ -110,8 +108,8 @@
   GPR_ASSERT(uv_tcp_getsockname(svr_handle, (struct sockaddr*)addr,
                                 (int*)&resolved_addr.len) == 0);
   GRPC_CLOSURE_INIT(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx);
-  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, NULL, NULL,
-                          &resolved_addr, GRPC_MILLIS_INF_FUTURE);
+  grpc_tcp_client_connect(&done, &g_connecting, NULL, NULL, &resolved_addr,
+                          GRPC_MILLIS_INF_FUTURE);
 
   gpr_mu_lock(g_mu);
 
@@ -119,11 +117,11 @@
     grpc_pollset_worker* worker = NULL;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
         "pollset_work",
-        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
+        grpc_pollset_work(g_pollset, &worker,
                           grpc_timespec_to_millis_round_up(
                               grpc_timeout_seconds_to_deadline(5)))));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(g_mu);
   }
 
@@ -131,8 +129,6 @@
   uv_close((uv_handle_t*)svr_handle, close_cb);
 
   gpr_mu_unlock(g_mu);
-
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 void test_fails(void) {
@@ -140,7 +136,7 @@
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
   int connections_complete_before;
   grpc_closure done;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_log(GPR_DEBUG, "test_fails");
 
@@ -154,8 +150,8 @@
 
   /* connect to a broken address */
   GRPC_CLOSURE_INIT(&done, must_fail, NULL, grpc_schedule_on_exec_ctx);
-  grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, NULL, NULL,
-                          &resolved_addr, GRPC_MILLIS_INF_FUTURE);
+  grpc_tcp_client_connect(&done, &g_connecting, NULL, NULL, &resolved_addr,
+                          GRPC_MILLIS_INF_FUTURE);
 
   gpr_mu_lock(g_mu);
 
@@ -164,7 +160,7 @@
     grpc_pollset_worker* worker = NULL;
     gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
     grpc_millis polling_deadline = test_deadline();
-    switch (grpc_timer_check(&exec_ctx, &polling_deadline)) {
+    switch (grpc_timer_check(&polling_deadline)) {
       case GRPC_TIMERS_FIRED:
         break;
       case GRPC_TIMERS_NOT_CHECKED:
@@ -172,39 +168,37 @@
       /* fall through */
       case GRPC_TIMERS_CHECKED_AND_EMPTY:
         GPR_ASSERT(GRPC_LOG_IF_ERROR(
-            "pollset_work", grpc_pollset_work(&exec_ctx, g_pollset, &worker,
-                                              polling_deadline)));
+            "pollset_work",
+            grpc_pollset_work(g_pollset, &worker, polling_deadline)));
         break;
     }
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(g_mu);
   }
 
   gpr_mu_unlock(g_mu);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(p));
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
 }
 
 int main(int argc, char** argv) {
   grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_test_init(argc, argv);
   grpc_init();
   g_pollset = static_cast<grpc_pollset*>(gpr_malloc(grpc_pollset_size()));
   grpc_pollset_init(g_pollset, &g_mu);
-  grpc_exec_ctx_finish(&exec_ctx);
+
   test_succeeds();
   gpr_log(GPR_ERROR, "End of first test");
   test_fails();
   GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
                     grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_pollset_shutdown(g_pollset, &destroyed);
+
   grpc_shutdown();
   gpr_free(g_pollset);
   return 0;
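
Note: the other half of the change is the callback signature. Closures no longer receive a grpc_exec_ctx* as their first argument; the context is recovered from the thread instead. A before/after sketch (on_done and g_done are illustrative names, not from the tree):

    // Before: static void on_done(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error);
    // After:
    static bool g_done = false;

    static void on_done(void* arg, grpc_error* error) {
      GPR_ASSERT(error == GRPC_ERROR_NONE);
      *static_cast<bool*>(arg) = true;  // no exec_ctx parameter needed
    }

    // Closure setup itself is unchanged:
    //   GRPC_CLOSURE_INIT(&done, on_done, &g_done, grpc_schedule_on_exec_ctx);
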
diff --git a/test/core/iomgr/tcp_posix_test.cc b/test/core/iomgr/tcp_posix_test.cc
index 7986dc2..f4acba8 100644
--- a/test/core/iomgr/tcp_posix_test.cc
+++ b/test/core/iomgr/tcp_posix_test.cc
@@ -131,8 +131,7 @@
   return num_bytes;
 }
 
-static void read_cb(grpc_exec_ctx* exec_ctx, void* user_data,
-                    grpc_error* error) {
+static void read_cb(void* user_data, grpc_error* error) {
   struct read_socket_state* state = (struct read_socket_state*)user_data;
   size_t read_bytes;
   int current_data;
@@ -147,11 +146,11 @@
   gpr_log(GPR_INFO, "Read %" PRIuPTR " bytes of %" PRIuPTR, read_bytes,
           state->target_read_bytes);
   if (state->read_bytes >= state->target_read_bytes) {
-    GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+    GPR_ASSERT(
+        GRPC_LOG_IF_ERROR("kick", grpc_pollset_kick(g_pollset, nullptr)));
     gpr_mu_unlock(g_mu);
   } else {
-    grpc_endpoint_read(exec_ctx, state->ep, &state->incoming, &state->read_cb);
+    grpc_endpoint_read(state->ep, &state->incoming, &state->read_cb);
     gpr_mu_unlock(g_mu);
   }
 }
@@ -164,7 +163,7 @@
   size_t written_bytes;
   grpc_millis deadline =
       grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_log(GPR_INFO, "Read test of size %" PRIuPTR ", slice size %" PRIuPTR,
           num_bytes, slice_size);
@@ -175,9 +174,8 @@
   a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
   a[0].type = GRPC_ARG_INTEGER, a[0].value.integer = (int)slice_size;
   grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
-  ep = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[1], "read_test"), &args,
-                       "test");
-  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
+  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), &args, "test");
+  grpc_endpoint_add_to_pollset(ep, g_pollset);
 
   written_bytes = fill_socket_partial(sv[0], num_bytes);
   gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);
@@ -188,24 +186,22 @@
   grpc_slice_buffer_init(&state.incoming);
   GRPC_CLOSURE_INIT(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
 
-  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
+  grpc_endpoint_read(ep, &state.incoming, &state.read_cb);
 
   gpr_mu_lock(g_mu);
   while (state.read_bytes < state.target_read_bytes) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
+        "pollset_work", grpc_pollset_work(g_pollset, &worker, deadline)));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
   gpr_mu_unlock(g_mu);
 
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
-  grpc_endpoint_destroy(&exec_ctx, ep);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_slice_buffer_destroy_internal(&state.incoming);
+  grpc_endpoint_destroy(ep);
 }
 
 /* Write to a socket until it fills up, then read from it using the grpc_tcp
@@ -217,7 +213,7 @@
   ssize_t written_bytes;
   grpc_millis deadline =
       grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_log(GPR_INFO, "Start large read test, slice size %" PRIuPTR, slice_size);
 
@@ -228,9 +224,8 @@
   a[0].type = GRPC_ARG_INTEGER;
   a[0].value.integer = (int)slice_size;
   grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
-  ep = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[1], "large_read_test"),
-                       &args, "test");
-  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
+  ep = grpc_tcp_create(grpc_fd_create(sv[1], "large_read_test"), &args, "test");
+  grpc_endpoint_add_to_pollset(ep, g_pollset);
 
   written_bytes = fill_socket(sv[0]);
   gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);
@@ -241,24 +236,22 @@
   grpc_slice_buffer_init(&state.incoming);
   GRPC_CLOSURE_INIT(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
 
-  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
+  grpc_endpoint_read(ep, &state.incoming, &state.read_cb);
 
   gpr_mu_lock(g_mu);
   while (state.read_bytes < state.target_read_bytes) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
+        "pollset_work", grpc_pollset_work(g_pollset, &worker, deadline)));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
   gpr_mu_unlock(g_mu);
 
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
-  grpc_endpoint_destroy(&exec_ctx, ep);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_slice_buffer_destroy_internal(&state.incoming);
+  grpc_endpoint_destroy(ep);
 }
 
 struct write_socket_state {
@@ -289,16 +282,15 @@
   return slices;
 }
 
-static void write_done(grpc_exec_ctx* exec_ctx,
-                       void* user_data /* write_socket_state */,
+static void write_done(void* user_data /* write_socket_state */,
                        grpc_error* error) {
   struct write_socket_state* state = (struct write_socket_state*)user_data;
   gpr_log(GPR_INFO, "Write done callback called");
   gpr_mu_lock(g_mu);
   gpr_log(GPR_INFO, "Signalling write done");
   state->write_done = 1;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
   gpr_mu_unlock(g_mu);
 }
 
@@ -309,7 +301,7 @@
   int flags;
   int current = 0;
   int i;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   flags = fcntl(fd, F_GETFL, 0);
   GPR_ASSERT(fcntl(fd, F_SETFL, flags & ~O_NONBLOCK) == 0);
@@ -319,11 +311,11 @@
     gpr_mu_lock(g_mu);
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
         "pollset_work",
-        grpc_pollset_work(&exec_ctx, g_pollset, &worker,
+        grpc_pollset_work(g_pollset, &worker,
                           grpc_timespec_to_millis_round_up(
                               grpc_timeout_milliseconds_to_deadline(10)))));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     do {
       bytes_read =
           read(fd, buf, bytes_left > read_size ? read_size : bytes_left);
@@ -356,7 +348,7 @@
   grpc_closure write_done_closure;
   grpc_millis deadline =
       grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_log(GPR_INFO,
           "Start write test with %" PRIuPTR " bytes, slice size %" PRIuPTR,
@@ -368,9 +360,8 @@
   a[0].key = const_cast<char*>(GRPC_ARG_TCP_READ_CHUNK_SIZE);
   a[0].type = GRPC_ARG_INTEGER, a[0].value.integer = (int)slice_size;
   grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
-  ep = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[1], "write_test"), &args,
-                       "test");
-  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
+  ep = grpc_tcp_create(grpc_fd_create(sv[1], "write_test"), &args, "test");
+  grpc_endpoint_add_to_pollset(ep, g_pollset);
 
   state.ep = ep;
   state.write_done = 0;
@@ -382,7 +373,7 @@
   GRPC_CLOSURE_INIT(&write_done_closure, write_done, &state,
                     grpc_schedule_on_exec_ctx);
 
-  grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
+  grpc_endpoint_write(ep, &outgoing, &write_done_closure);
   drain_socket_blocking(sv[0], num_bytes, num_bytes);
   gpr_mu_lock(g_mu);
   for (;;) {
@@ -391,25 +382,23 @@
       break;
     }
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
+        "pollset_work", grpc_pollset_work(g_pollset, &worker, deadline)));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   gpr_mu_unlock(g_mu);
 
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &outgoing);
-  grpc_endpoint_destroy(&exec_ctx, ep);
+  grpc_slice_buffer_destroy_internal(&outgoing);
+  grpc_endpoint_destroy(ep);
   gpr_free(slices);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-void on_fd_released(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* errors) {
+void on_fd_released(void* arg, grpc_error* errors) {
   int* done = (int*)arg;
   *done = 1;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
 }
 
 /* Do a read_test, then release fd and try to read/write again. Verify that
@@ -422,7 +411,7 @@
   int fd;
   grpc_millis deadline =
       grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(20));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_closure fd_released_cb;
   int fd_released_done = 0;
   GRPC_CLOSURE_INIT(&fd_released_cb, &on_fd_released, &fd_released_done,
@@ -439,10 +428,9 @@
   a[0].type = GRPC_ARG_INTEGER;
   a[0].value.integer = (int)slice_size;
   grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
-  ep = grpc_tcp_create(&exec_ctx, grpc_fd_create(sv[1], "read_test"), &args,
-                       "test");
+  ep = grpc_tcp_create(grpc_fd_create(sv[1], "read_test"), &args, "test");
   GPR_ASSERT(grpc_tcp_fd(ep) == sv[1] && sv[1] >= 0);
-  grpc_endpoint_add_to_pollset(&exec_ctx, ep, g_pollset);
+  grpc_endpoint_add_to_pollset(ep, g_pollset);
 
   written_bytes = fill_socket_partial(sv[0], num_bytes);
   gpr_log(GPR_INFO, "Wrote %" PRIuPTR " bytes", written_bytes);
@@ -453,38 +441,35 @@
   grpc_slice_buffer_init(&state.incoming);
   GRPC_CLOSURE_INIT(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
 
-  grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
+  grpc_endpoint_read(ep, &state.incoming, &state.read_cb);
 
   gpr_mu_lock(g_mu);
   while (state.read_bytes < state.target_read_bytes) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
+        "pollset_work", grpc_pollset_work(g_pollset, &worker, deadline)));
     gpr_log(GPR_DEBUG, "wakeup: read=%" PRIdPTR " target=%" PRIdPTR,
             state.read_bytes, state.target_read_bytes);
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(g_mu);
   }
   GPR_ASSERT(state.read_bytes == state.target_read_bytes);
   gpr_mu_unlock(g_mu);
 
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &state.incoming);
-  grpc_tcp_destroy_and_release_fd(&exec_ctx, ep, &fd, &fd_released_cb);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_slice_buffer_destroy_internal(&state.incoming);
+  grpc_tcp_destroy_and_release_fd(ep, &fd, &fd_released_cb);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_mu_lock(g_mu);
   while (!fd_released_done) {
     grpc_pollset_worker* worker = nullptr;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "pollset_work",
-        grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
+        "pollset_work", grpc_pollset_work(g_pollset, &worker, deadline)));
     gpr_log(GPR_DEBUG, "wakeup: fd_released_done=%d", fd_released_done);
   }
   gpr_mu_unlock(g_mu);
   GPR_ASSERT(fd_released_done == 1);
   GPR_ASSERT(fd == sv[1]);
-  grpc_exec_ctx_finish(&exec_ctx);
 
   written_bytes = fill_socket_partial(sv[0], num_bytes);
   drain_socket_blocking(fd, written_bytes, written_bytes);
@@ -522,7 +507,7 @@
     size_t slice_size) {
   int sv[2];
   grpc_endpoint_test_fixture f;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   create_sockets(sv);
   grpc_resource_quota* resource_quota =
@@ -532,15 +517,13 @@
   a[0].type = GRPC_ARG_INTEGER;
   a[0].value.integer = (int)slice_size;
   grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
-  f.client_ep = grpc_tcp_create(
-      &exec_ctx, grpc_fd_create(sv[0], "fixture:client"), &args, "test");
-  f.server_ep = grpc_tcp_create(
-      &exec_ctx, grpc_fd_create(sv[1], "fixture:server"), &args, "test");
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
-  grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
-  grpc_endpoint_add_to_pollset(&exec_ctx, f.server_ep, g_pollset);
-
-  grpc_exec_ctx_finish(&exec_ctx);
+  f.client_ep =
+      grpc_tcp_create(grpc_fd_create(sv[0], "fixture:client"), &args, "test");
+  f.server_ep =
+      grpc_tcp_create(grpc_fd_create(sv[1], "fixture:server"), &args, "test");
+  grpc_resource_quota_unref_internal(resource_quota);
+  grpc_endpoint_add_to_pollset(f.client_ep, g_pollset);
+  grpc_endpoint_add_to_pollset(f.server_ep, g_pollset);
 
   return f;
 }
@@ -549,24 +532,26 @@
     {"tcp/tcp_socketpair", create_fixture_tcp_socketpair, clean_up},
 };
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, (grpc_pollset*)p);
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy((grpc_pollset*)p);
 }
 
 int main(int argc, char** argv) {
   grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_test_init(argc, argv);
   grpc_init();
-  g_pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
-  grpc_pollset_init(g_pollset, &g_mu);
-  grpc_endpoint_tests(configs[0], g_pollset, g_mu);
-  run_tests();
-  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
-                    grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    g_pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
+    grpc_pollset_init(g_pollset, &g_mu);
+    grpc_endpoint_tests(configs[0], g_pollset, g_mu);
+    run_tests();
+    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
+                      grpc_schedule_on_exec_ctx);
+    grpc_pollset_shutdown(g_pollset, &destroyed);
+
+    grpc_core::ExecCtx::Get()->Flush();
+  }
   grpc_shutdown();
   gpr_free(g_pollset);
 
diff --git a/test/core/iomgr/tcp_server_posix_test.cc b/test/core/iomgr/tcp_server_posix_test.cc
index 48d8d42..3c9ca21 100644
--- a/test/core/iomgr/tcp_server_posix_test.cc
+++ b/test/core/iomgr/tcp_server_posix_test.cc
@@ -110,8 +110,7 @@
       result->server, acceptor->port_index, acceptor->fd_index);
 }
 
-static void server_weak_ref_shutdown(grpc_exec_ctx* exec_ctx, void* arg,
-                                     grpc_error* error) {
+static void server_weak_ref_shutdown(void* arg, grpc_error* error) {
   server_weak_ref* weak_ref = static_cast<server_weak_ref*>(arg);
   weak_ref->server = nullptr;
 }
@@ -145,12 +144,11 @@
   }
 }
 
-static void on_connect(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* tcp,
-                       grpc_pollset* pollset,
+static void on_connect(void* arg, grpc_endpoint* tcp, grpc_pollset* pollset,
                        grpc_tcp_server_acceptor* acceptor) {
-  grpc_endpoint_shutdown(exec_ctx, tcp,
+  grpc_endpoint_shutdown(tcp,
                          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Connected"));
-  grpc_endpoint_destroy(exec_ctx, tcp);
+  grpc_endpoint_destroy(tcp);
 
   on_connect_result temp_result;
   on_connect_result_set(&temp_result, acceptor);
@@ -159,38 +157,33 @@
   gpr_mu_lock(g_mu);
   g_result = temp_result;
   g_nconnects++;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
   gpr_mu_unlock(g_mu);
 }
 
 static void test_no_op(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_tcp_server* s;
-  GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, nullptr, nullptr, &s));
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(nullptr, nullptr, &s));
+  grpc_tcp_server_unref(s);
 }
 
 static void test_no_op_with_start(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_tcp_server* s;
-  GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, nullptr, nullptr, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(nullptr, nullptr, &s));
   LOG_TEST("test_no_op_with_start");
-  grpc_tcp_server_start(&exec_ctx, s, nullptr, 0, on_connect, nullptr);
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_start(s, nullptr, 0, on_connect, nullptr);
+  grpc_tcp_server_unref(s);
 }
 
 static void test_no_op_with_port(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
   grpc_tcp_server* s;
-  GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, nullptr, nullptr, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(nullptr, nullptr, &s));
   LOG_TEST("test_no_op_with_port");
 
   memset(&resolved_addr, 0, sizeof(resolved_addr));
@@ -201,17 +194,15 @@
                  GRPC_ERROR_NONE &&
              port > 0);
 
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_unref(s);
 }
 
 static void test_no_op_with_port_and_start(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
   grpc_tcp_server* s;
-  GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, nullptr, nullptr, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(nullptr, nullptr, &s));
   LOG_TEST("test_no_op_with_port_and_start");
   int port = -1;
 
@@ -222,13 +213,12 @@
                  GRPC_ERROR_NONE &&
              port > 0);
 
-  grpc_tcp_server_start(&exec_ctx, s, nullptr, 0, on_connect, nullptr);
+  grpc_tcp_server_start(s, nullptr, 0, on_connect, nullptr);
 
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_unref(s);
 }
 
-static grpc_error* tcp_connect(grpc_exec_ctx* exec_ctx, const test_addr* remote,
+static grpc_error* tcp_connect(const test_addr* remote,
                                on_connect_result* result) {
   grpc_millis deadline =
       grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10));
@@ -254,17 +244,17 @@
   }
   gpr_log(GPR_DEBUG, "wait");
   while (g_nconnects == nconnects_before &&
-         deadline > grpc_exec_ctx_now(exec_ctx)) {
+         deadline > grpc_core::ExecCtx::Get()->Now()) {
     grpc_pollset_worker* worker = nullptr;
     grpc_error* err;
-    if ((err = grpc_pollset_work(exec_ctx, g_pollset, &worker, deadline)) !=
+    if ((err = grpc_pollset_work(g_pollset, &worker, deadline)) !=
         GRPC_ERROR_NONE) {
       gpr_mu_unlock(g_mu);
       close(clifd);
       return err;
     }
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   gpr_log(GPR_DEBUG, "wait done");
@@ -279,7 +269,7 @@
   gpr_mu_unlock(g_mu);
   gpr_log(GPR_INFO, "Result (%d, %d) fd %d", result->port_index,
           result->fd_index, result->server_fd);
-  grpc_tcp_server_unref(exec_ctx, result->server);
+  grpc_tcp_server_unref(result->server);
   return GRPC_ERROR_NONE;
 }
 
@@ -292,7 +282,7 @@
 static void test_connect(size_t num_connects,
                          const grpc_channel_args* channel_args,
                          test_addrs* dst_addrs, bool test_dst_addrs) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   grpc_resolved_address resolved_addr1;
   struct sockaddr_storage* const addr =
@@ -307,7 +297,7 @@
   grpc_tcp_server* s;
   const unsigned num_ports = 2;
   GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, nullptr, channel_args, &s));
+             grpc_tcp_server_create(nullptr, channel_args, &s));
   unsigned port_num;
   server_weak_ref weak_ref;
   server_weak_ref_init(&weak_ref);
@@ -352,7 +342,7 @@
   svr1_fd_count = grpc_tcp_server_port_fd_count(s, 1);
   GPR_ASSERT(svr1_fd_count >= 1);
 
-  grpc_tcp_server_start(&exec_ctx, s, &g_pollset, 1, on_connect, nullptr);
+  grpc_tcp_server_start(s, &g_pollset, 1, on_connect, nullptr);
 
   if (dst_addrs != nullptr) {
     int ports[] = {svr_port, svr1_port};
@@ -372,7 +362,7 @@
         test_addr_init_str(&dst);
         ++num_tested;
         on_connect_result_init(&result);
-        if ((err = tcp_connect(&exec_ctx, &dst, &result)) == GRPC_ERROR_NONE &&
+        if ((err = tcp_connect(&dst, &result)) == GRPC_ERROR_NONE &&
             result.server_fd >= 0 && result.server == s) {
           continue;
         }
@@ -403,8 +393,8 @@
         for (connect_num = 0; connect_num < num_connects; ++connect_num) {
           on_connect_result result;
           on_connect_result_init(&result);
-          GPR_ASSERT(GRPC_LOG_IF_ERROR("tcp_connect",
-                                       tcp_connect(&exec_ctx, &dst, &result)));
+          GPR_ASSERT(
+              GRPC_LOG_IF_ERROR("tcp_connect", tcp_connect(&dst, &result)));
           GPR_ASSERT(result.server_fd == fd);
           GPR_ASSERT(result.port_index == port_num);
           GPR_ASSERT(result.fd_index == fd_num);
@@ -420,21 +410,19 @@
   GPR_ASSERT(weak_ref.server != nullptr);
   GPR_ASSERT(grpc_tcp_server_port_fd(s, 0, 0) >= 0);
 
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_unref(s);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Weak ref lost. */
   GPR_ASSERT(weak_ref.server == nullptr);
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(p));
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
 }
 
 int main(int argc, char** argv) {
   grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_arg chan_args[1];
   chan_args[0].type = GRPC_ARG_INTEGER;
   chan_args[0].key = const_cast<char*>(GRPC_ARG_EXPAND_WILDCARD_ADDRS);
@@ -447,58 +435,61 @@
       static_cast<test_addrs*>(gpr_zalloc(sizeof(*dst_addrs)));
   grpc_test_init(argc, argv);
   grpc_init();
-  g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
-  grpc_pollset_init(g_pollset, &g_mu);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
+    grpc_pollset_init(g_pollset, &g_mu);
 
-  test_no_op();
-  test_no_op_with_start();
-  test_no_op_with_port();
-  test_no_op_with_port_and_start();
+    test_no_op();
+    test_no_op_with_start();
+    test_no_op_with_port();
+    test_no_op_with_port_and_start();
 
-  if (getifaddrs(&ifa) != 0 || ifa == nullptr) {
-    gpr_log(GPR_ERROR, "getifaddrs: %s", strerror(errno));
-    return EXIT_FAILURE;
-  }
-  dst_addrs->naddrs = 0;
-  for (ifa_it = ifa; ifa_it != nullptr && dst_addrs->naddrs < MAX_ADDRS;
-       ifa_it = ifa_it->ifa_next) {
-    if (ifa_it->ifa_addr == nullptr) {
-      continue;
-    } else if (ifa_it->ifa_addr->sa_family == AF_INET) {
-      dst_addrs->addrs[dst_addrs->naddrs].addr.len = sizeof(struct sockaddr_in);
-    } else if (ifa_it->ifa_addr->sa_family == AF_INET6) {
-      dst_addrs->addrs[dst_addrs->naddrs].addr.len =
-          sizeof(struct sockaddr_in6);
-    } else {
-      continue;
+    if (getifaddrs(&ifa) != 0 || ifa == nullptr) {
+      gpr_log(GPR_ERROR, "getifaddrs: %s", strerror(errno));
+      return EXIT_FAILURE;
     }
-    memcpy(dst_addrs->addrs[dst_addrs->naddrs].addr.addr, ifa_it->ifa_addr,
-           dst_addrs->addrs[dst_addrs->naddrs].addr.len);
-    GPR_ASSERT(
-        grpc_sockaddr_set_port(&dst_addrs->addrs[dst_addrs->naddrs].addr, 0));
-    test_addr_init_str(&dst_addrs->addrs[dst_addrs->naddrs]);
-    ++dst_addrs->naddrs;
+    dst_addrs->naddrs = 0;
+    for (ifa_it = ifa; ifa_it != nullptr && dst_addrs->naddrs < MAX_ADDRS;
+         ifa_it = ifa_it->ifa_next) {
+      if (ifa_it->ifa_addr == nullptr) {
+        continue;
+      } else if (ifa_it->ifa_addr->sa_family == AF_INET) {
+        dst_addrs->addrs[dst_addrs->naddrs].addr.len =
+            sizeof(struct sockaddr_in);
+      } else if (ifa_it->ifa_addr->sa_family == AF_INET6) {
+        dst_addrs->addrs[dst_addrs->naddrs].addr.len =
+            sizeof(struct sockaddr_in6);
+      } else {
+        continue;
+      }
+      memcpy(dst_addrs->addrs[dst_addrs->naddrs].addr.addr, ifa_it->ifa_addr,
+             dst_addrs->addrs[dst_addrs->naddrs].addr.len);
+      GPR_ASSERT(
+          grpc_sockaddr_set_port(&dst_addrs->addrs[dst_addrs->naddrs].addr, 0));
+      test_addr_init_str(&dst_addrs->addrs[dst_addrs->naddrs]);
+      ++dst_addrs->naddrs;
+    }
+    freeifaddrs(ifa);
+    ifa = nullptr;
+
+    /* Connect to same addresses as listeners. */
+    test_connect(1, nullptr, nullptr, false);
+    test_connect(10, nullptr, nullptr, false);
+
+    /* Set dst_addrs->addrs[i].len=0 for dst_addrs that are unreachable with a
+       "::" listener. */
+    test_connect(1, nullptr, dst_addrs, true);
+
+    /* Test connect(2) with dst_addrs. */
+    test_connect(1, &channel_args, dst_addrs, false);
+    /* Test connect(2) with dst_addrs. */
+    test_connect(10, &channel_args, dst_addrs, false);
+
+    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
+                      grpc_schedule_on_exec_ctx);
+    grpc_pollset_shutdown(g_pollset, &destroyed);
   }
-  freeifaddrs(ifa);
-  ifa = nullptr;
-
-  /* Connect to same addresses as listeners. */
-  test_connect(1, nullptr, nullptr, false);
-  test_connect(10, nullptr, nullptr, false);
-
-  /* Set dst_addrs->addrs[i].len=0 for dst_addrs that are unreachable with a
-     "::" listener. */
-  test_connect(1, nullptr, dst_addrs, true);
-
-  /* Test connect(2) with dst_addrs. */
-  test_connect(1, &channel_args, dst_addrs, false);
-  /* Test connect(2) with dst_addrs. */
-  test_connect(10, &channel_args, dst_addrs, false);
-
-  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
-                    grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
   gpr_free(dst_addrs);
   gpr_free(g_pollset);
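
Note: this file also shows the clock accessor moving onto the context. Deadline-bounded polling loops now compare against grpc_core::ExecCtx::Get()->Now() instead of grpc_exec_ctx_now(&exec_ctx). A sketch of the loop shape, mirroring the hunks above (g_done stands in for the tests' completion counters):

    gpr_mu_lock(g_mu);
    while (!g_done && deadline > grpc_core::ExecCtx::Get()->Now()) {
      grpc_pollset_worker* worker = nullptr;
      GPR_ASSERT(GRPC_LOG_IF_ERROR(
          "pollset_work", grpc_pollset_work(g_pollset, &worker, deadline)));
      gpr_mu_unlock(g_mu);
      grpc_core::ExecCtx::Get()->Flush();  // run closures outside the pollset lock
      gpr_mu_lock(g_mu);
    }
    gpr_mu_unlock(g_mu);
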
diff --git a/test/core/iomgr/tcp_server_uv_test.cc b/test/core/iomgr/tcp_server_uv_test.cc
index dd047a0..35d62b5 100644
--- a/test/core/iomgr/tcp_server_uv_test.cc
+++ b/test/core/iomgr/tcp_server_uv_test.cc
@@ -74,8 +74,7 @@
   result->fd_index = acceptor->fd_index;
 }
 
-static void server_weak_ref_shutdown(grpc_exec_ctx* exec_ctx, void* arg,
-                                     grpc_error* error) {
+static void server_weak_ref_shutdown(void* arg, grpc_error* error) {
   server_weak_ref* weak_ref = static_cast<server_weak_ref*>(arg);
   weak_ref->server = NULL;
 }
@@ -97,12 +96,11 @@
   weak_ref->server = server;
 }
 
-static void on_connect(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* tcp,
-                       grpc_pollset* pollset,
+static void on_connect(void* arg, grpc_endpoint* tcp, grpc_pollset* pollset,
                        grpc_tcp_server_acceptor* acceptor) {
-  grpc_endpoint_shutdown(exec_ctx, tcp,
+  grpc_endpoint_shutdown(tcp,
                          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Connected"));
-  grpc_endpoint_destroy(exec_ctx, tcp);
+  grpc_endpoint_destroy(tcp);
 
   on_connect_result temp_result;
   on_connect_result_set(&temp_result, acceptor);
@@ -111,38 +109,33 @@
   gpr_mu_lock(g_mu);
   g_result = temp_result;
   g_nconnects++;
-  GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
-                               grpc_pollset_kick(exec_ctx, g_pollset, NULL)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, NULL)));
   gpr_mu_unlock(g_mu);
 }
 
 static void test_no_op(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_tcp_server* s;
-  GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
+  grpc_tcp_server_unref(s);
 }
 
 static void test_no_op_with_start(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_tcp_server* s;
-  GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
   LOG_TEST("test_no_op_with_start");
-  grpc_tcp_server_start(&exec_ctx, s, NULL, 0, on_connect, NULL);
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_start(s, NULL, 0, on_connect, NULL);
+  grpc_tcp_server_unref(s);
 }
 
 static void test_no_op_with_port(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
   grpc_tcp_server* s;
-  GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
   LOG_TEST("test_no_op_with_port");
 
   memset(&resolved_addr, 0, sizeof(resolved_addr));
@@ -153,17 +146,15 @@
                  GRPC_ERROR_NONE &&
              port > 0);
 
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_unref(s);
 }
 
 static void test_no_op_with_port_and_start(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
   grpc_tcp_server* s;
-  GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
   LOG_TEST("test_no_op_with_port_and_start");
   int port;
 
@@ -174,10 +165,9 @@
                  GRPC_ERROR_NONE &&
              port > 0);
 
-  grpc_tcp_server_start(&exec_ctx, s, NULL, 0, on_connect, NULL);
+  grpc_tcp_server_start(s, NULL, 0, on_connect, NULL);
 
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_unref(s);
 }
 
 static void connect_cb(uv_connect_t* req, int status) {
@@ -187,8 +177,8 @@
 
 static void close_cb(uv_handle_t* handle) { gpr_free(handle); }
 
-static void tcp_connect(grpc_exec_ctx* exec_ctx, const struct sockaddr* remote,
-                        socklen_t remote_len, on_connect_result* result) {
+static void tcp_connect(const struct sockaddr* remote, socklen_t remote_len,
+                        on_connect_result* result) {
   gpr_timespec deadline = grpc_timeout_seconds_to_deadline(10);
   uv_tcp_t* client_handle =
       static_cast<uv_tcp_t*>(gpr_malloc(sizeof(uv_tcp_t)));
@@ -208,10 +198,10 @@
     grpc_pollset_worker* worker = NULL;
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
         "pollset_work",
-        grpc_pollset_work(exec_ctx, g_pollset, &worker,
+        grpc_pollset_work(g_pollset, &worker,
                           grpc_timespec_to_millis_round_up(deadline))));
     gpr_mu_unlock(g_mu);
-    grpc_exec_ctx_finish(exec_ctx);
+
     gpr_mu_lock(g_mu);
   }
   gpr_log(GPR_DEBUG, "wait done");
@@ -224,7 +214,7 @@
 
 /* Tests a tcp server with multiple ports. */
 static void test_connect(unsigned n) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   grpc_resolved_address resolved_addr1;
   struct sockaddr_storage* addr = (struct sockaddr_storage*)resolved_addr.addr;
@@ -233,8 +223,7 @@
   int svr_port;
   int svr1_port;
   grpc_tcp_server* s;
-  GPR_ASSERT(GRPC_ERROR_NONE ==
-             grpc_tcp_server_create(&exec_ctx, NULL, NULL, &s));
+  GPR_ASSERT(GRPC_ERROR_NONE == grpc_tcp_server_create(NULL, NULL, &s));
   unsigned i;
   server_weak_ref weak_ref;
   server_weak_ref_init(&weak_ref);
@@ -257,48 +246,45 @@
                  GRPC_ERROR_NONE &&
              svr_port == svr1_port);
 
-  grpc_tcp_server_start(&exec_ctx, s, &g_pollset, 1, on_connect, NULL);
+  grpc_tcp_server_start(s, &g_pollset, 1, on_connect, NULL);
 
   GPR_ASSERT(uv_ip6_addr("::", svr_port, (struct sockaddr_in6*)addr1) == 0);
 
   for (i = 0; i < n; i++) {
     on_connect_result result;
     on_connect_result_init(&result);
-    tcp_connect(&exec_ctx, (struct sockaddr*)addr, (socklen_t)resolved_addr.len,
-                &result);
+    tcp_connect((struct sockaddr*)addr, (socklen_t)resolved_addr.len, &result);
     GPR_ASSERT(result.port_index == 0);
     GPR_ASSERT(result.server == s);
     if (weak_ref.server == NULL) {
       server_weak_ref_set(&weak_ref, result.server);
     }
-    grpc_tcp_server_unref(&exec_ctx, result.server);
+    grpc_tcp_server_unref(result.server);
 
     on_connect_result_init(&result);
-    tcp_connect(&exec_ctx, (struct sockaddr*)addr1,
-                (socklen_t)resolved_addr1.len, &result);
+    tcp_connect((struct sockaddr*)addr1, (socklen_t)resolved_addr1.len,
+                &result);
     GPR_ASSERT(result.port_index == 1);
     GPR_ASSERT(result.server == s);
-    grpc_tcp_server_unref(&exec_ctx, result.server);
+    grpc_tcp_server_unref(result.server);
   }
 
   /* Weak ref to server valid until final unref. */
   GPR_ASSERT(weak_ref.server != NULL);
 
-  grpc_tcp_server_unref(&exec_ctx, s);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_unref(s);
 
   /* Weak ref lost. */
   GPR_ASSERT(weak_ref.server == NULL);
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(p));
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
 }
 
 int main(int argc, char** argv) {
   grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_test_init(argc, argv);
   grpc_init();
   g_pollset = static_cast<grpc_pollset*>(gpr_malloc(grpc_pollset_size()));
@@ -313,8 +299,8 @@
 
   GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
                     grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_pollset_shutdown(g_pollset, &destroyed);
+
   grpc_shutdown();
   gpr_free(g_pollset);
   return 0;
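
For quick reference, the signature changes this file and its posix counterpart exercise, all taken directly from the hunks above:

    // Old call                                        New call
    // grpc_tcp_server_create(&exec_ctx, ...)       -> grpc_tcp_server_create(...)
    // grpc_tcp_server_start(&exec_ctx, s, ...)     -> grpc_tcp_server_start(s, ...)
    // grpc_tcp_server_unref(&exec_ctx, s)          -> grpc_tcp_server_unref(s)
    // grpc_endpoint_shutdown(&exec_ctx, ep, err)   -> grpc_endpoint_shutdown(ep, err)
    // grpc_endpoint_destroy(&exec_ctx, ep)         -> grpc_endpoint_destroy(ep)
    // grpc_pollset_work(&exec_ctx, ps, &w, dl)     -> grpc_pollset_work(ps, &w, dl)
    // grpc_pollset_kick(&exec_ctx, ps, worker)     -> grpc_pollset_kick(ps, worker)
    // grpc_pollset_shutdown(&exec_ctx, ps, done)   -> grpc_pollset_shutdown(ps, done)
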
diff --git a/test/core/iomgr/timer_list_test.cc b/test/core/iomgr/timer_list_test.cc
index d74ea4f..deb8c4d 100644
--- a/test/core/iomgr/timer_list_test.cc
+++ b/test/core/iomgr/timer_list_test.cc
@@ -25,6 +25,7 @@
 
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/log.h>
 #include "src/core/lib/debug/trace.h"
 #include "test/core/util/test_config.h"
@@ -37,127 +38,125 @@
 
 static int cb_called[MAX_CB][2];
 
-static void cb(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void cb(void* arg, grpc_error* error) {
   cb_called[(intptr_t)arg][error == GRPC_ERROR_NONE]++;
 }
 
 static void add_test(void) {
   int i;
   grpc_timer timers[20];
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_log(GPR_INFO, "add_test");
 
-  grpc_timer_list_init(&exec_ctx);
+  grpc_timer_list_init();
   grpc_core::testing::grpc_tracer_enable_flag(&grpc_timer_trace);
   grpc_core::testing::grpc_tracer_enable_flag(&grpc_timer_check_trace);
   memset(cb_called, 0, sizeof(cb_called));
 
-  grpc_millis start = grpc_exec_ctx_now(&exec_ctx);
+  grpc_millis start = grpc_core::ExecCtx::Get()->Now();
 
   /* 10 ms timers.  will expire in the current epoch */
   for (i = 0; i < 10; i++) {
     grpc_timer_init(
-        &exec_ctx, &timers[i], start + 10,
+        &timers[i], start + 10,
         GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)i, grpc_schedule_on_exec_ctx));
   }
 
   /* 1010 ms timers.  will expire in the next epoch */
   for (i = 10; i < 20; i++) {
     grpc_timer_init(
-        &exec_ctx, &timers[i], start + 1010,
+        &timers[i], start + 1010,
         GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)i, grpc_schedule_on_exec_ctx));
   }
 
   /* collect timers.  Only the first batch should be ready. */
-  exec_ctx.now = start + 500;
-  GPR_ASSERT(grpc_timer_check(&exec_ctx, nullptr) == GRPC_TIMERS_FIRED);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(start + 500);
+  GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED);
+  grpc_core::ExecCtx::Get()->Flush();
   for (i = 0; i < 20; i++) {
     GPR_ASSERT(cb_called[i][1] == (i < 10));
     GPR_ASSERT(cb_called[i][0] == 0);
   }
 
-  exec_ctx.now = start + 600;
-  GPR_ASSERT(grpc_timer_check(&exec_ctx, nullptr) ==
-             GRPC_TIMERS_CHECKED_AND_EMPTY);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(start + 600);
+  GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_CHECKED_AND_EMPTY);
+  grpc_core::ExecCtx::Get()->Flush();
   for (i = 0; i < 30; i++) {
     GPR_ASSERT(cb_called[i][1] == (i < 10));
     GPR_ASSERT(cb_called[i][0] == 0);
   }
 
   /* collect the rest of the timers */
-  exec_ctx.now = start + 1500;
-  GPR_ASSERT(grpc_timer_check(&exec_ctx, nullptr) == GRPC_TIMERS_FIRED);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(start + 1500);
+  GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED);
+  grpc_core::ExecCtx::Get()->Flush();
   for (i = 0; i < 30; i++) {
     GPR_ASSERT(cb_called[i][1] == (i < 20));
     GPR_ASSERT(cb_called[i][0] == 0);
   }
 
-  exec_ctx.now = start + 1600;
-  GPR_ASSERT(grpc_timer_check(&exec_ctx, nullptr) ==
-             GRPC_TIMERS_CHECKED_AND_EMPTY);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(start + 1600);
+  GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_CHECKED_AND_EMPTY);
   for (i = 0; i < 30; i++) {
     GPR_ASSERT(cb_called[i][1] == (i < 20));
     GPR_ASSERT(cb_called[i][0] == 0);
   }
 
-  grpc_timer_list_shutdown(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_timer_list_shutdown();
 }
 
 /* Cleaning up a list with pending timers. */
 void destruction_test(void) {
   grpc_timer timers[5];
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   gpr_log(GPR_INFO, "destruction_test");
 
-  exec_ctx.now_is_valid = true;
-  exec_ctx.now = 0;
-  grpc_timer_list_init(&exec_ctx);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(0);
+  grpc_timer_list_init();
   grpc_core::testing::grpc_tracer_enable_flag(&grpc_timer_trace);
   grpc_core::testing::grpc_tracer_enable_flag(&grpc_timer_check_trace);
   memset(cb_called, 0, sizeof(cb_called));
 
   grpc_timer_init(
-      &exec_ctx, &timers[0], 100,
+      &timers[0], 100,
       GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)0, grpc_schedule_on_exec_ctx));
   grpc_timer_init(
-      &exec_ctx, &timers[1], 3,
+      &timers[1], 3,
       GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)1, grpc_schedule_on_exec_ctx));
   grpc_timer_init(
-      &exec_ctx, &timers[2], 100,
+      &timers[2], 100,
       GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)2, grpc_schedule_on_exec_ctx));
   grpc_timer_init(
-      &exec_ctx, &timers[3], 3,
+      &timers[3], 3,
       GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)3, grpc_schedule_on_exec_ctx));
   grpc_timer_init(
-      &exec_ctx, &timers[4], 1,
+      &timers[4], 1,
       GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)4, grpc_schedule_on_exec_ctx));
-  exec_ctx.now = 2;
-  GPR_ASSERT(grpc_timer_check(&exec_ctx, nullptr) == GRPC_TIMERS_FIRED);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->TestOnlySetNow(2);
+  GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED);
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(1 == cb_called[4][1]);
-  grpc_timer_cancel(&exec_ctx, &timers[0]);
-  grpc_timer_cancel(&exec_ctx, &timers[3]);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_timer_cancel(&timers[0]);
+  grpc_timer_cancel(&timers[3]);
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(1 == cb_called[0][0]);
   GPR_ASSERT(1 == cb_called[3][0]);
 
-  grpc_timer_list_shutdown(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_timer_list_shutdown();
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(1 == cb_called[1][0]);
   GPR_ASSERT(1 == cb_called[2][0]);
 }
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_core::ExecCtx::GlobalInit();
   gpr_set_log_verbosity(GPR_LOG_SEVERITY_DEBUG);
   add_test();
   destruction_test();
+  grpc_core::ExecCtx::GlobalShutdown();
   return 0;
 }
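
Note: timer_list_test is the one file here that poked exec_ctx.now and now_is_valid directly; that becomes TestOnlySetNow(), and because this test never goes through grpc_init()/grpc_shutdown() it now brackets its work with ExecCtx::GlobalInit()/GlobalShutdown(). A condensed sketch of the flow, reusing the test's cb callback and only calls shown above:

    grpc_core::ExecCtx::GlobalInit();
    {
      grpc_core::ExecCtx exec_ctx;
      grpc_timer_list_init();
      grpc_millis start = grpc_core::ExecCtx::Get()->Now();

      grpc_timer timer;
      grpc_timer_init(&timer, start + 10,
                      GRPC_CLOSURE_CREATE(cb, (void*)(intptr_t)0,
                                          grpc_schedule_on_exec_ctx));

      grpc_core::ExecCtx::Get()->TestOnlySetNow(start + 500);  // fast-forward the clock
      GPR_ASSERT(grpc_timer_check(nullptr) == GRPC_TIMERS_FIRED);
      grpc_core::ExecCtx::Get()->Flush();                      // runs cb with GRPC_ERROR_NONE
      grpc_timer_list_shutdown();
    }
    grpc_core::ExecCtx::GlobalShutdown();
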
 
diff --git a/test/core/iomgr/udp_server_test.cc b/test/core/iomgr/udp_server_test.cc
index 803f017..dc1248b 100644
--- a/test/core/iomgr/udp_server_test.cc
+++ b/test/core/iomgr/udp_server_test.cc
@@ -49,8 +49,11 @@
 static int g_number_of_writes = 0;
 static int g_number_of_bytes_read = 0;
 static int g_number_of_orphan_calls = 0;
+static int g_number_of_starts = 0;
 
-static void on_read(grpc_exec_ctx* exec_ctx, grpc_fd* emfd, void* user_data) {
+static void on_start(grpc_fd* emfd, void* user_data) { g_number_of_starts++; }
+
+static bool on_read(grpc_fd* emfd) {
   char read_buffer[512];
   ssize_t byte_count;
 
@@ -61,24 +64,27 @@
   g_number_of_reads++;
   g_number_of_bytes_read += (int)byte_count;
 
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
   gpr_mu_unlock(g_mu);
+  return false;
 }
 
-static void on_write(grpc_exec_ctx* exec_ctx, grpc_fd* emfd, void* user_data) {
+static void on_write(grpc_fd* emfd, void* user_data,
+                     grpc_closure* notify_on_write_closure) {
   gpr_mu_lock(g_mu);
   g_number_of_writes++;
 
-  GPR_ASSERT(GRPC_LOG_IF_ERROR(
-      "pollset_kick", grpc_pollset_kick(exec_ctx, g_pollset, nullptr)));
+  GPR_ASSERT(
+      GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(g_pollset, nullptr)));
   gpr_mu_unlock(g_mu);
 }
 
-static void on_fd_orphaned(grpc_exec_ctx* exec_ctx, grpc_fd* emfd,
-                           grpc_closure* closure, void* user_data) {
+static void on_fd_orphaned(grpc_fd* emfd, grpc_closure* closure,
+                           void* user_data) {
   gpr_log(GPR_INFO, "gRPC FD about to be orphaned: %d",
           grpc_fd_wrapped_fd(emfd));
+  GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
   g_number_of_orphan_calls++;
 }
 
@@ -126,25 +132,43 @@
   return factory;
 }
 
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(p));
+}
+
+static void shutdown_and_destroy_pollset() {
+  gpr_mu_lock(g_mu);
+  auto closure = GRPC_CLOSURE_CREATE(destroy_pollset, g_pollset,
+                                     grpc_schedule_on_exec_ctx);
+  grpc_pollset_shutdown(g_pollset, closure);
+  gpr_mu_unlock(g_mu);
+  /* Flush exec_ctx to run |destroyed| */
+  grpc_core::ExecCtx::Get()->Flush();
+}
+
 static void test_no_op(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_pollset_init(g_pollset, &g_mu);
+  grpc_core::ExecCtx exec_ctx;
   grpc_udp_server* s = grpc_udp_server_create(nullptr);
-  grpc_udp_server_destroy(&exec_ctx, s, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  LOG_TEST("test_no_op");
+  grpc_udp_server_destroy(s, nullptr);
+  shutdown_and_destroy_pollset();
 }
 
 static void test_no_op_with_start(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_pollset_init(g_pollset, &g_mu);
+  grpc_core::ExecCtx exec_ctx;
   grpc_udp_server* s = grpc_udp_server_create(nullptr);
   LOG_TEST("test_no_op_with_start");
-  grpc_udp_server_start(&exec_ctx, s, nullptr, 0, nullptr);
-  grpc_udp_server_destroy(&exec_ctx, s, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_udp_server_start(s, nullptr, 0, nullptr);
+  grpc_udp_server_destroy(s, nullptr);
+  shutdown_and_destroy_pollset();
 }
 
 static void test_no_op_with_port(void) {
+  grpc_pollset_init(g_pollset, &g_mu);
   g_number_of_orphan_calls = 0;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
   grpc_udp_server* s = grpc_udp_server_create(nullptr);
@@ -153,19 +177,20 @@
   memset(&resolved_addr, 0, sizeof(resolved_addr));
   resolved_addr.len = sizeof(struct sockaddr_in);
   addr->sin_family = AF_INET;
-  GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_read, on_write,
-                                      on_fd_orphaned));
+  GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_start, on_read,
+                                      on_write, on_fd_orphaned));
 
-  grpc_udp_server_destroy(&exec_ctx, s, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_udp_server_destroy(s, nullptr);
 
   /* The server had a single FD, which should have been orphaned. */
   GPR_ASSERT(g_number_of_orphan_calls == 1);
+  shutdown_and_destroy_pollset();
 }
 
 static void test_no_op_with_port_and_socket_factory(void) {
+  grpc_pollset_init(g_pollset, &g_mu);
   g_number_of_orphan_calls = 0;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
 
@@ -175,29 +200,31 @@
   grpc_channel_args* channel_args =
       grpc_channel_args_copy_and_add(nullptr, &socket_factory_arg, 1);
   grpc_udp_server* s = grpc_udp_server_create(channel_args);
-  grpc_channel_args_destroy(&exec_ctx, channel_args);
+  grpc_channel_args_destroy(channel_args);
 
   LOG_TEST("test_no_op_with_port_and_socket_factory");
 
   memset(&resolved_addr, 0, sizeof(resolved_addr));
   resolved_addr.len = sizeof(struct sockaddr_in);
   addr->sin_family = AF_INET;
-  GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_read, on_write,
-                                      on_fd_orphaned));
+  GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_start, on_read,
+                                      on_write, on_fd_orphaned));
   GPR_ASSERT(socket_factory->number_of_socket_calls == 1);
   GPR_ASSERT(socket_factory->number_of_bind_calls == 1);
 
-  grpc_udp_server_destroy(&exec_ctx, s, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_udp_server_destroy(s, nullptr);
+
   grpc_socket_factory_unref(&socket_factory->base);
 
   /* The server had a single FD, which should have been orphaned. */
   GPR_ASSERT(g_number_of_orphan_calls == 1);
+  shutdown_and_destroy_pollset();
 }
 
 static void test_no_op_with_port_and_start(void) {
+  grpc_pollset_init(g_pollset, &g_mu);
   g_number_of_orphan_calls = 0;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
   grpc_udp_server* s = grpc_udp_server_create(nullptr);
@@ -206,27 +233,27 @@
   memset(&resolved_addr, 0, sizeof(resolved_addr));
   resolved_addr.len = sizeof(struct sockaddr_in);
   addr->sin_family = AF_INET;
-  GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_read, on_write,
-                                      on_fd_orphaned));
+  GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_start, on_read,
+                                      on_write, on_fd_orphaned));
 
-  grpc_udp_server_start(&exec_ctx, s, nullptr, 0, nullptr);
-
-  grpc_udp_server_destroy(&exec_ctx, s, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_udp_server_start(s, nullptr, 0, nullptr);
+  GPR_ASSERT(g_number_of_starts == 1);
+  grpc_udp_server_destroy(s, nullptr);
 
   /* The server had a single FD, which is orphaned exactly once in *
    * grpc_udp_server_destroy. */
   GPR_ASSERT(g_number_of_orphan_calls == 1);
+  shutdown_and_destroy_pollset();
 }
 
 static void test_receive(int number_of_clients) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_pollset_init(g_pollset, &g_mu);
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   struct sockaddr_storage* addr = (struct sockaddr_storage*)resolved_addr.addr;
   int clifd, svrfd;
   grpc_udp_server* s = grpc_udp_server_create(nullptr);
   int i;
-  int number_of_reads_before;
   grpc_millis deadline;
   grpc_pollset* pollsets[1];
   LOG_TEST("test_receive");
@@ -238,8 +265,8 @@
   memset(&resolved_addr, 0, sizeof(resolved_addr));
   resolved_addr.len = sizeof(struct sockaddr_storage);
   addr->ss_family = AF_INET;
-  GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_read, on_write,
-                                      on_fd_orphaned));
+  GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, on_start, on_read,
+                                      on_write, on_fd_orphaned));
 
   svrfd = grpc_udp_server_get_fd(s, 0);
   GPR_ASSERT(svrfd >= 0);
@@ -248,7 +275,7 @@
   GPR_ASSERT(resolved_addr.len <= sizeof(struct sockaddr_storage));
 
   pollsets[0] = g_pollset;
-  grpc_udp_server_start(&exec_ctx, s, pollsets, 1, nullptr);
+  grpc_udp_server_start(s, pollsets, 1, nullptr);
 
   gpr_mu_lock(g_mu);
 
@@ -256,67 +283,53 @@
     deadline =
         grpc_timespec_to_millis_round_up(grpc_timeout_seconds_to_deadline(10));
 
-    number_of_reads_before = g_number_of_reads;
+    int number_of_bytes_read_before = g_number_of_bytes_read;
     /* Create a socket, send a packet to the UDP server. */
     clifd = socket(addr->ss_family, SOCK_DGRAM, 0);
     GPR_ASSERT(clifd >= 0);
     GPR_ASSERT(connect(clifd, (struct sockaddr*)addr,
                        (socklen_t)resolved_addr.len) == 0);
     GPR_ASSERT(5 == write(clifd, "hello", 5));
-    while (g_number_of_reads == number_of_reads_before &&
-           deadline > grpc_exec_ctx_now(&exec_ctx)) {
+    while (g_number_of_bytes_read < (number_of_bytes_read_before + 5) &&
+           deadline > grpc_core::ExecCtx::Get()->Now()) {
       grpc_pollset_worker* worker = nullptr;
       GPR_ASSERT(GRPC_LOG_IF_ERROR(
-          "pollset_work",
-          grpc_pollset_work(&exec_ctx, g_pollset, &worker, deadline)));
+          "pollset_work", grpc_pollset_work(g_pollset, &worker, deadline)));
       gpr_mu_unlock(g_mu);
-      grpc_exec_ctx_flush(&exec_ctx);
+      grpc_core::ExecCtx::Get()->Flush();
       gpr_mu_lock(g_mu);
     }
-    GPR_ASSERT(g_number_of_reads == number_of_reads_before + 1);
     close(clifd);
   }
   GPR_ASSERT(g_number_of_bytes_read == 5 * number_of_clients);
 
   gpr_mu_unlock(g_mu);
 
-  grpc_udp_server_destroy(&exec_ctx, s, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_udp_server_destroy(s, nullptr);
 
   /* The server had a single FD, which is orphaned exactly once in *
    * grpc_udp_server_destroy. */
   GPR_ASSERT(g_number_of_orphan_calls == 1);
-
-  /* The write callback should have fired a few times. */
-  GPR_ASSERT(g_number_of_writes > 0);
-}
-
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(p));
+  shutdown_and_destroy_pollset();
 }
 
 int main(int argc, char** argv) {
-  grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_test_init(argc, argv);
   grpc_init();
-  g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
-  grpc_pollset_init(g_pollset, &g_mu);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    g_pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
 
-  test_no_op();
-  test_no_op_with_start();
-  test_no_op_with_port();
-  test_no_op_with_port_and_socket_factory();
-  test_no_op_with_port_and_start();
-  test_receive(1);
-  test_receive(10);
+    test_no_op();
+    test_no_op_with_start();
+    test_no_op_with_port();
+    test_no_op_with_port_and_socket_factory();
+    test_no_op_with_port_and_start();
+    test_receive(1);
+    test_receive(10);
 
-  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
-                    grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
-  gpr_free(g_pollset);
+    gpr_free(g_pollset);
+  }
   grpc_shutdown();
   return 0;
 }
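
The udp_server_test changes above follow the ExecCtx migration pattern that runs through this whole merge: the explicit grpc_exec_ctx that used to be initialized with GRPC_EXEC_CTX_INIT, threaded through every call, and torn down with grpc_exec_ctx_finish() is replaced by a stack-allocated grpc_core::ExecCtx, and callers reach the current context through grpc_core::ExecCtx::Get(). A minimal sketch of the new shape, assuming the internal header src/core/lib/iomgr/exec_ctx.h is on the include path and using a hypothetical do_iomgr_work() as a stand-in for any iomgr call:

    #include <grpc/grpc.h>

    #include "src/core/lib/iomgr/exec_ctx.h"

    // Hypothetical helper standing in for any iomgr call that used to take a
    // grpc_exec_ctx* argument; after the migration such code reads the
    // thread-local context via grpc_core::ExecCtx::Get() instead.
    static void do_iomgr_work() {
      grpc_millis now = grpc_core::ExecCtx::Get()->Now();  // was grpc_exec_ctx_now(&exec_ctx)
      (void)now;
    }

    int main(int argc, char** argv) {
      grpc_init();
      {
        // RAII scope replaces GRPC_EXEC_CTX_INIT / grpc_exec_ctx_finish():
        // pending closures are drained when exec_ctx leaves scope, or
        // eagerly via Flush() (formerly grpc_exec_ctx_flush(&exec_ctx)).
        grpc_core::ExecCtx exec_ctx;
        do_iomgr_work();
        grpc_core::ExecCtx::Get()->Flush();
      }
      grpc_shutdown();
      return 0;
    }

The extra braces around the test body in main() above serve the same purpose: the ExecCtx must be destroyed before grpc_shutdown() runs.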
diff --git a/test/core/iomgr/wakeup_fd_cv_test.cc b/test/core/iomgr/wakeup_fd_cv_test.cc
index dc1d77a..d4e05bd 100644
--- a/test/core/iomgr/wakeup_fd_cv_test.cc
+++ b/test/core/iomgr/wakeup_fd_cv_test.cc
@@ -138,7 +138,7 @@
 
   opt = gpr_thd_options_default();
   gpr_thd_options_set_joinable(&opt);
-  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);
+  gpr_thd_new(&t_id, "grpc_background_poll", &background_poll, &pargs, &opt);
 
   // Wakeup wakeup_fd not listening for events
   GPR_ASSERT(grpc_wakeup_fd_wakeup(&cvfd1) == GRPC_ERROR_NONE);
@@ -154,7 +154,7 @@
   // Pollin on socket fd
   pargs.timeout = -1;
   pargs.result = -2;
-  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);
+  gpr_thd_new(&t_id, "grpc_background_poll", &background_poll, &pargs, &opt);
   trigger_socket_event();
   gpr_thd_join(t_id);
   GPR_ASSERT(pargs.result == 1);
@@ -168,7 +168,7 @@
   // Pollin on wakeup fd
   reset_socket_event();
   pargs.result = -2;
-  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);
+  gpr_thd_new(&t_id, "grpc_background_poll", &background_poll, &pargs, &opt);
   GPR_ASSERT(grpc_wakeup_fd_wakeup(&cvfd2) == GRPC_ERROR_NONE);
   gpr_thd_join(t_id);
 
@@ -182,7 +182,7 @@
 
   // Pollin on wakeupfd before poll()
   pargs.result = -2;
-  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);
+  gpr_thd_new(&t_id, "grpc_background_poll", &background_poll, &pargs, &opt);
   gpr_thd_join(t_id);
 
   GPR_ASSERT(pargs.result == 1);
@@ -199,7 +199,7 @@
   reset_socket_event();
   GPR_ASSERT(grpc_wakeup_fd_consume_wakeup(&cvfd1) == GRPC_ERROR_NONE);
   GPR_ASSERT(grpc_wakeup_fd_consume_wakeup(&cvfd2) == GRPC_ERROR_NONE);
-  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);
+  gpr_thd_new(&t_id, "grpc_background_poll", &background_poll, &pargs, &opt);
   gpr_thd_join(t_id);
 
   GPR_ASSERT(pargs.result == 0);
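
The wakeup_fd_cv_test hunks show the other mechanical change repeated across this merge: gpr_thd_new() now takes a thread-name string as its second argument, which shows up in debuggers and thread listings. A small sketch under that assumption (the worker body and the "example_worker" name are illustrative only):

    #include <grpc/support/thd.h>

    // gpr thread bodies take a single void* argument.
    static void worker(void* arg) { (void)arg; }

    int main() {
      gpr_thd_id tid;
      gpr_thd_options opt = gpr_thd_options_default();
      gpr_thd_options_set_joinable(&opt);
      // Name goes second, before the thread body, its argument, and options.
      gpr_thd_new(&tid, "example_worker", &worker, nullptr, &opt);
      gpr_thd_join(tid);
      return 0;
    }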
diff --git a/test/core/nanopb/fuzzer_response.cc b/test/core/nanopb/fuzzer_response.cc
index 7039c80..3a70dea 100644
--- a/test/core/nanopb/fuzzer_response.cc
+++ b/test/core/nanopb/fuzzer_response.cc
@@ -19,6 +19,7 @@
 #include <stdint.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
@@ -29,6 +30,7 @@
 static void dont_log(gpr_log_func_args* args) {}
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  grpc_init();
   if (squelch) gpr_set_log_function(dont_log);
   grpc_slice slice = grpc_slice_from_copied_buffer((const char*)data, size);
   grpc_grpclb_initial_response* response;
@@ -36,5 +38,6 @@
     grpc_grpclb_initial_response_destroy(response);
   }
   grpc_slice_unref(slice);
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/nanopb/fuzzer_serverlist.cc b/test/core/nanopb/fuzzer_serverlist.cc
index 0a6b176..d0af117 100644
--- a/test/core/nanopb/fuzzer_serverlist.cc
+++ b/test/core/nanopb/fuzzer_serverlist.cc
@@ -19,6 +19,7 @@
 #include <stdint.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
@@ -29,6 +30,7 @@
 static void dont_log(gpr_log_func_args* args) {}
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+  grpc_init();
   if (squelch) gpr_set_log_function(dont_log);
   grpc_slice slice = grpc_slice_from_copied_buffer((const char*)data, size);
   grpc_grpclb_serverlist* serverlist;
@@ -36,5 +38,6 @@
     grpc_grpclb_destroy_serverlist(serverlist);
   }
   grpc_slice_unref(slice);
+  grpc_shutdown();
   return 0;
 }
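
Both nanopb fuzzers now bracket every input with grpc_init()/grpc_shutdown(), since the slice and parsing code they exercise expects core to be initialized. A stripped-down sketch of that entry-point shape, with the actual parsing step elided:

    #include <stdint.h>
    #include <string.h>

    #include <grpc/grpc.h>
    #include <grpc/slice.h>

    // Same structure as the fuzzers above: init core, wrap the raw input in a
    // slice, exercise the target code, then unref and shut down per input.
    extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
      grpc_init();
      grpc_slice slice = grpc_slice_from_copied_buffer((const char*)data, size);
      /* ... parse and exercise `slice` here ... */
      grpc_slice_unref(slice);
      grpc_shutdown();
      return 0;
    }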
diff --git a/test/core/network_benchmarks/low_level_ping_pong.cc b/test/core/network_benchmarks/low_level_ping_pong.cc
index 687395d..96b0745 100644
--- a/test/core/network_benchmarks/low_level_ping_pong.cc
+++ b/test/core/network_benchmarks/low_level_ping_pong.cc
@@ -36,13 +36,13 @@
 
 #include <grpc/support/alloc.h>
 #include <grpc/support/cmdline.h>
-#include <grpc/support/histogram.h>
 #include <grpc/support/log.h>
 #include <grpc/support/thd.h>
 #include <grpc/support/time.h>
 #include <grpc/support/useful.h>
 #include "src/core/lib/iomgr/error.h"
 #include "src/core/lib/iomgr/socket_utils_posix.h"
+#include "test/core/util/histogram.h"
 
 typedef struct fd_pair {
   int read_fd;
@@ -275,14 +275,14 @@
   server_thread(args);
 }
 
-static void print_histogram(gpr_histogram* histogram) {
+static void print_histogram(grpc_histogram* histogram) {
   /* TODO(klempner): Print more detailed information, such as detailed histogram
      buckets */
   gpr_log(GPR_INFO, "latency (50/95/99/99.9): %f/%f/%f/%f",
-          gpr_histogram_percentile(histogram, 50),
-          gpr_histogram_percentile(histogram, 95),
-          gpr_histogram_percentile(histogram, 99),
-          gpr_histogram_percentile(histogram, 99.9));
+          grpc_histogram_percentile(histogram, 50),
+          grpc_histogram_percentile(histogram, 95),
+          grpc_histogram_percentile(histogram, 99),
+          grpc_histogram_percentile(histogram, 99.9));
 }
 
 static double now(void) {
@@ -293,7 +293,7 @@
 static void client_thread(thread_args* args) {
   char* buf = static_cast<char*>(gpr_malloc(args->msg_size * sizeof(char)));
   memset(buf, 0, args->msg_size * sizeof(char));
-  gpr_histogram* histogram = gpr_histogram_create(0.01, 60e9);
+  grpc_histogram* histogram = grpc_histogram_create(0.01, 60e9);
   double start_time;
   double end_time;
   double interval;
@@ -316,13 +316,13 @@
     end_time = now();
     if (i > kNumIters / 2) {
       interval = end_time - start_time;
-      gpr_histogram_add(histogram, interval);
+      grpc_histogram_add(histogram, interval);
     }
   }
   print_histogram(histogram);
 error:
   gpr_free(buf);
-  gpr_histogram_destroy(histogram);
+  grpc_histogram_destroy(histogram);
 }
 
 /* This roughly matches tcp_server's create_listening_socket */
@@ -583,7 +583,7 @@
   gpr_log(GPR_INFO, "Starting test %s %s %zu", client_args->strategy_name,
           socket_type, client_args->msg_size);
 
-  gpr_thd_new(&tid, server_thread_wrap, server_args, nullptr);
+  gpr_thd_new(&tid, "server_thread", server_thread_wrap, server_args, nullptr);
   client_thread(client_args);
   return 0;
 }
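
low_level_ping_pong.cc switches from the removed public gpr_histogram API to the test-only grpc_histogram helpers in test/core/util/histogram.h; the call shape (create/add/percentile/destroy) is unchanged apart from the prefix. A small sketch using only the calls that appear in the diff above (the sample values are made up):

    #include <grpc/support/log.h>

    #include "test/core/util/histogram.h"

    int main() {
      // Resolution and max value match the benchmark's usage above.
      grpc_histogram* h = grpc_histogram_create(0.01, 60e9);
      for (int i = 0; i < 1000; i++) {
        grpc_histogram_add(h, 1000.0 + i);  // record a fake latency sample
      }
      gpr_log(GPR_INFO, "p50=%f p99=%f", grpc_histogram_percentile(h, 50),
              grpc_histogram_percentile(h, 99));
      grpc_histogram_destroy(h);
      return 0;
    }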
diff --git a/test/core/security/credentials_test.cc b/test/core/security/credentials_test.cc
index 64d383a..ecc6192 100644
--- a/test/core/security/credentials_test.cc
+++ b/test/core/security/credentials_test.cc
@@ -148,41 +148,37 @@
 /* -- Tests. -- */
 
 static void test_empty_md_array(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_credentials_mdelem_array md_array;
   memset(&md_array, 0, sizeof(md_array));
   GPR_ASSERT(md_array.md == nullptr);
   GPR_ASSERT(md_array.size == 0);
-  grpc_credentials_mdelem_array_destroy(&exec_ctx, &md_array);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_credentials_mdelem_array_destroy(&md_array);
 }
 
 static void test_add_to_empty_md_array(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_credentials_mdelem_array md_array;
   memset(&md_array, 0, sizeof(md_array));
   const char* key = "hello";
   const char* value = "there blah blah blah blah blah blah blah";
-  grpc_mdelem md =
-      grpc_mdelem_from_slices(&exec_ctx, grpc_slice_from_copied_string(key),
-                              grpc_slice_from_copied_string(value));
+  grpc_mdelem md = grpc_mdelem_from_slices(
+      grpc_slice_from_copied_string(key), grpc_slice_from_copied_string(value));
   grpc_credentials_mdelem_array_add(&md_array, md);
   GPR_ASSERT(md_array.size == 1);
   GPR_ASSERT(grpc_mdelem_eq(md, md_array.md[0]));
-  GRPC_MDELEM_UNREF(&exec_ctx, md);
-  grpc_credentials_mdelem_array_destroy(&exec_ctx, &md_array);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(md);
+  grpc_credentials_mdelem_array_destroy(&md_array);
 }
 
 static void test_add_abunch_to_md_array(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_credentials_mdelem_array md_array;
   memset(&md_array, 0, sizeof(md_array));
   const char* key = "hello";
   const char* value = "there blah blah blah blah blah blah blah";
-  grpc_mdelem md =
-      grpc_mdelem_from_slices(&exec_ctx, grpc_slice_from_copied_string(key),
-                              grpc_slice_from_copied_string(value));
+  grpc_mdelem md = grpc_mdelem_from_slices(
+      grpc_slice_from_copied_string(key), grpc_slice_from_copied_string(value));
   size_t num_entries = 1000;
   for (size_t i = 0; i < num_entries; ++i) {
     grpc_credentials_mdelem_array_add(&md_array, md);
@@ -190,57 +186,52 @@
   for (size_t i = 0; i < num_entries; ++i) {
     GPR_ASSERT(grpc_mdelem_eq(md_array.md[i], md));
   }
-  GRPC_MDELEM_UNREF(&exec_ctx, md);
-  grpc_credentials_mdelem_array_destroy(&exec_ctx, &md_array);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(md);
+  grpc_credentials_mdelem_array_destroy(&md_array);
 }
 
 static void test_oauth2_token_fetcher_creds_parsing_ok(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem token_md = GRPC_MDNULL;
   grpc_millis token_lifetime;
   grpc_httpcli_response response =
       http_response(200, valid_oauth2_json_response);
   GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
-                 &exec_ctx, &response, &token_md, &token_lifetime) ==
-             GRPC_CREDENTIALS_OK);
+                 &response, &token_md, &token_lifetime) == GRPC_CREDENTIALS_OK);
   GPR_ASSERT(token_lifetime == 3599 * GPR_MS_PER_SEC);
   GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDKEY(token_md), "authorization") == 0);
   GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDVALUE(token_md),
                                 "Bearer ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_") ==
              0);
-  GRPC_MDELEM_UNREF(&exec_ctx, token_md);
+  GRPC_MDELEM_UNREF(token_md);
   grpc_http_response_destroy(&response);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_oauth2_token_fetcher_creds_parsing_bad_http_status(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem token_md = GRPC_MDNULL;
   grpc_millis token_lifetime;
   grpc_httpcli_response response =
       http_response(401, valid_oauth2_json_response);
   GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
-                 &exec_ctx, &response, &token_md, &token_lifetime) ==
+                 &response, &token_md, &token_lifetime) ==
              GRPC_CREDENTIALS_ERROR);
   grpc_http_response_destroy(&response);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_oauth2_token_fetcher_creds_parsing_empty_http_body(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem token_md = GRPC_MDNULL;
   grpc_millis token_lifetime;
   grpc_httpcli_response response = http_response(200, "");
   GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
-                 &exec_ctx, &response, &token_md, &token_lifetime) ==
+                 &response, &token_md, &token_lifetime) ==
              GRPC_CREDENTIALS_ERROR);
   grpc_http_response_destroy(&response);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_oauth2_token_fetcher_creds_parsing_invalid_json(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem token_md = GRPC_MDNULL;
   grpc_millis token_lifetime;
   grpc_httpcli_response response =
@@ -249,14 +240,13 @@
                     " \"expires_in\":3599, "
                     " \"token_type\":\"Bearer\"");
   GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
-                 &exec_ctx, &response, &token_md, &token_lifetime) ==
+                 &response, &token_md, &token_lifetime) ==
              GRPC_CREDENTIALS_ERROR);
   grpc_http_response_destroy(&response);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_oauth2_token_fetcher_creds_parsing_missing_token(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem token_md = GRPC_MDNULL;
   grpc_millis token_lifetime;
   grpc_httpcli_response response = http_response(200,
@@ -264,14 +254,13 @@
                                                  " \"expires_in\":3599, "
                                                  " \"token_type\":\"Bearer\"}");
   GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
-                 &exec_ctx, &response, &token_md, &token_lifetime) ==
+                 &response, &token_md, &token_lifetime) ==
              GRPC_CREDENTIALS_ERROR);
   grpc_http_response_destroy(&response);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_oauth2_token_fetcher_creds_parsing_missing_token_type(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem token_md = GRPC_MDNULL;
   grpc_millis token_lifetime;
   grpc_httpcli_response response =
@@ -280,15 +269,14 @@
                     " \"expires_in\":3599, "
                     "}");
   GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
-                 &exec_ctx, &response, &token_md, &token_lifetime) ==
+                 &response, &token_md, &token_lifetime) ==
              GRPC_CREDENTIALS_ERROR);
   grpc_http_response_destroy(&response);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_oauth2_token_fetcher_creds_parsing_missing_token_lifetime(
     void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem token_md = GRPC_MDNULL;
   grpc_millis token_lifetime;
   grpc_httpcli_response response =
@@ -296,10 +284,9 @@
                     "{\"access_token\":\"ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_\","
                     " \"token_type\":\"Bearer\"}");
   GPR_ASSERT(grpc_oauth2_token_fetcher_credentials_parse_server_response(
-                 &exec_ctx, &response, &token_md, &token_lifetime) ==
+                 &response, &token_md, &token_lifetime) ==
              GRPC_CREDENTIALS_ERROR);
   grpc_http_response_destroy(&response);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 typedef struct {
@@ -336,8 +323,7 @@
   }
 }
 
-static void check_request_metadata(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_error* error) {
+static void check_request_metadata(void* arg, grpc_error* error) {
   request_metadata_state* state = (request_metadata_state*)arg;
   gpr_log(GPR_INFO, "expected_error: %s",
           grpc_error_string(state->expected_error));
@@ -358,9 +344,8 @@
           state->expected_size, state->md_array.size);
   GPR_ASSERT(state->md_array.size == state->expected_size);
   check_metadata(state->expected, &state->md_array);
-  grpc_credentials_mdelem_array_destroy(exec_ctx, &state->md_array);
-  grpc_pollset_set_destroy(exec_ctx,
-                           grpc_polling_entity_pollset_set(&state->pollent));
+  grpc_credentials_mdelem_array_destroy(&state->md_array);
+  grpc_pollset_set_destroy(grpc_polling_entity_pollset_set(&state->pollent));
   gpr_free(state);
 }
 
@@ -379,22 +364,21 @@
   return state;
 }
 
-static void run_request_metadata_test(grpc_exec_ctx* exec_ctx,
-                                      grpc_call_credentials* creds,
+static void run_request_metadata_test(grpc_call_credentials* creds,
                                       grpc_auth_metadata_context auth_md_ctx,
                                       request_metadata_state* state) {
   grpc_error* error = GRPC_ERROR_NONE;
   if (grpc_call_credentials_get_request_metadata(
-          exec_ctx, creds, &state->pollent, auth_md_ctx, &state->md_array,
+          creds, &state->pollent, auth_md_ctx, &state->md_array,
           &state->on_request_metadata, &error)) {
     // Synchronous result.  Invoke the callback directly.
-    check_request_metadata(exec_ctx, state, error);
+    check_request_metadata(state, error);
     GRPC_ERROR_UNREF(error);
   }
 }
 
 static void test_google_iam_creds(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   expected_md emd[] = {{GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
                         test_google_iam_authorization_token},
                        {GRPC_IAM_AUTHORITY_SELECTOR_METADATA_KEY,
@@ -406,13 +390,12 @@
       nullptr);
   grpc_auth_metadata_context auth_md_ctx = {test_service_url, test_method,
                                             nullptr, nullptr};
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_call_credentials_unref(&exec_ctx, creds);
-  grpc_exec_ctx_finish(&exec_ctx);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_call_credentials_unref(creds);
 }
 
 static void test_access_token_creds(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   expected_md emd[] = {{GRPC_AUTHORIZATION_METADATA_KEY, "Bearer blah"}};
   request_metadata_state* state =
       make_request_metadata_state(GRPC_ERROR_NONE, emd, GPR_ARRAY_SIZE(emd));
@@ -421,16 +404,14 @@
   grpc_auth_metadata_context auth_md_ctx = {test_service_url, test_method,
                                             nullptr, nullptr};
   GPR_ASSERT(strcmp(creds->type, GRPC_CALL_CREDENTIALS_TYPE_OAUTH2) == 0);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_call_credentials_unref(&exec_ctx, creds);
-  grpc_exec_ctx_finish(&exec_ctx);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_call_credentials_unref(creds);
 }
 
 static grpc_security_status check_channel_oauth2_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_channel_credentials* c,
-    grpc_call_credentials* call_creds, const char* target,
-    const grpc_channel_args* args, grpc_channel_security_connector** sc,
-    grpc_channel_args** new_args) {
+    grpc_channel_credentials* c, grpc_call_credentials* call_creds,
+    const char* target, const grpc_channel_args* args,
+    grpc_channel_security_connector** sc, grpc_channel_args** new_args) {
   GPR_ASSERT(strcmp(c->type, "mock") == 0);
   GPR_ASSERT(call_creds != nullptr);
   GPR_ASSERT(strcmp(call_creds->type, GRPC_CALL_CREDENTIALS_TYPE_OAUTH2) == 0);
@@ -438,7 +419,7 @@
 }
 
 static void test_channel_oauth2_composite_creds(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_channel_args* new_args;
   grpc_channel_credentials_vtable vtable = {
       nullptr, check_channel_oauth2_create_security_connector, nullptr};
@@ -452,14 +433,13 @@
   grpc_channel_credentials_release(channel_creds);
   grpc_call_credentials_release(oauth2_creds);
   GPR_ASSERT(grpc_channel_credentials_create_security_connector(
-                 &exec_ctx, channel_oauth2_creds, nullptr, nullptr, nullptr,
-                 &new_args) == GRPC_SECURITY_OK);
+                 channel_oauth2_creds, nullptr, nullptr, nullptr, &new_args) ==
+             GRPC_SECURITY_OK);
   grpc_channel_credentials_release(channel_oauth2_creds);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_oauth2_google_iam_composite_creds(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   expected_md emd[] = {
       {GRPC_AUTHORIZATION_METADATA_KEY, test_oauth2_bearer_token},
       {GRPC_IAM_AUTHORIZATION_TOKEN_METADATA_KEY,
@@ -471,15 +451,15 @@
   grpc_auth_metadata_context auth_md_ctx = {test_service_url, test_method,
                                             nullptr, nullptr};
   grpc_call_credentials* oauth2_creds = grpc_md_only_test_credentials_create(
-      &exec_ctx, "authorization", test_oauth2_bearer_token, 0);
+      "authorization", test_oauth2_bearer_token, 0);
   grpc_call_credentials* google_iam_creds = grpc_google_iam_credentials_create(
       test_google_iam_authorization_token, test_google_iam_authority_selector,
       nullptr);
   grpc_call_credentials* composite_creds =
       grpc_composite_call_credentials_create(oauth2_creds, google_iam_creds,
                                              nullptr);
-  grpc_call_credentials_unref(&exec_ctx, oauth2_creds);
-  grpc_call_credentials_unref(&exec_ctx, google_iam_creds);
+  grpc_call_credentials_unref(oauth2_creds);
+  grpc_call_credentials_unref(google_iam_creds);
   GPR_ASSERT(
       strcmp(composite_creds->type, GRPC_CALL_CREDENTIALS_TYPE_COMPOSITE) == 0);
   const grpc_call_credentials_array* creds_array =
@@ -489,17 +469,15 @@
                     GRPC_CALL_CREDENTIALS_TYPE_OAUTH2) == 0);
   GPR_ASSERT(strcmp(creds_array->creds_array[1]->type,
                     GRPC_CALL_CREDENTIALS_TYPE_IAM) == 0);
-  run_request_metadata_test(&exec_ctx, composite_creds, auth_md_ctx, state);
-  grpc_call_credentials_unref(&exec_ctx, composite_creds);
-  grpc_exec_ctx_finish(&exec_ctx);
+  run_request_metadata_test(composite_creds, auth_md_ctx, state);
+  grpc_call_credentials_unref(composite_creds);
 }
 
 static grpc_security_status
 check_channel_oauth2_google_iam_create_security_connector(
-    grpc_exec_ctx* exec_ctx, grpc_channel_credentials* c,
-    grpc_call_credentials* call_creds, const char* target,
-    const grpc_channel_args* args, grpc_channel_security_connector** sc,
-    grpc_channel_args** new_args) {
+    grpc_channel_credentials* c, grpc_call_credentials* call_creds,
+    const char* target, const grpc_channel_args* args,
+    grpc_channel_security_connector** sc, grpc_channel_args** new_args) {
   const grpc_call_credentials_array* creds_array;
   GPR_ASSERT(strcmp(c->type, "mock") == 0);
   GPR_ASSERT(call_creds != nullptr);
@@ -514,7 +492,7 @@
 }
 
 static void test_channel_oauth2_google_iam_composite_creds(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_channel_args* new_args;
   grpc_channel_credentials_vtable vtable = {
       nullptr, check_channel_oauth2_google_iam_create_security_connector,
@@ -538,11 +516,10 @@
   grpc_call_credentials_release(google_iam_creds);
 
   GPR_ASSERT(grpc_channel_credentials_create_security_connector(
-                 &exec_ctx, channel_oauth2_iam_creds, nullptr, nullptr, nullptr,
+                 channel_oauth2_iam_creds, nullptr, nullptr, nullptr,
                  &new_args) == GRPC_SECURITY_OK);
 
   grpc_channel_credentials_release(channel_oauth2_iam_creds);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void validate_compute_engine_http_request(
@@ -559,35 +536,32 @@
 }
 
 static int compute_engine_httpcli_get_success_override(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    grpc_millis deadline, grpc_closure* on_done,
-    grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, grpc_millis deadline,
+    grpc_closure* on_done, grpc_httpcli_response* response) {
   validate_compute_engine_http_request(request);
   *response = http_response(200, valid_oauth2_json_response);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
 static int compute_engine_httpcli_get_failure_override(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    grpc_millis deadline, grpc_closure* on_done,
-    grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, grpc_millis deadline,
+    grpc_closure* on_done, grpc_httpcli_response* response) {
   validate_compute_engine_http_request(request);
   *response = http_response(403, "Not Authorized.");
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
 static int httpcli_post_should_not_be_called(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    const char* body_bytes, size_t body_size, grpc_millis deadline,
-    grpc_closure* on_done, grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, const char* body_bytes,
+    size_t body_size, grpc_millis deadline, grpc_closure* on_done,
+    grpc_httpcli_response* response) {
   GPR_ASSERT("HTTP POST should not be called" == nullptr);
   return 1;
 }
 
-static int httpcli_get_should_not_be_called(grpc_exec_ctx* exec_ctx,
-                                            const grpc_httpcli_request* request,
+static int httpcli_get_should_not_be_called(const grpc_httpcli_request* request,
                                             grpc_millis deadline,
                                             grpc_closure* on_done,
                                             grpc_httpcli_response* response) {
@@ -596,7 +570,7 @@
 }
 
 static void test_compute_engine_creds_success(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   expected_md emd[] = {
       {"authorization", "Bearer ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_"}};
   grpc_call_credentials* creds =
@@ -609,24 +583,23 @@
       make_request_metadata_state(GRPC_ERROR_NONE, emd, GPR_ARRAY_SIZE(emd));
   grpc_httpcli_set_override(compute_engine_httpcli_get_success_override,
                             httpcli_post_should_not_be_called);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_exec_ctx_flush(&exec_ctx);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Second request: the cached token should be served directly. */
   state =
       make_request_metadata_state(GRPC_ERROR_NONE, emd, GPR_ARRAY_SIZE(emd));
   grpc_httpcli_set_override(httpcli_get_should_not_be_called,
                             httpcli_post_should_not_be_called);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_exec_ctx_flush(&exec_ctx);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  grpc_call_credentials_unref(&exec_ctx, creds);
+  grpc_call_credentials_unref(creds);
   grpc_httpcli_set_override(nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_compute_engine_creds_failure(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   request_metadata_state* state = make_request_metadata_state(
       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
           "Error occured when fetching oauth2 token."),
@@ -637,10 +610,9 @@
       grpc_google_compute_engine_credentials_create(nullptr);
   grpc_httpcli_set_override(compute_engine_httpcli_get_failure_override,
                             httpcli_post_should_not_be_called);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_call_credentials_unref(&exec_ctx, creds);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_call_credentials_unref(creds);
   grpc_httpcli_set_override(nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void validate_refresh_token_http_request(
@@ -667,27 +639,27 @@
 }
 
 static int refresh_token_httpcli_post_success(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    const char* body, size_t body_size, grpc_millis deadline,
-    grpc_closure* on_done, grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, const char* body, size_t body_size,
+    grpc_millis deadline, grpc_closure* on_done,
+    grpc_httpcli_response* response) {
   validate_refresh_token_http_request(request, body, body_size);
   *response = http_response(200, valid_oauth2_json_response);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
 static int refresh_token_httpcli_post_failure(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    const char* body, size_t body_size, grpc_millis deadline,
-    grpc_closure* on_done, grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, const char* body, size_t body_size,
+    grpc_millis deadline, grpc_closure* on_done,
+    grpc_httpcli_response* response) {
   validate_refresh_token_http_request(request, body, body_size);
   *response = http_response(403, "Not Authorized.");
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
 static void test_refresh_token_creds_success(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   expected_md emd[] = {
       {"authorization", "Bearer ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_"}};
   grpc_auth_metadata_context auth_md_ctx = {test_service_url, test_method,
@@ -700,24 +672,23 @@
       make_request_metadata_state(GRPC_ERROR_NONE, emd, GPR_ARRAY_SIZE(emd));
   grpc_httpcli_set_override(httpcli_get_should_not_be_called,
                             refresh_token_httpcli_post_success);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_exec_ctx_flush(&exec_ctx);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Second request: the cached token should be served directly. */
   state =
       make_request_metadata_state(GRPC_ERROR_NONE, emd, GPR_ARRAY_SIZE(emd));
   grpc_httpcli_set_override(httpcli_get_should_not_be_called,
                             httpcli_post_should_not_be_called);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_exec_ctx_flush(&exec_ctx);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  grpc_call_credentials_unref(&exec_ctx, creds);
+  grpc_call_credentials_unref(creds);
   grpc_httpcli_set_override(nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_refresh_token_creds_failure(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   request_metadata_state* state = make_request_metadata_state(
       GRPC_ERROR_CREATE_FROM_STATIC_STRING(
           "Error occured when fetching oauth2 token."),
@@ -728,10 +699,9 @@
       test_refresh_token_str, nullptr);
   grpc_httpcli_set_override(httpcli_get_should_not_be_called,
                             refresh_token_httpcli_post_failure);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_call_credentials_unref(&exec_ctx, creds);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_call_credentials_unref(creds);
   grpc_httpcli_set_override(nullptr, nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void validate_jwt_encode_and_sign_params(
@@ -821,7 +791,7 @@
 
 static void test_jwt_creds_success(void) {
   char* json_key_string = test_json_key_str();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_auth_metadata_context auth_md_ctx = {test_service_url, test_method,
                                             nullptr, nullptr};
   char* expected_md_value;
@@ -835,16 +805,16 @@
   request_metadata_state* state =
       make_request_metadata_state(GRPC_ERROR_NONE, emd, GPR_ARRAY_SIZE(emd));
   grpc_jwt_encode_and_sign_set_override(encode_and_sign_jwt_success);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_exec_ctx_flush(&exec_ctx);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Second request: the cached token should be served directly. */
   state =
       make_request_metadata_state(GRPC_ERROR_NONE, emd, GPR_ARRAY_SIZE(emd));
   grpc_jwt_encode_and_sign_set_override(
       encode_and_sign_jwt_should_not_be_called);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_exec_ctx_flush(&exec_ctx);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Third request: Different service url so jwt_encode_and_sign should be
      called again (no caching). */
@@ -852,19 +822,18 @@
       make_request_metadata_state(GRPC_ERROR_NONE, emd, GPR_ARRAY_SIZE(emd));
   auth_md_ctx.service_url = other_test_service_url;
   grpc_jwt_encode_and_sign_set_override(encode_and_sign_jwt_success);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
-  grpc_exec_ctx_flush(&exec_ctx);
+  run_request_metadata_test(creds, auth_md_ctx, state);
+  grpc_core::ExecCtx::Get()->Flush();
 
-  grpc_call_credentials_unref(&exec_ctx, creds);
+  grpc_call_credentials_unref(creds);
   gpr_free(json_key_string);
   gpr_free(expected_md_value);
   grpc_jwt_encode_and_sign_set_override(nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_jwt_creds_signing_failure(void) {
   char* json_key_string = test_json_key_str();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_auth_metadata_context auth_md_ctx = {test_service_url, test_method,
                                             nullptr, nullptr};
   request_metadata_state* state = make_request_metadata_state(
@@ -875,12 +844,11 @@
           json_key_string, grpc_max_auth_token_lifetime(), nullptr);
 
   grpc_jwt_encode_and_sign_set_override(encode_and_sign_jwt_failure);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, state);
+  run_request_metadata_test(creds, auth_md_ctx, state);
 
   gpr_free(json_key_string);
-  grpc_call_credentials_unref(&exec_ctx, creds);
+  grpc_call_credentials_unref(creds);
   grpc_jwt_encode_and_sign_set_override(nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void set_google_default_creds_env_var_with_file_contents(
@@ -897,7 +865,7 @@
 }
 
 static void test_google_default_creds_auth_key(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_service_account_jwt_access_credentials* jwt;
   grpc_composite_channel_credentials* creds;
   char* json_key = test_json_key_str();
@@ -913,13 +881,12 @@
       strcmp(jwt->key.client_id,
              "777-abaslkan11hlb6nmim3bpspl31ud.apps.googleusercontent.com") ==
       0);
-  grpc_channel_credentials_unref(&exec_ctx, &creds->base);
+  grpc_channel_credentials_unref(&creds->base);
   gpr_setenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR, ""); /* Reset. */
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void test_google_default_creds_refresh_token(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_google_refresh_token_credentials* refresh;
   grpc_composite_channel_credentials* creds;
   grpc_flush_cached_google_default_credentials();
@@ -931,15 +898,13 @@
   refresh = (grpc_google_refresh_token_credentials*)creds->call_creds;
   GPR_ASSERT(strcmp(refresh->refresh_token.client_id,
                     "32555999999.apps.googleusercontent.com") == 0);
-  grpc_channel_credentials_unref(&exec_ctx, &creds->base);
+  grpc_channel_credentials_unref(&creds->base);
   gpr_setenv(GRPC_GOOGLE_CREDENTIALS_ENV_VAR, ""); /* Reset. */
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static int default_creds_gce_detection_httpcli_get_success_override(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    grpc_millis deadline, grpc_closure* on_done,
-    grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, grpc_millis deadline,
+    grpc_closure* on_done, grpc_httpcli_response* response) {
   *response = http_response(200, "");
   grpc_http_header* headers =
       static_cast<grpc_http_header*>(gpr_malloc(sizeof(*headers) * 1));
@@ -949,14 +914,14 @@
   response->hdrs = headers;
   GPR_ASSERT(strcmp(request->http.path, "/") == 0);
   GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
 static char* null_well_known_creds_path_getter(void) { return nullptr; }
 
 static void test_google_default_creds_gce(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   expected_md emd[] = {
       {"authorization", "Bearer ya29.AHES6ZRN3-HlhAPya30GnW_bHSb_"}};
   request_metadata_state* state =
@@ -981,8 +946,8 @@
   GPR_ASSERT(creds->call_creds != nullptr);
   grpc_httpcli_set_override(compute_engine_httpcli_get_success_override,
                             httpcli_post_should_not_be_called);
-  run_request_metadata_test(&exec_ctx, creds->call_creds, auth_md_ctx, state);
-  grpc_exec_ctx_flush(&exec_ctx);
+  run_request_metadata_test(creds->call_creds, auth_md_ctx, state);
+  grpc_core::ExecCtx::Get()->Flush();
 
   /* Check that we get a cached creds if we call
      grpc_google_default_credentials_create again.
@@ -994,22 +959,20 @@
   GPR_ASSERT(cached_creds == &creds->base);
 
   /* Cleanup. */
-  grpc_channel_credentials_unref(&exec_ctx, cached_creds);
-  grpc_channel_credentials_unref(&exec_ctx, &creds->base);
+  grpc_channel_credentials_unref(cached_creds);
+  grpc_channel_credentials_unref(&creds->base);
   grpc_httpcli_set_override(nullptr, nullptr);
   grpc_override_well_known_credentials_path_getter(nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static int default_creds_gce_detection_httpcli_get_failure_override(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    grpc_millis deadline, grpc_closure* on_done,
-    grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, grpc_millis deadline,
+    grpc_closure* on_done, grpc_httpcli_response* response) {
   /* No magic header. */
   GPR_ASSERT(strcmp(request->http.path, "/") == 0);
   GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0);
   *response = http_response(200, "");
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -1093,7 +1056,7 @@
 static void test_metadata_plugin_success(void) {
   plugin_state state = PLUGIN_INITIAL_STATE;
   grpc_metadata_credentials_plugin plugin;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_auth_metadata_context auth_md_ctx = {test_service_url, test_method,
                                             nullptr, nullptr};
   request_metadata_state* md_state = make_request_metadata_state(
@@ -1106,17 +1069,17 @@
   grpc_call_credentials* creds =
       grpc_metadata_credentials_create_from_plugin(plugin, nullptr);
   GPR_ASSERT(state == PLUGIN_INITIAL_STATE);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, md_state);
+  run_request_metadata_test(creds, auth_md_ctx, md_state);
   GPR_ASSERT(state == PLUGIN_GET_METADATA_CALLED_STATE);
-  grpc_call_credentials_unref(&exec_ctx, creds);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_call_credentials_unref(creds);
+
   GPR_ASSERT(state == PLUGIN_DESTROY_CALLED_STATE);
 }
 
 static void test_metadata_plugin_failure(void) {
   plugin_state state = PLUGIN_INITIAL_STATE;
   grpc_metadata_credentials_plugin plugin;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_auth_metadata_context auth_md_ctx = {test_service_url, test_method,
                                             nullptr, nullptr};
   char* expected_error;
@@ -1134,10 +1097,10 @@
   grpc_call_credentials* creds =
       grpc_metadata_credentials_create_from_plugin(plugin, nullptr);
   GPR_ASSERT(state == PLUGIN_INITIAL_STATE);
-  run_request_metadata_test(&exec_ctx, creds, auth_md_ctx, md_state);
+  run_request_metadata_test(creds, auth_md_ctx, md_state);
   GPR_ASSERT(state == PLUGIN_GET_METADATA_CALLED_STATE);
-  grpc_call_credentials_unref(&exec_ctx, creds);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_call_credentials_unref(creds);
+
   GPR_ASSERT(state == PLUGIN_DESTROY_CALLED_STATE);
 }
 
@@ -1158,7 +1121,7 @@
 }
 
 static void test_channel_creds_duplicate_without_call_creds(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   grpc_channel_credentials* channel_creds =
       grpc_fake_transport_security_credentials_create();
@@ -1167,23 +1130,21 @@
       grpc_channel_credentials_duplicate_without_call_credentials(
           channel_creds);
   GPR_ASSERT(dup == channel_creds);
-  grpc_channel_credentials_unref(&exec_ctx, dup);
+  grpc_channel_credentials_unref(dup);
 
   grpc_call_credentials* call_creds =
       grpc_access_token_credentials_create("blah", nullptr);
   grpc_channel_credentials* composite_creds =
       grpc_composite_channel_credentials_create(channel_creds, call_creds,
                                                 nullptr);
-  grpc_call_credentials_unref(&exec_ctx, call_creds);
+  grpc_call_credentials_unref(call_creds);
   dup = grpc_channel_credentials_duplicate_without_call_credentials(
       composite_creds);
   GPR_ASSERT(dup == channel_creds);
-  grpc_channel_credentials_unref(&exec_ctx, dup);
+  grpc_channel_credentials_unref(dup);
 
-  grpc_channel_credentials_unref(&exec_ctx, channel_creds);
-  grpc_channel_credentials_unref(&exec_ctx, composite_creds);
-
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_channel_credentials_unref(channel_creds);
+  grpc_channel_credentials_unref(composite_creds);
 }
 
 typedef struct {
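
credentials_test.cc installs fake HTTP responders through grpc_httpcli_set_override; with the ExecCtx migration those callbacks lose their grpc_exec_ctx* first parameter and schedule the completion closure via GRPC_CLOSURE_SCHED without an explicit context. A sketch of the new GET-callback shape, assuming the internal httpcli.h header; the response is left zeroed here, where the real tests populate it through their local http_response() helper:

    #include <string.h>

    #include "src/core/lib/http/httpcli.h"

    // Fake GET responder in the post-ExecCtx shape: no grpc_exec_ctx* first
    // parameter, and the done closure is handed straight to GRPC_CLOSURE_SCHED.
    static int fake_httpcli_get(const grpc_httpcli_request* request,
                                grpc_millis deadline, grpc_closure* on_done,
                                grpc_httpcli_response* response) {
      (void)request;
      (void)deadline;
      memset(response, 0, sizeof(*response));  // tests fill this in instead
      GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
      return 1;  // nonzero, as the overrides above return when they handle a request
    }

    // Installed and removed around each test, exactly as in the diff:
    //   grpc_httpcli_set_override(fake_httpcli_get, httpcli_post_should_not_be_called);
    //   ...
    //   grpc_httpcli_set_override(nullptr, nullptr);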
diff --git a/test/core/security/json_token_test.cc b/test/core/security/json_token_test.cc
index 0b6ccd5..aac9cc0 100644
--- a/test/core/security/json_token_test.cc
+++ b/test/core/security/json_token_test.cc
@@ -207,7 +207,7 @@
 
 static grpc_json* parse_json_part_from_jwt(const char* str, size_t len,
                                            char** scratchpad) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   char* b64;
   char* decoded;
   grpc_json* json;
@@ -215,7 +215,7 @@
   b64 = static_cast<char*>(gpr_malloc(len + 1));
   strncpy(b64, str, len);
   b64[len] = '\0';
-  slice = grpc_base64_decode(&exec_ctx, b64, 1);
+  slice = grpc_base64_decode(b64, 1);
   GPR_ASSERT(!GRPC_SLICE_IS_EMPTY(slice));
   decoded = static_cast<char*>(gpr_malloc(GRPC_SLICE_LENGTH(slice) + 1));
   strncpy(decoded, (const char*)GRPC_SLICE_START_PTR(slice),
@@ -225,7 +225,7 @@
   gpr_free(b64);
   *scratchpad = decoded;
   grpc_slice_unref(slice);
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return json;
 }
 
@@ -327,12 +327,12 @@
 static void check_jwt_signature(const char* b64_signature, RSA* rsa_key,
                                 const char* signed_data,
                                 size_t signed_data_size) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   EVP_MD_CTX* md_ctx = EVP_MD_CTX_create();
   EVP_PKEY* key = EVP_PKEY_new();
 
-  grpc_slice sig = grpc_base64_decode(&exec_ctx, b64_signature, 1);
+  grpc_slice sig = grpc_base64_decode(b64_signature, 1);
   GPR_ASSERT(!GRPC_SLICE_IS_EMPTY(sig));
   GPR_ASSERT(GRPC_SLICE_LENGTH(sig) == 128);
 
@@ -347,11 +347,9 @@
   GPR_ASSERT(EVP_DigestVerifyFinal(md_ctx, GRPC_SLICE_START_PTR(sig),
                                    GRPC_SLICE_LENGTH(sig)) == 1);
 
-  grpc_slice_unref_internal(&exec_ctx, sig);
+  grpc_slice_unref_internal(sig);
   if (key != nullptr) EVP_PKEY_free(key);
   if (md_ctx != nullptr) EVP_MD_CTX_destroy(md_ctx);
-
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static char* service_account_creds_jwt_encode_and_sign(
@@ -485,6 +483,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   test_parse_json_key_success();
   test_parse_json_key_failure_bad_json();
   test_parse_json_key_failure_no_type();
@@ -499,5 +498,6 @@
   test_parse_refresh_token_failure_no_client_id();
   test_parse_refresh_token_failure_no_client_secret();
   test_parse_refresh_token_failure_no_refresh_token();
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/security/jwt_verifier_test.cc b/test/core/security/jwt_verifier_test.cc
index df0ebe5..e219260 100644
--- a/test/core/security/jwt_verifier_test.cc
+++ b/test/core/security/jwt_verifier_test.cc
@@ -209,8 +209,8 @@
   grpc_json* json = grpc_json_parse_string_with_len(
       (char*)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s));
   GPR_ASSERT(json != nullptr);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  claims = grpc_jwt_claims_from_json(&exec_ctx, json, s);
+  grpc_core::ExecCtx exec_ctx;
+  claims = grpc_jwt_claims_from_json(json, s);
   GPR_ASSERT(claims != nullptr);
   GPR_ASSERT(grpc_jwt_claims_json(claims) == json);
   GPR_ASSERT(strcmp(grpc_jwt_claims_audience(claims), "https://foo.com") == 0);
@@ -219,8 +219,7 @@
   GPR_ASSERT(strcmp(grpc_jwt_claims_id(claims), "jwtuniqueid") == 0);
   GPR_ASSERT(grpc_jwt_claims_check(claims, "https://foo.com") ==
              GRPC_JWT_VERIFIER_OK);
-  grpc_jwt_claims_destroy(&exec_ctx, claims);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_claims_destroy(claims);
 }
 
 static void test_expired_claims_failure(void) {
@@ -232,8 +231,8 @@
   gpr_timespec exp_exp = {120, 0, GPR_CLOCK_REALTIME};
   gpr_timespec exp_nbf = {60, 0, GPR_CLOCK_REALTIME};
   GPR_ASSERT(json != nullptr);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  claims = grpc_jwt_claims_from_json(&exec_ctx, json, s);
+  grpc_core::ExecCtx exec_ctx;
+  claims = grpc_jwt_claims_from_json(json, s);
   GPR_ASSERT(claims != nullptr);
   GPR_ASSERT(grpc_jwt_claims_json(claims) == json);
   GPR_ASSERT(strcmp(grpc_jwt_claims_audience(claims), "https://foo.com") == 0);
@@ -246,17 +245,15 @@
 
   GPR_ASSERT(grpc_jwt_claims_check(claims, "https://foo.com") ==
              GRPC_JWT_VERIFIER_TIME_CONSTRAINT_FAILURE);
-  grpc_jwt_claims_destroy(&exec_ctx, claims);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_claims_destroy(claims);
 }
 
 static void test_invalid_claims_failure(void) {
   grpc_slice s = grpc_slice_from_copied_string(invalid_claims);
   grpc_json* json = grpc_json_parse_string_with_len(
       (char*)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GPR_ASSERT(grpc_jwt_claims_from_json(&exec_ctx, json, s) == nullptr);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GPR_ASSERT(grpc_jwt_claims_from_json(json, s) == nullptr);
 }
 
 static void test_bad_audience_claims_failure(void) {
@@ -265,13 +262,12 @@
   grpc_json* json = grpc_json_parse_string_with_len(
       (char*)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s));
   GPR_ASSERT(json != nullptr);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  claims = grpc_jwt_claims_from_json(&exec_ctx, json, s);
+  grpc_core::ExecCtx exec_ctx;
+  claims = grpc_jwt_claims_from_json(json, s);
   GPR_ASSERT(claims != nullptr);
   GPR_ASSERT(grpc_jwt_claims_check(claims, "https://bar.com") ==
              GRPC_JWT_VERIFIER_BAD_AUDIENCE);
-  grpc_jwt_claims_destroy(&exec_ctx, claims);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_claims_destroy(claims);
 }
 
 static void test_bad_subject_claims_failure(void) {
@@ -280,13 +276,12 @@
   grpc_json* json = grpc_json_parse_string_with_len(
       (char*)GRPC_SLICE_START_PTR(s), GRPC_SLICE_LENGTH(s));
   GPR_ASSERT(json != nullptr);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  claims = grpc_jwt_claims_from_json(&exec_ctx, json, s);
+  grpc_core::ExecCtx exec_ctx;
+  claims = grpc_jwt_claims_from_json(json, s);
   GPR_ASSERT(claims != nullptr);
   GPR_ASSERT(grpc_jwt_claims_check(claims, "https://foo.com") ==
              GRPC_JWT_VERIFIER_BAD_SUBJECT);
-  grpc_jwt_claims_destroy(&exec_ctx, claims);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_claims_destroy(claims);
 }
 
 static char* json_key_str(const char* last_part) {
@@ -323,17 +318,16 @@
 }
 
 static int httpcli_post_should_not_be_called(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    const char* body_bytes, size_t body_size, grpc_millis deadline,
-    grpc_closure* on_done, grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, const char* body_bytes,
+    size_t body_size, grpc_millis deadline, grpc_closure* on_done,
+    grpc_httpcli_response* response) {
   GPR_ASSERT("HTTP POST should not be called" == nullptr);
   return 1;
 }
 
 static int httpcli_get_google_keys_for_email(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    grpc_millis deadline, grpc_closure* on_done,
-    grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, grpc_millis deadline,
+    grpc_closure* on_done, grpc_httpcli_response* response) {
   *response = http_response(200, good_google_email_keys());
   GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
   GPR_ASSERT(strcmp(request->host, "www.googleapis.com") == 0);
@@ -341,22 +335,22 @@
                     "/robot/v1/metadata/x509/"
                     "777-abaslkan11hlb6nmim3bpspl31ud@developer."
                     "gserviceaccount.com") == 0);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
-static void on_verification_success(grpc_exec_ctx* exec_ctx, void* user_data,
+static void on_verification_success(void* user_data,
                                     grpc_jwt_verifier_status status,
                                     grpc_jwt_claims* claims) {
   GPR_ASSERT(status == GRPC_JWT_VERIFIER_OK);
   GPR_ASSERT(claims != nullptr);
   GPR_ASSERT(user_data == (void*)expected_user_data);
   GPR_ASSERT(strcmp(grpc_jwt_claims_audience(claims), expected_audience) == 0);
-  grpc_jwt_claims_destroy(exec_ctx, claims);
+  grpc_jwt_claims_destroy(claims);
 }
 
 static void test_jwt_verifier_google_email_issuer_success(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_jwt_verifier* verifier = grpc_jwt_verifier_create(nullptr, 0);
   char* jwt = nullptr;
   char* key_str = json_key_str(json_key_str_part3_for_google_email_issuer);
@@ -369,28 +363,27 @@
                                  nullptr);
   grpc_auth_json_key_destruct(&key);
   GPR_ASSERT(jwt != nullptr);
-  grpc_jwt_verifier_verify(&exec_ctx, verifier, nullptr, jwt, expected_audience,
+  grpc_jwt_verifier_verify(verifier, nullptr, jwt, expected_audience,
                            on_verification_success, (void*)expected_user_data);
-  grpc_jwt_verifier_destroy(&exec_ctx, verifier);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_verifier_destroy(verifier);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_free(jwt);
   grpc_httpcli_set_override(nullptr, nullptr);
 }
 
 static int httpcli_get_custom_keys_for_email(
-    grpc_exec_ctx* exec_ctx, const grpc_httpcli_request* request,
-    grpc_millis deadline, grpc_closure* on_done,
-    grpc_httpcli_response* response) {
+    const grpc_httpcli_request* request, grpc_millis deadline,
+    grpc_closure* on_done, grpc_httpcli_response* response) {
   *response = http_response(200, gpr_strdup(good_jwk_set));
   GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
   GPR_ASSERT(strcmp(request->host, "keys.bar.com") == 0);
   GPR_ASSERT(strcmp(request->http.path, "/jwk/foo@bar.com") == 0);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
 static void test_jwt_verifier_custom_email_issuer_success(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_jwt_verifier* verifier = grpc_jwt_verifier_create(&custom_mapping, 1);
   char* jwt = nullptr;
   char* key_str = json_key_str(json_key_str_part3_for_custom_email_issuer);
@@ -403,28 +396,26 @@
                                  nullptr);
   grpc_auth_json_key_destruct(&key);
   GPR_ASSERT(jwt != nullptr);
-  grpc_jwt_verifier_verify(&exec_ctx, verifier, nullptr, jwt, expected_audience,
+  grpc_jwt_verifier_verify(verifier, nullptr, jwt, expected_audience,
                            on_verification_success, (void*)expected_user_data);
-  grpc_jwt_verifier_destroy(&exec_ctx, verifier);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_verifier_destroy(verifier);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_free(jwt);
   grpc_httpcli_set_override(nullptr, nullptr);
 }
 
-static int httpcli_get_jwk_set(grpc_exec_ctx* exec_ctx,
-                               const grpc_httpcli_request* request,
+static int httpcli_get_jwk_set(const grpc_httpcli_request* request,
                                grpc_millis deadline, grpc_closure* on_done,
                                grpc_httpcli_response* response) {
   *response = http_response(200, gpr_strdup(good_jwk_set));
   GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
   GPR_ASSERT(strcmp(request->host, "www.googleapis.com") == 0);
   GPR_ASSERT(strcmp(request->http.path, "/oauth2/v3/certs") == 0);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
-static int httpcli_get_openid_config(grpc_exec_ctx* exec_ctx,
-                                     const grpc_httpcli_request* request,
+static int httpcli_get_openid_config(const grpc_httpcli_request* request,
                                      grpc_millis deadline,
                                      grpc_closure* on_done,
                                      grpc_httpcli_response* response) {
@@ -434,12 +425,12 @@
   GPR_ASSERT(strcmp(request->http.path, GRPC_OPENID_CONFIG_URL_SUFFIX) == 0);
   grpc_httpcli_set_override(httpcli_get_jwk_set,
                             httpcli_post_should_not_be_called);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
 static void test_jwt_verifier_url_issuer_success(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_jwt_verifier* verifier = grpc_jwt_verifier_create(nullptr, 0);
   char* jwt = nullptr;
   char* key_str = json_key_str(json_key_str_part3_for_url_issuer);
@@ -452,16 +443,15 @@
                                  nullptr);
   grpc_auth_json_key_destruct(&key);
   GPR_ASSERT(jwt != nullptr);
-  grpc_jwt_verifier_verify(&exec_ctx, verifier, nullptr, jwt, expected_audience,
+  grpc_jwt_verifier_verify(verifier, nullptr, jwt, expected_audience,
                            on_verification_success, (void*)expected_user_data);
-  grpc_jwt_verifier_destroy(&exec_ctx, verifier);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_verifier_destroy(verifier);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_free(jwt);
   grpc_httpcli_set_override(nullptr, nullptr);
 }
 
-static void on_verification_key_retrieval_error(grpc_exec_ctx* exec_ctx,
-                                                void* user_data,
+static void on_verification_key_retrieval_error(void* user_data,
                                                 grpc_jwt_verifier_status status,
                                                 grpc_jwt_claims* claims) {
   GPR_ASSERT(status == GRPC_JWT_VERIFIER_KEY_RETRIEVAL_ERROR);
@@ -469,18 +459,17 @@
   GPR_ASSERT(user_data == (void*)expected_user_data);
 }
 
-static int httpcli_get_bad_json(grpc_exec_ctx* exec_ctx,
-                                const grpc_httpcli_request* request,
+static int httpcli_get_bad_json(const grpc_httpcli_request* request,
                                 grpc_millis deadline, grpc_closure* on_done,
                                 grpc_httpcli_response* response) {
   *response = http_response(200, gpr_strdup("{\"bad\": \"stuff\"}"));
   GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
 static void test_jwt_verifier_url_issuer_bad_config(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_jwt_verifier* verifier = grpc_jwt_verifier_create(nullptr, 0);
   char* jwt = nullptr;
   char* key_str = json_key_str(json_key_str_part3_for_url_issuer);
@@ -493,17 +482,17 @@
                                  nullptr);
   grpc_auth_json_key_destruct(&key);
   GPR_ASSERT(jwt != nullptr);
-  grpc_jwt_verifier_verify(&exec_ctx, verifier, nullptr, jwt, expected_audience,
+  grpc_jwt_verifier_verify(verifier, nullptr, jwt, expected_audience,
                            on_verification_key_retrieval_error,
                            (void*)expected_user_data);
-  grpc_jwt_verifier_destroy(&exec_ctx, verifier);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_verifier_destroy(verifier);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_free(jwt);
   grpc_httpcli_set_override(nullptr, nullptr);
 }
 
 static void test_jwt_verifier_bad_json_key(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_jwt_verifier* verifier = grpc_jwt_verifier_create(nullptr, 0);
   char* jwt = nullptr;
   char* key_str = json_key_str(json_key_str_part3_for_google_email_issuer);
@@ -516,11 +505,11 @@
                                  nullptr);
   grpc_auth_json_key_destruct(&key);
   GPR_ASSERT(jwt != nullptr);
-  grpc_jwt_verifier_verify(&exec_ctx, verifier, nullptr, jwt, expected_audience,
+  grpc_jwt_verifier_verify(verifier, nullptr, jwt, expected_audience,
                            on_verification_key_retrieval_error,
                            (void*)expected_user_data);
-  grpc_jwt_verifier_destroy(&exec_ctx, verifier);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_verifier_destroy(verifier);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_free(jwt);
   grpc_httpcli_set_override(nullptr, nullptr);
 }
@@ -532,9 +521,8 @@
   char* last_dot = strrchr(jwt, '.');
   GPR_ASSERT(last_dot != nullptr);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    sig = grpc_base64_decode(&exec_ctx, last_dot + 1, 1);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    sig = grpc_base64_decode(last_dot + 1, 1);
   }
   GPR_ASSERT(!GRPC_SLICE_IS_EMPTY(sig));
   sig_bytes = GRPC_SLICE_START_PTR(sig);
@@ -546,8 +534,7 @@
   grpc_slice_unref(sig);
 }
 
-static void on_verification_bad_signature(grpc_exec_ctx* exec_ctx,
-                                          void* user_data,
+static void on_verification_bad_signature(void* user_data,
                                           grpc_jwt_verifier_status status,
                                           grpc_jwt_claims* claims) {
   GPR_ASSERT(status == GRPC_JWT_VERIFIER_BAD_SIGNATURE);
@@ -556,7 +543,7 @@
 }
 
 static void test_jwt_verifier_bad_signature(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_jwt_verifier* verifier = grpc_jwt_verifier_create(nullptr, 0);
   char* jwt = nullptr;
   char* key_str = json_key_str(json_key_str_part3_for_url_issuer);
@@ -570,17 +557,16 @@
   grpc_auth_json_key_destruct(&key);
   corrupt_jwt_sig(jwt);
   GPR_ASSERT(jwt != nullptr);
-  grpc_jwt_verifier_verify(&exec_ctx, verifier, nullptr, jwt, expected_audience,
+  grpc_jwt_verifier_verify(verifier, nullptr, jwt, expected_audience,
                            on_verification_bad_signature,
                            (void*)expected_user_data);
   gpr_free(jwt);
-  grpc_jwt_verifier_destroy(&exec_ctx, verifier);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_verifier_destroy(verifier);
+  grpc_core::ExecCtx::Get()->Flush();
   grpc_httpcli_set_override(nullptr, nullptr);
 }
 
-static int httpcli_get_should_not_be_called(grpc_exec_ctx* exec_ctx,
-                                            const grpc_httpcli_request* request,
+static int httpcli_get_should_not_be_called(const grpc_httpcli_request* request,
                                             grpc_millis deadline,
                                             grpc_closure* on_done,
                                             grpc_httpcli_response* response) {
@@ -588,7 +574,7 @@
   return 1;
 }
 
-static void on_verification_bad_format(grpc_exec_ctx* exec_ctx, void* user_data,
+static void on_verification_bad_format(void* user_data,
                                        grpc_jwt_verifier_status status,
                                        grpc_jwt_claims* claims) {
   GPR_ASSERT(status == GRPC_JWT_VERIFIER_BAD_FORMAT);
@@ -597,15 +583,15 @@
 }
 
 static void test_jwt_verifier_bad_format(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_jwt_verifier* verifier = grpc_jwt_verifier_create(nullptr, 0);
   grpc_httpcli_set_override(httpcli_get_should_not_be_called,
                             httpcli_post_should_not_be_called);
-  grpc_jwt_verifier_verify(&exec_ctx, verifier, nullptr, "bad jwt",
-                           expected_audience, on_verification_bad_format,
+  grpc_jwt_verifier_verify(verifier, nullptr, "bad jwt", expected_audience,
+                           on_verification_bad_format,
                            (void*)expected_user_data);
-  grpc_jwt_verifier_destroy(&exec_ctx, verifier);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_verifier_destroy(verifier);
+  grpc_core::ExecCtx::Get()->Flush();
   grpc_httpcli_set_override(nullptr, nullptr);
 }
 
diff --git a/test/core/security/oauth2_utils.cc b/test/core/security/oauth2_utils.cc
index 602041e..0d3a127 100644
--- a/test/core/security/oauth2_utils.cc
+++ b/test/core/security/oauth2_utils.cc
@@ -39,8 +39,7 @@
   grpc_closure closure;
 } oauth2_request;
 
-static void on_oauth2_response(grpc_exec_ctx* exec_ctx, void* arg,
-                               grpc_error* error) {
+static void on_oauth2_response(void* arg, grpc_error* error) {
   oauth2_request* request = (oauth2_request*)arg;
   char* token = nullptr;
   grpc_slice token_slice;
@@ -54,25 +53,23 @@
            GRPC_SLICE_LENGTH(token_slice));
     token[GRPC_SLICE_LENGTH(token_slice)] = '\0';
   }
-  grpc_credentials_mdelem_array_destroy(exec_ctx, &request->md_array);
+  grpc_credentials_mdelem_array_destroy(&request->md_array);
   gpr_mu_lock(request->mu);
   request->is_done = true;
   request->token = token;
   GRPC_LOG_IF_ERROR(
       "pollset_kick",
-      grpc_pollset_kick(exec_ctx, grpc_polling_entity_pollset(&request->pops),
-                        nullptr));
+      grpc_pollset_kick(grpc_polling_entity_pollset(&request->pops), nullptr));
   gpr_mu_unlock(request->mu);
 }
 
-static void do_nothing(grpc_exec_ctx* exec_ctx, void* unused,
-                       grpc_error* error) {}
+static void do_nothing(void* unused, grpc_error* error) {}
 
 char* grpc_test_fetch_oauth2_token_with_credentials(
     grpc_call_credentials* creds) {
   oauth2_request request;
   memset(&request, 0, sizeof(request));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_closure do_nothing_closure;
   grpc_auth_metadata_context null_ctx = {"", "", nullptr, nullptr};
 
@@ -88,31 +85,30 @@
                     grpc_schedule_on_exec_ctx);
 
   grpc_error* error = GRPC_ERROR_NONE;
-  if (grpc_call_credentials_get_request_metadata(
-          &exec_ctx, creds, &request.pops, null_ctx, &request.md_array,
-          &request.closure, &error)) {
+  if (grpc_call_credentials_get_request_metadata(creds, &request.pops, null_ctx,
+                                                 &request.md_array,
+                                                 &request.closure, &error)) {
     // Synchronous result; invoke callback directly.
-    on_oauth2_response(&exec_ctx, &request, error);
+    on_oauth2_response(&request, error);
     GRPC_ERROR_UNREF(error);
   }
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
 
   gpr_mu_lock(request.mu);
   while (!request.is_done) {
     grpc_pollset_worker* worker = nullptr;
     if (!GRPC_LOG_IF_ERROR(
             "pollset_work",
-            grpc_pollset_work(&exec_ctx,
-                              grpc_polling_entity_pollset(&request.pops),
+            grpc_pollset_work(grpc_polling_entity_pollset(&request.pops),
                               &worker, GRPC_MILLIS_INF_FUTURE))) {
       request.is_done = true;
     }
   }
   gpr_mu_unlock(request.mu);
 
-  grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&request.pops),
+  grpc_pollset_shutdown(grpc_polling_entity_pollset(&request.pops),
                         &do_nothing_closure);
-  grpc_exec_ctx_finish(&exec_ctx);
+
   gpr_free(grpc_polling_entity_pollset(&request.pops));
   return request.token;
 }
diff --git a/test/core/security/print_google_default_creds_token.cc b/test/core/security/print_google_default_creds_token.cc
index f4acf02..b3742f5 100644
--- a/test/core/security/print_google_default_creds_token.cc
+++ b/test/core/security/print_google_default_creds_token.cc
@@ -41,8 +41,7 @@
   grpc_closure on_request_metadata;
 } synchronizer;
 
-static void on_metadata_response(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
+static void on_metadata_response(void* arg, grpc_error* error) {
   synchronizer* sync = static_cast<synchronizer*>(arg);
   if (error != GRPC_ERROR_NONE) {
     fprintf(stderr, "Fetching token failed: %s\n", grpc_error_string(error));
@@ -57,14 +56,13 @@
   sync->is_done = true;
   GRPC_LOG_IF_ERROR(
       "pollset_kick",
-      grpc_pollset_kick(exec_ctx, grpc_polling_entity_pollset(&sync->pops),
-                        nullptr));
+      grpc_pollset_kick(grpc_polling_entity_pollset(&sync->pops), nullptr));
   gpr_mu_unlock(sync->mu);
 }
 
 int main(int argc, char** argv) {
   int result = 0;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   synchronizer sync;
   grpc_channel_credentials* creds = nullptr;
   const char* service_url = "https://test.foo.google.com/Foo";
@@ -97,11 +95,10 @@
 
   error = GRPC_ERROR_NONE;
   if (grpc_call_credentials_get_request_metadata(
-          &exec_ctx, ((grpc_composite_channel_credentials*)creds)->call_creds,
-          &sync.pops, context, &sync.md_array, &sync.on_request_metadata,
-          &error)) {
+          ((grpc_composite_channel_credentials*)creds)->call_creds, &sync.pops,
+          context, &sync.md_array, &sync.on_request_metadata, &error)) {
     // Synchronous response.  Invoke callback directly.
-    on_metadata_response(&exec_ctx, &sync, error);
+    on_metadata_response(&sync, error);
     GRPC_ERROR_UNREF(error);
   }
 
@@ -110,18 +107,15 @@
     grpc_pollset_worker* worker = nullptr;
     if (!GRPC_LOG_IF_ERROR(
             "pollset_work",
-            grpc_pollset_work(&exec_ctx,
-                              grpc_polling_entity_pollset(&sync.pops), &worker,
+            grpc_pollset_work(grpc_polling_entity_pollset(&sync.pops), &worker,
                               GRPC_MILLIS_INF_FUTURE)))
       sync.is_done = true;
     gpr_mu_unlock(sync.mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(sync.mu);
   }
   gpr_mu_unlock(sync.mu);
 
-  grpc_exec_ctx_finish(&exec_ctx);
-
   grpc_channel_credentials_release(creds);
   gpr_free(grpc_polling_entity_pollset(&sync.pops));
 
diff --git a/test/core/security/secure_endpoint_test.cc b/test/core/security/secure_endpoint_test.cc
index a12af02..38c78fe 100644
--- a/test/core/security/secure_endpoint_test.cc
+++ b/test/core/security/secure_endpoint_test.cc
@@ -38,7 +38,7 @@
 static grpc_endpoint_test_fixture secure_endpoint_create_fixture_tcp_socketpair(
     size_t slice_size, grpc_slice* leftover_slices, size_t leftover_nslices,
     bool use_zero_copy_protector) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   tsi_frame_protector* fake_read_protector =
       tsi_create_fake_frame_protector(nullptr);
   tsi_frame_protector* fake_write_protector =
@@ -60,8 +60,8 @@
   a[0].value.integer = (int)slice_size;
   grpc_channel_args args = {GPR_ARRAY_SIZE(a), a};
   tcp = grpc_iomgr_create_endpoint_pair("fixture", &args);
-  grpc_endpoint_add_to_pollset(&exec_ctx, tcp.client, g_pollset);
-  grpc_endpoint_add_to_pollset(&exec_ctx, tcp.server, g_pollset);
+  grpc_endpoint_add_to_pollset(tcp.client, g_pollset);
+  grpc_endpoint_add_to_pollset(tcp.server, g_pollset);
 
   if (leftover_nslices == 0) {
     f.client_ep = grpc_secure_endpoint_create(fake_read_protector,
@@ -117,7 +117,7 @@
   f.server_ep = grpc_secure_endpoint_create(fake_write_protector,
                                             fake_write_zero_copy_protector,
                                             tcp.server, nullptr, 0);
-  grpc_exec_ctx_finish(&exec_ctx);
+
   return f;
 }
 
@@ -165,65 +165,62 @@
      clean_up},
 };
 
-static void inc_call_ctr(grpc_exec_ctx* exec_ctx, void* arg,
-                         grpc_error* error) {
-  ++*(int*)arg;
-}
+static void inc_call_ctr(void* arg, grpc_error* error) { ++*(int*)arg; }
 
 static void test_leftover(grpc_endpoint_test_config config, size_t slice_size) {
   grpc_endpoint_test_fixture f = config.create_fixture(slice_size);
   grpc_slice_buffer incoming;
   grpc_slice s =
       grpc_slice_from_copied_string("hello world 12345678900987654321");
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   int n = 0;
   grpc_closure done_closure;
   gpr_log(GPR_INFO, "Start test left over");
 
   grpc_slice_buffer_init(&incoming);
   GRPC_CLOSURE_INIT(&done_closure, inc_call_ctr, &n, grpc_schedule_on_exec_ctx);
-  grpc_endpoint_read(&exec_ctx, f.client_ep, &incoming, &done_closure);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_endpoint_read(f.client_ep, &incoming, &done_closure);
+
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(n == 1);
   GPR_ASSERT(incoming.count == 1);
   GPR_ASSERT(grpc_slice_eq(s, incoming.slices[0]));
 
   grpc_endpoint_shutdown(
-      &exec_ctx, f.client_ep,
-      GRPC_ERROR_CREATE_FROM_STATIC_STRING("test_leftover end"));
+      f.client_ep, GRPC_ERROR_CREATE_FROM_STATIC_STRING("test_leftover end"));
   grpc_endpoint_shutdown(
-      &exec_ctx, f.server_ep,
-      GRPC_ERROR_CREATE_FROM_STATIC_STRING("test_leftover end"));
-  grpc_endpoint_destroy(&exec_ctx, f.client_ep);
-  grpc_endpoint_destroy(&exec_ctx, f.server_ep);
-  grpc_exec_ctx_finish(&exec_ctx);
-  grpc_slice_unref_internal(&exec_ctx, s);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &incoming);
+      f.server_ep, GRPC_ERROR_CREATE_FROM_STATIC_STRING("test_leftover end"));
+  grpc_endpoint_destroy(f.client_ep);
+  grpc_endpoint_destroy(f.server_ep);
+
+  grpc_slice_unref_internal(s);
+  grpc_slice_buffer_destroy_internal(&incoming);
 
   clean_up();
 }
 
-static void destroy_pollset(grpc_exec_ctx* exec_ctx, void* p,
-                            grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, (grpc_pollset*)p);
+static void destroy_pollset(void* p, grpc_error* error) {
+  grpc_pollset_destroy((grpc_pollset*)p);
 }
 
 int main(int argc, char** argv) {
   grpc_closure destroyed;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_test_init(argc, argv);
-
   grpc_init();
-  g_pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
-  grpc_pollset_init(g_pollset, &g_mu);
-  grpc_endpoint_tests(configs[0], g_pollset, g_mu);
-  grpc_endpoint_tests(configs[1], g_pollset, g_mu);
-  test_leftover(configs[2], 1);
-  test_leftover(configs[3], 1);
-  GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
-                    grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
-  grpc_exec_ctx_finish(&exec_ctx);
+
+  {
+    grpc_core::ExecCtx exec_ctx;
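+    // Scoped in braces so the ExecCtx destructor runs (flushing any pending
+    // closures) before grpc_shutdown() is called below.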
+    g_pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
+    grpc_pollset_init(g_pollset, &g_mu);
+    grpc_endpoint_tests(configs[0], g_pollset, g_mu);
+    grpc_endpoint_tests(configs[1], g_pollset, g_mu);
+    test_leftover(configs[2], 1);
+    test_leftover(configs[3], 1);
+    GRPC_CLOSURE_INIT(&destroyed, destroy_pollset, g_pollset,
+                      grpc_schedule_on_exec_ctx);
+    grpc_pollset_shutdown(g_pollset, &destroyed);
+  }
+
   grpc_shutdown();
 
   gpr_free(g_pollset);
diff --git a/test/core/security/ssl_server_fuzzer.cc b/test/core/security/ssl_server_fuzzer.cc
index bbb2f60..6e30698 100644
--- a/test/core/security/ssl_server_fuzzer.cc
+++ b/test/core/security/ssl_server_fuzzer.cc
@@ -40,8 +40,7 @@
   bool done_callback_called;
 };
 
-static void on_handshake_done(grpc_exec_ctx* exec_ctx, void* arg,
-                              grpc_error* error) {
+static void on_handshake_done(void* arg, grpc_error* error) {
   grpc_handshaker_args* args = static_cast<grpc_handshaker_args*>(arg);
   struct handshake_state* state =
       static_cast<struct handshake_state*>(args->user_data);
@@ -56,66 +55,70 @@
   if (squelch) gpr_set_log_function(dont_log);
   if (leak_check) grpc_memory_counters_init();
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  {
+    grpc_core::ExecCtx exec_ctx;
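+    // The fuzzer body is wrapped in a block for the same reason: the ExecCtx
+    // must be destroyed before grpc_shutdown() at the end of this function.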
 
-  grpc_resource_quota* resource_quota =
-      grpc_resource_quota_create("ssl_server_fuzzer");
-  grpc_endpoint* mock_endpoint =
-      grpc_mock_endpoint_create(discard_write, resource_quota);
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
+    grpc_resource_quota* resource_quota =
+        grpc_resource_quota_create("ssl_server_fuzzer");
+    grpc_endpoint* mock_endpoint =
+        grpc_mock_endpoint_create(discard_write, resource_quota);
+    grpc_resource_quota_unref_internal(resource_quota);
 
-  grpc_mock_endpoint_put_read(
-      &exec_ctx, mock_endpoint,
-      grpc_slice_from_copied_buffer((const char*)data, size));
+    grpc_mock_endpoint_put_read(
+        mock_endpoint, grpc_slice_from_copied_buffer((const char*)data, size));
 
-  // Load key pair and establish server SSL credentials.
-  grpc_ssl_pem_key_cert_pair pem_key_cert_pair;
-  grpc_slice ca_slice, cert_slice, key_slice;
-  ca_slice = grpc_slice_from_static_string(test_root_cert);
-  cert_slice = grpc_slice_from_static_string(test_server1_cert);
-  key_slice = grpc_slice_from_static_string(test_server1_key);
-  const char* ca_cert = (const char*)GRPC_SLICE_START_PTR(ca_slice);
-  pem_key_cert_pair.private_key = (const char*)GRPC_SLICE_START_PTR(key_slice);
-  pem_key_cert_pair.cert_chain = (const char*)GRPC_SLICE_START_PTR(cert_slice);
-  grpc_server_credentials* creds = grpc_ssl_server_credentials_create(
-      ca_cert, &pem_key_cert_pair, 1, 0, nullptr);
+    // Load key pair and establish server SSL credentials.
+    grpc_ssl_pem_key_cert_pair pem_key_cert_pair;
+    grpc_slice ca_slice, cert_slice, key_slice;
+    ca_slice = grpc_slice_from_static_string(test_root_cert);
+    cert_slice = grpc_slice_from_static_string(test_server1_cert);
+    key_slice = grpc_slice_from_static_string(test_server1_key);
+    const char* ca_cert = (const char*)GRPC_SLICE_START_PTR(ca_slice);
+    pem_key_cert_pair.private_key =
+        (const char*)GRPC_SLICE_START_PTR(key_slice);
+    pem_key_cert_pair.cert_chain =
+        (const char*)GRPC_SLICE_START_PTR(cert_slice);
+    grpc_server_credentials* creds = grpc_ssl_server_credentials_create(
+        ca_cert, &pem_key_cert_pair, 1, 0, nullptr);
 
-  // Create security connector
-  grpc_server_security_connector* sc = nullptr;
-  grpc_security_status status =
-      grpc_server_credentials_create_security_connector(&exec_ctx, creds, &sc);
-  GPR_ASSERT(status == GRPC_SECURITY_OK);
-  grpc_millis deadline = GPR_MS_PER_SEC + grpc_exec_ctx_now(&exec_ctx);
+    // Create security connector
+    grpc_server_security_connector* sc = nullptr;
+    grpc_security_status status =
+        grpc_server_credentials_create_security_connector(creds, &sc);
+    GPR_ASSERT(status == GRPC_SECURITY_OK);
+    grpc_millis deadline = GPR_MS_PER_SEC + grpc_core::ExecCtx::Get()->Now();
 
-  struct handshake_state state;
-  state.done_callback_called = false;
-  grpc_handshake_manager* handshake_mgr = grpc_handshake_manager_create();
-  grpc_server_security_connector_add_handshakers(&exec_ctx, sc, handshake_mgr);
-  grpc_handshake_manager_do_handshake(
-      &exec_ctx, handshake_mgr, mock_endpoint, nullptr /* channel_args */,
-      deadline, nullptr /* acceptor */, on_handshake_done, &state);
-  grpc_exec_ctx_flush(&exec_ctx);
+    struct handshake_state state;
+    state.done_callback_called = false;
+    grpc_handshake_manager* handshake_mgr = grpc_handshake_manager_create();
+    grpc_server_security_connector_add_handshakers(sc, handshake_mgr);
+    grpc_handshake_manager_do_handshake(
+        handshake_mgr, nullptr /* interested_parties */, mock_endpoint,
+        nullptr /* channel_args */, deadline, nullptr /* acceptor */,
+        on_handshake_done, &state);
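+    // grpc_handshake_manager_do_handshake() now takes an interested_parties
+    // pollset_set in place of the exec_ctx argument; nullptr is passed here.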
+    grpc_core::ExecCtx::Get()->Flush();
 
-  // If the given string happens to be part of the correct client hello, the
-  // server will wait for more data. Explicitly fail the server by shutting down
-  // the endpoint.
-  if (!state.done_callback_called) {
-    grpc_endpoint_shutdown(
-        &exec_ctx, mock_endpoint,
-        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Explicit close"));
-    grpc_exec_ctx_flush(&exec_ctx);
+    // If the given string happens to be part of the correct client hello, the
+    // server will wait for more data. Explicitly fail the server by shutting
+    // down the endpoint.
+    if (!state.done_callback_called) {
+      grpc_endpoint_shutdown(
+          mock_endpoint,
+          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Explicit close"));
+      grpc_core::ExecCtx::Get()->Flush();
+    }
+
+    GPR_ASSERT(state.done_callback_called);
+
+    grpc_handshake_manager_destroy(handshake_mgr);
+    GRPC_SECURITY_CONNECTOR_UNREF(&sc->base, "test");
+    grpc_server_credentials_release(creds);
+    grpc_slice_unref(cert_slice);
+    grpc_slice_unref(key_slice);
+    grpc_slice_unref(ca_slice);
+    grpc_core::ExecCtx::Get()->Flush();
   }
 
-  GPR_ASSERT(state.done_callback_called);
-
-  grpc_handshake_manager_destroy(&exec_ctx, handshake_mgr);
-  GRPC_SECURITY_CONNECTOR_UNREF(&exec_ctx, &sc->base, "test");
-  grpc_server_credentials_release(creds);
-  grpc_slice_unref(cert_slice);
-  grpc_slice_unref(key_slice);
-  grpc_slice_unref(ca_slice);
-  grpc_exec_ctx_flush(&exec_ctx);
-
   grpc_shutdown();
   if (leak_check) {
     counters = grpc_memory_counters_snapshot();
diff --git a/test/core/security/verify_jwt.cc b/test/core/security/verify_jwt.cc
index 787d58b..e039970 100644
--- a/test/core/security/verify_jwt.cc
+++ b/test/core/security/verify_jwt.cc
@@ -44,7 +44,7 @@
   exit(1);
 }
 
-static void on_jwt_verification_done(grpc_exec_ctx* exec_ctx, void* user_data,
+static void on_jwt_verification_done(void* user_data,
                                      grpc_jwt_verifier_status status,
                                      grpc_jwt_claims* claims) {
   synchronizer* sync = static_cast<synchronizer*>(user_data);
@@ -57,7 +57,7 @@
         grpc_json_dump_to_string((grpc_json*)grpc_jwt_claims_json(claims), 2);
     printf("Claims: \n\n%s\n", claims_str);
     gpr_free(claims_str);
-    grpc_jwt_claims_destroy(exec_ctx, claims);
+    grpc_jwt_claims_destroy(claims);
   } else {
     GPR_ASSERT(claims == nullptr);
     fprintf(stderr, "Verification failed with error %s\n",
@@ -66,8 +66,7 @@
 
   gpr_mu_lock(sync->mu);
   sync->is_done = 1;
-  GRPC_LOG_IF_ERROR("pollset_kick",
-                    grpc_pollset_kick(exec_ctx, sync->pollset, nullptr));
+  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(sync->pollset, nullptr));
   gpr_mu_unlock(sync->mu);
 }
 
@@ -77,7 +76,7 @@
   gpr_cmdline* cl;
   const char* jwt = nullptr;
   const char* aud = nullptr;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   grpc_init();
   cl = gpr_cmdline_create("JWT verifier tool");
@@ -96,26 +95,26 @@
   grpc_pollset_init(sync.pollset, &sync.mu);
   sync.is_done = 0;
 
-  grpc_jwt_verifier_verify(&exec_ctx, verifier, sync.pollset, jwt, aud,
+  grpc_jwt_verifier_verify(verifier, sync.pollset, jwt, aud,
                            on_jwt_verification_done, &sync);
 
   gpr_mu_lock(sync.mu);
   while (!sync.is_done) {
     grpc_pollset_worker* worker = nullptr;
-    if (!GRPC_LOG_IF_ERROR("pollset_work",
-                           grpc_pollset_work(&exec_ctx, sync.pollset, &worker,
-                                             GRPC_MILLIS_INF_FUTURE)))
+    if (!GRPC_LOG_IF_ERROR(
+            "pollset_work",
+            grpc_pollset_work(sync.pollset, &worker, GRPC_MILLIS_INF_FUTURE)))
       sync.is_done = true;
     gpr_mu_unlock(sync.mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(sync.mu);
   }
   gpr_mu_unlock(sync.mu);
 
   gpr_free(sync.pollset);
 
-  grpc_jwt_verifier_destroy(&exec_ctx, verifier);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_jwt_verifier_destroy(verifier);
+
   gpr_cmdline_destroy(cl);
   grpc_shutdown();
   return !sync.success;
diff --git a/test/core/slice/b64_test.cc b/test/core/slice/b64_test.cc
index 479198f..94785fd 100644
--- a/test/core/slice/b64_test.cc
+++ b/test/core/slice/b64_test.cc
@@ -20,6 +20,7 @@
 
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/slice.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
@@ -44,14 +45,14 @@
   const char* hello = "hello";
   char* hello_b64 =
       grpc_base64_encode(hello, strlen(hello), url_safe, multiline);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice hello_slice = grpc_base64_decode(&exec_ctx, hello_b64, url_safe);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_slice hello_slice = grpc_base64_decode(hello_b64, url_safe);
   GPR_ASSERT(GRPC_SLICE_LENGTH(hello_slice) == strlen(hello));
   GPR_ASSERT(strncmp((const char*)GRPC_SLICE_START_PTR(hello_slice), hello,
                      GRPC_SLICE_LENGTH(hello_slice)) == 0);
 
-  grpc_slice_unref_internal(&exec_ctx, hello_slice);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_slice_unref_internal(hello_slice);
+
   gpr_free(hello_b64);
 }
 
@@ -64,15 +65,14 @@
 
   /* Try all the different paddings. */
   for (i = 0; i < 3; i++) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     b64 = grpc_base64_encode(orig, sizeof(orig) - i, url_safe, multiline);
-    orig_decoded = grpc_base64_decode(&exec_ctx, b64, url_safe);
+    orig_decoded = grpc_base64_decode(b64, url_safe);
     GPR_ASSERT(GRPC_SLICE_LENGTH(orig_decoded) == (sizeof(orig) - i));
     GPR_ASSERT(buffers_are_equal(orig, GRPC_SLICE_START_PTR(orig_decoded),
                                  sizeof(orig) - i));
-    grpc_slice_unref_internal(&exec_ctx, orig_decoded);
+    grpc_slice_unref_internal(orig_decoded);
     gpr_free(b64);
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 }
 
@@ -116,19 +116,18 @@
   int url_safe = 1;
   for (i = 0; i < sizeof(orig); i++) orig[i] = (uint8_t)i;
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   b64 = grpc_base64_encode(orig, sizeof(orig), url_safe, 0);
-  orig_decoded = grpc_base64_decode(&exec_ctx, b64, !url_safe);
+  orig_decoded = grpc_base64_decode(b64, !url_safe);
   GPR_ASSERT(GRPC_SLICE_IS_EMPTY(orig_decoded));
   gpr_free(b64);
-  grpc_slice_unref_internal(&exec_ctx, orig_decoded);
+  grpc_slice_unref_internal(orig_decoded);
 
   b64 = grpc_base64_encode(orig, sizeof(orig), !url_safe, 0);
-  orig_decoded = grpc_base64_decode(&exec_ctx, b64, url_safe);
+  orig_decoded = grpc_base64_decode(b64, url_safe);
   GPR_ASSERT(GRPC_SLICE_IS_EMPTY(orig_decoded));
   gpr_free(b64);
-  grpc_slice_unref_internal(&exec_ctx, orig_decoded);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_slice_unref_internal(orig_decoded);
 }
 
 static void test_rfc4648_test_vectors(void) {
@@ -166,44 +165,44 @@
 static void test_unpadded_decode(void) {
   grpc_slice decoded;
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  decoded = grpc_base64_decode(&exec_ctx, "Zm9vYmFy", 0);
+  grpc_core::ExecCtx exec_ctx;
+  decoded = grpc_base64_decode("Zm9vYmFy", 0);
   GPR_ASSERT(!GRPC_SLICE_IS_EMPTY(decoded));
   GPR_ASSERT(grpc_slice_str_cmp(decoded, "foobar") == 0);
   grpc_slice_unref(decoded);
 
-  decoded = grpc_base64_decode(&exec_ctx, "Zm9vYmE", 0);
+  decoded = grpc_base64_decode("Zm9vYmE", 0);
   GPR_ASSERT(!GRPC_SLICE_IS_EMPTY(decoded));
   GPR_ASSERT(grpc_slice_str_cmp(decoded, "fooba") == 0);
   grpc_slice_unref(decoded);
 
-  decoded = grpc_base64_decode(&exec_ctx, "Zm9vYg", 0);
+  decoded = grpc_base64_decode("Zm9vYg", 0);
   GPR_ASSERT(!GRPC_SLICE_IS_EMPTY(decoded));
   GPR_ASSERT(grpc_slice_str_cmp(decoded, "foob") == 0);
   grpc_slice_unref(decoded);
 
-  decoded = grpc_base64_decode(&exec_ctx, "Zm9v", 0);
+  decoded = grpc_base64_decode("Zm9v", 0);
   GPR_ASSERT(!GRPC_SLICE_IS_EMPTY(decoded));
   GPR_ASSERT(grpc_slice_str_cmp(decoded, "foo") == 0);
   grpc_slice_unref(decoded);
 
-  decoded = grpc_base64_decode(&exec_ctx, "Zm8", 0);
+  decoded = grpc_base64_decode("Zm8", 0);
   GPR_ASSERT(!GRPC_SLICE_IS_EMPTY(decoded));
   GPR_ASSERT(grpc_slice_str_cmp(decoded, "fo") == 0);
   grpc_slice_unref(decoded);
 
-  decoded = grpc_base64_decode(&exec_ctx, "Zg", 0);
+  decoded = grpc_base64_decode("Zg", 0);
   GPR_ASSERT(!GRPC_SLICE_IS_EMPTY(decoded));
   GPR_ASSERT(grpc_slice_str_cmp(decoded, "f") == 0);
   grpc_slice_unref(decoded);
 
-  decoded = grpc_base64_decode(&exec_ctx, "", 0);
+  decoded = grpc_base64_decode("", 0);
   GPR_ASSERT(GRPC_SLICE_IS_EMPTY(decoded));
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
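+  // grpc_init()/grpc_shutdown() now bracket these tests because they create a
+  // grpc_core::ExecCtx, which relies on library-global state (compare the
+  // ExecCtx::GlobalInit()/GlobalShutdown() pair used in slice_hash_table_test).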
   test_simple_encode_decode_b64_no_multiline();
   test_simple_encode_decode_b64_multiline();
   test_simple_encode_decode_b64_urlsafe_no_multiline();
@@ -215,5 +214,6 @@
   test_url_safe_unsafe_mismatch_failure();
   test_rfc4648_test_vectors();
   test_unpadded_decode();
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/slice/percent_decode_fuzzer.cc b/test/core/slice/percent_decode_fuzzer.cc
index 3603177..81eb031 100644
--- a/test/core/slice/percent_decode_fuzzer.cc
+++ b/test/core/slice/percent_decode_fuzzer.cc
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 
@@ -31,6 +32,7 @@
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
   struct grpc_memory_counters counters;
+  grpc_init();
   grpc_memory_counters_init();
   grpc_slice input = grpc_slice_from_copied_buffer((const char*)data, size);
   grpc_slice output;
@@ -46,6 +48,7 @@
   grpc_slice_unref(input);
   counters = grpc_memory_counters_snapshot();
   grpc_memory_counters_destroy();
+  grpc_shutdown();
   GPR_ASSERT(counters.total_size_relative == 0);
   return 0;
 }
diff --git a/test/core/slice/percent_encode_fuzzer.cc b/test/core/slice/percent_encode_fuzzer.cc
index c8e3849..201ae27 100644
--- a/test/core/slice/percent_encode_fuzzer.cc
+++ b/test/core/slice/percent_encode_fuzzer.cc
@@ -20,6 +20,7 @@
 #include <stdint.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 
@@ -31,6 +32,7 @@
 
 static void test(const uint8_t* data, size_t size, const uint8_t* dict) {
   struct grpc_memory_counters counters;
+  grpc_init();
   grpc_memory_counters_init();
   grpc_slice input = grpc_slice_from_copied_buffer((const char*)data, size);
   grpc_slice output = grpc_percent_encode_slice(input, dict);
@@ -48,6 +50,7 @@
   grpc_slice_unref(permissive_decoded_output);
   counters = grpc_memory_counters_snapshot();
   grpc_memory_counters_destroy();
+  grpc_shutdown();
   GPR_ASSERT(counters.total_size_relative == 0);
 }
 
diff --git a/test/core/slice/percent_encoding_test.cc b/test/core/slice/percent_encoding_test.cc
index 253240f..11f3995 100644
--- a/test/core/slice/percent_encoding_test.cc
+++ b/test/core/slice/percent_encoding_test.cc
@@ -18,6 +18,7 @@
 
 #include "src/core/lib/slice/percent_encoding.h"
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 
@@ -118,6 +119,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   TEST_VECTOR(
       "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_.~",
       "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_.~",
@@ -140,5 +142,6 @@
                             grpc_url_percent_encoding_unreserved_bytes);
   TEST_NONCONFORMANT_VECTOR("\0", "\0",
                             grpc_url_percent_encoding_unreserved_bytes);
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/slice/slice_buffer_test.cc b/test/core/slice/slice_buffer_test.cc
index 338e807..e599867 100644
--- a/test/core/slice/slice_buffer_test.cc
+++ b/test/core/slice/slice_buffer_test.cc
@@ -16,6 +16,7 @@
  *
  */
 
+#include <grpc/grpc.h>
 #include <grpc/slice_buffer.h>
 #include <grpc/support/log.h>
 #include "test/core/util/test_config.h"
@@ -106,9 +107,11 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   test_slice_buffer_add();
   test_slice_buffer_move_first();
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/slice/slice_hash_table_test.cc b/test/core/slice/slice_hash_table_test.cc
index 0ee4e86..9fad9a6 100644
--- a/test/core/slice/slice_hash_table_test.cc
+++ b/test/core/slice/slice_hash_table_test.cc
@@ -59,9 +59,7 @@
   grpc_slice_unref(key);
 }
 
-static void destroy_string(grpc_exec_ctx* exec_ctx, void* value) {
-  gpr_free(value);
-}
+static void destroy_string(void* value) { gpr_free(value); }
 
 static grpc_slice_hash_table* create_table_from_entries(
     const test_entry* test_entries, size_t num_test_entries,
@@ -121,9 +119,8 @@
   check_values(test_entries, num_entries, table);
   check_non_existent_value("XX", table);
   // Clean up.
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice_hash_table_unref(&exec_ctx, table);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_slice_hash_table_unref(table);
 }
 
 static int value_cmp_fn(void* a, void* b) {
@@ -149,10 +146,9 @@
       create_table_from_entries(test_entries_b, num_entries_b, value_cmp_fn);
 
   GPR_ASSERT(grpc_slice_hash_table_cmp(table_a, table_b) == 0);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice_hash_table_unref(&exec_ctx, table_a);
-  grpc_slice_hash_table_unref(&exec_ctx, table_b);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_slice_hash_table_unref(table_a);
+  grpc_slice_hash_table_unref(table_b);
 }
 
 static void test_slice_hash_table_not_eq() {
@@ -221,23 +217,24 @@
       create_table_from_entries(test_entries_h, num_entries_h, pointer_cmp_fn);
   GPR_ASSERT(grpc_slice_hash_table_cmp(table_g, table_h) != 0);
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_slice_hash_table_unref(&exec_ctx, table_a);
-  grpc_slice_hash_table_unref(&exec_ctx, table_b_larger);
-  grpc_slice_hash_table_unref(&exec_ctx, table_b_smaller);
-  grpc_slice_hash_table_unref(&exec_ctx, table_c);
-  grpc_slice_hash_table_unref(&exec_ctx, table_d);
-  grpc_slice_hash_table_unref(&exec_ctx, table_e);
-  grpc_slice_hash_table_unref(&exec_ctx, table_f);
-  grpc_slice_hash_table_unref(&exec_ctx, table_g);
-  grpc_slice_hash_table_unref(&exec_ctx, table_h);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_slice_hash_table_unref(table_a);
+  grpc_slice_hash_table_unref(table_b_larger);
+  grpc_slice_hash_table_unref(table_b_smaller);
+  grpc_slice_hash_table_unref(table_c);
+  grpc_slice_hash_table_unref(table_d);
+  grpc_slice_hash_table_unref(table_e);
+  grpc_slice_hash_table_unref(table_f);
+  grpc_slice_hash_table_unref(table_g);
+  grpc_slice_hash_table_unref(table_h);
 }
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_core::ExecCtx::GlobalInit();
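+  // GlobalInit()/GlobalShutdown() set up only the thread-local ExecCtx state,
+  // a lighter-weight alternative to a full grpc_init()/grpc_shutdown() pair.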
   test_slice_hash_table();
   test_slice_hash_table_eq();
   test_slice_hash_table_not_eq();
+  grpc_core::ExecCtx::GlobalShutdown();
   return 0;
 }
diff --git a/test/core/slice/slice_string_helpers_test.cc b/test/core/slice/slice_string_helpers_test.cc
index 260f8c8..f1d4704 100644
--- a/test/core/slice/slice_string_helpers_test.cc
+++ b/test/core/slice/slice_string_helpers_test.cc
@@ -23,6 +23,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
@@ -130,7 +131,9 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   test_dump_slice();
   test_strsplit();
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/slice/slice_test.cc b/test/core/slice/slice_test.cc
index 02f6b1e..e40154d 100644
--- a/test/core/slice/slice_test.cc
+++ b/test/core/slice/slice_test.cc
@@ -292,6 +292,7 @@
 int main(int argc, char** argv) {
   unsigned length;
   grpc_test_init(argc, argv);
+  grpc_init();
   test_slice_malloc_returns_something_sensible();
   test_slice_new_returns_something_sensible();
   test_slice_new_with_user_data();
@@ -305,5 +306,6 @@
   test_slice_interning();
   test_static_slice_interning();
   test_static_slice_copy_interning();
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/support/BUILD b/test/core/support/BUILD
index 996166a..4372b49 100644
--- a/test/core/support/BUILD
+++ b/test/core/support/BUILD
@@ -69,16 +69,6 @@
 )
 
 grpc_cc_test(
-    name = "histogram_test",
-    srcs = ["histogram_test.cc"],
-    language = "C++",
-    deps = [
-        "//:gpr",
-        "//test/core/util:gpr_test_util",
-    ],
-)
-
-grpc_cc_test(
     name = "host_port_test",
     srcs = ["host_port_test.cc"],
     language = "C++",
@@ -223,3 +213,30 @@
         "//test/core/util:gpr_test_util",
     ],
 )
+
+grpc_cc_test(
+    name = "ref_counted_test",
+    srcs = ["ref_counted_test.cc"],
+    language = "C++",
+    deps = [
+        "//:ref_counted",
+        "//test/core/util:gpr_test_util",
+    ],
+    external_deps = [
+        "gtest",
+    ],
+)
+
+grpc_cc_test(
+    name = "ref_counted_ptr_test",
+    srcs = ["ref_counted_ptr_test.cc"],
+    language = "C++",
+    deps = [
+        "//:ref_counted",
+        "//:ref_counted_ptr",
+        "//test/core/util:gpr_test_util",
+    ],
+    external_deps = [
+        "gtest",
+    ],
+)
diff --git a/test/core/support/arena_test.cc b/test/core/support/arena_test.cc
index 244d860..ada0f43 100644
--- a/test/core/support/arena_test.cc
+++ b/test/core/support/arena_test.cc
@@ -100,7 +100,8 @@
   for (int i = 0; i < CONCURRENT_TEST_THREADS; i++) {
     gpr_thd_options opt = gpr_thd_options_default();
     gpr_thd_options_set_joinable(&opt);
-    gpr_thd_new(&thds[i], concurrent_test_body, &args, &opt);
+    gpr_thd_new(&thds[i], "grpc_concurrent_test", concurrent_test_body, &args,
+                &opt);
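+    // gpr_thd_new() now takes a human-readable thread name as its second
+    // argument; the same signature change appears in the other threading tests
+    // below.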
   }
 
   gpr_event_set(&args.ev_start, (void*)1);
diff --git a/test/core/support/cpu_test.cc b/test/core/support/cpu_test.cc
index 1783ec3..334c431 100644
--- a/test/core/support/cpu_test.cc
+++ b/test/core/support/cpu_test.cc
@@ -110,11 +110,12 @@
   gpr_cv_init(&ct.done_cv);
   ct.is_done = 0;
   for (i = 0; i < ct.ncores * 3; i++) {
-    GPR_ASSERT(gpr_thd_new(&thd, &worker_thread, &ct, nullptr));
+    GPR_ASSERT(
+        gpr_thd_new(&thd, "grpc_cpu_test", &worker_thread, &ct, nullptr));
   }
   gpr_mu_lock(&ct.mu);
   while (!ct.is_done) {
-    gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&ct.done_cv, &ct.mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
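+    // gpr_cv_wait() deadlines switch from GPR_CLOCK_REALTIME to
+    // GPR_CLOCK_MONOTONIC here and in sync_test.cc, presumably to match the
+    // clock used by the condition-variable implementation.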
   }
   gpr_mu_unlock(&ct.mu);
   fprintf(stderr, "Saw cores [");
diff --git a/test/core/support/histogram_test.cc b/test/core/support/histogram_test.cc
deleted file mode 100644
index 86b7d59..0000000
--- a/test/core/support/histogram_test.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-/*
- *
- * Copyright 2015 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-#include <grpc/support/histogram.h>
-#include <grpc/support/log.h>
-
-#define LOG_TEST(x) gpr_log(GPR_INFO, "%s", x);
-
-static void test_no_op(void) {
-  gpr_histogram_destroy(gpr_histogram_create(0.01, 60e9));
-}
-
-static void expect_percentile(gpr_histogram* h, double percentile,
-                              double min_expect, double max_expect) {
-  double got = gpr_histogram_percentile(h, percentile);
-  gpr_log(GPR_INFO, "@%f%%, expect %f <= %f <= %f", percentile, min_expect, got,
-          max_expect);
-  GPR_ASSERT(min_expect <= got);
-  GPR_ASSERT(got <= max_expect);
-}
-
-static void test_simple(void) {
-  gpr_histogram* h;
-
-  LOG_TEST("test_simple");
-
-  h = gpr_histogram_create(0.01, 60e9);
-  gpr_histogram_add(h, 10000);
-  gpr_histogram_add(h, 10000);
-  gpr_histogram_add(h, 11000);
-  gpr_histogram_add(h, 11000);
-
-  expect_percentile(h, 50, 10001, 10999);
-  GPR_ASSERT(gpr_histogram_mean(h) == 10500);
-
-  gpr_histogram_destroy(h);
-}
-
-static void test_percentile(void) {
-  gpr_histogram* h;
-  double last;
-  double i;
-  double cur;
-
-  LOG_TEST("test_percentile");
-
-  h = gpr_histogram_create(0.05, 1e9);
-  gpr_histogram_add(h, 2.5);
-  gpr_histogram_add(h, 2.5);
-  gpr_histogram_add(h, 8);
-  gpr_histogram_add(h, 4);
-
-  GPR_ASSERT(gpr_histogram_count(h) == 4);
-  GPR_ASSERT(gpr_histogram_minimum(h) == 2.5);
-  GPR_ASSERT(gpr_histogram_maximum(h) == 8);
-  GPR_ASSERT(gpr_histogram_sum(h) == 17);
-  GPR_ASSERT(gpr_histogram_sum_of_squares(h) == 92.5);
-  GPR_ASSERT(gpr_histogram_mean(h) == 4.25);
-  GPR_ASSERT(gpr_histogram_variance(h) == 5.0625);
-  GPR_ASSERT(gpr_histogram_stddev(h) == 2.25);
-
-  expect_percentile(h, -10, 2.5, 2.5);
-  expect_percentile(h, 0, 2.5, 2.5);
-  expect_percentile(h, 12.5, 2.5, 2.5);
-  expect_percentile(h, 25, 2.5, 2.5);
-  expect_percentile(h, 37.5, 2.5, 2.8);
-  expect_percentile(h, 50, 3.0, 3.5);
-  expect_percentile(h, 62.5, 3.5, 4.5);
-  expect_percentile(h, 75, 5, 7.9);
-  expect_percentile(h, 100, 8, 8);
-  expect_percentile(h, 110, 8, 8);
-
-  /* test monotonicity */
-  last = 0.0;
-  for (i = 0; i < 100.0; i += 0.01) {
-    cur = gpr_histogram_percentile(h, i);
-    GPR_ASSERT(cur >= last);
-    last = cur;
-  }
-
-  gpr_histogram_destroy(h);
-}
-
-static void test_merge(void) {
-  gpr_histogram *h1, *h2;
-  double last;
-  double i;
-  double cur;
-
-  LOG_TEST("test_merge");
-
-  h1 = gpr_histogram_create(0.05, 1e9);
-  gpr_histogram_add(h1, 2.5);
-  gpr_histogram_add(h1, 2.5);
-  gpr_histogram_add(h1, 8);
-  gpr_histogram_add(h1, 4);
-
-  h2 = gpr_histogram_create(0.01, 1e9);
-  GPR_ASSERT(gpr_histogram_merge(h1, h2) == 0);
-  gpr_histogram_destroy(h2);
-
-  h2 = gpr_histogram_create(0.05, 1e10);
-  GPR_ASSERT(gpr_histogram_merge(h1, h2) == 0);
-  gpr_histogram_destroy(h2);
-
-  h2 = gpr_histogram_create(0.05, 1e9);
-  GPR_ASSERT(gpr_histogram_merge(h1, h2) == 1);
-  GPR_ASSERT(gpr_histogram_count(h1) == 4);
-  GPR_ASSERT(gpr_histogram_minimum(h1) == 2.5);
-  GPR_ASSERT(gpr_histogram_maximum(h1) == 8);
-  GPR_ASSERT(gpr_histogram_sum(h1) == 17);
-  GPR_ASSERT(gpr_histogram_sum_of_squares(h1) == 92.5);
-  GPR_ASSERT(gpr_histogram_mean(h1) == 4.25);
-  GPR_ASSERT(gpr_histogram_variance(h1) == 5.0625);
-  GPR_ASSERT(gpr_histogram_stddev(h1) == 2.25);
-  gpr_histogram_destroy(h2);
-
-  h2 = gpr_histogram_create(0.05, 1e9);
-  gpr_histogram_add(h2, 7.0);
-  gpr_histogram_add(h2, 17.0);
-  gpr_histogram_add(h2, 1.0);
-  GPR_ASSERT(gpr_histogram_merge(h1, h2) == 1);
-  GPR_ASSERT(gpr_histogram_count(h1) == 7);
-  GPR_ASSERT(gpr_histogram_minimum(h1) == 1.0);
-  GPR_ASSERT(gpr_histogram_maximum(h1) == 17.0);
-  GPR_ASSERT(gpr_histogram_sum(h1) == 42.0);
-  GPR_ASSERT(gpr_histogram_sum_of_squares(h1) == 431.5);
-  GPR_ASSERT(gpr_histogram_mean(h1) == 6.0);
-
-  /* test monotonicity */
-  last = 0.0;
-  for (i = 0; i < 100.0; i += 0.01) {
-    cur = gpr_histogram_percentile(h1, i);
-    GPR_ASSERT(cur >= last);
-    last = cur;
-  }
-
-  gpr_histogram_destroy(h1);
-  gpr_histogram_destroy(h2);
-}
-
-int main(void) {
-  test_no_op();
-  test_simple();
-  test_percentile();
-  test_merge();
-  return 0;
-}
diff --git a/test/core/support/mpscq_test.cc b/test/core/support/mpscq_test.cc
index 50ff817..1b83f7d 100644
--- a/test/core/support/mpscq_test.cc
+++ b/test/core/support/mpscq_test.cc
@@ -85,7 +85,8 @@
     ta[i].ctr = 0;
     ta[i].q = &q;
     ta[i].start = &start;
-    GPR_ASSERT(gpr_thd_new(&thds[i], test_thread, &ta[i], &options));
+    GPR_ASSERT(
+        gpr_thd_new(&thds[i], "grpc_mt_test", test_thread, &ta[i], &options));
   }
   size_t num_done = 0;
   size_t spins = 0;
@@ -156,7 +157,8 @@
     ta[i].ctr = 0;
     ta[i].q = &q;
     ta[i].start = &start;
-    GPR_ASSERT(gpr_thd_new(&thds[i], test_thread, &ta[i], &options));
+    GPR_ASSERT(gpr_thd_new(&thds[i], "grpc_multipop_test", test_thread, &ta[i],
+                           &options));
   }
   pull_args pa;
   pa.ta = ta;
@@ -169,7 +171,8 @@
   for (size_t i = 0; i < GPR_ARRAY_SIZE(pull_thds); i++) {
     gpr_thd_options options = gpr_thd_options_default();
     gpr_thd_options_set_joinable(&options);
-    GPR_ASSERT(gpr_thd_new(&pull_thds[i], pull_thread, &pa, &options));
+    GPR_ASSERT(gpr_thd_new(&pull_thds[i], "grpc_multipop_pull", pull_thread,
+                           &pa, &options));
   }
   gpr_event_set(&start, (void*)1);
   for (size_t i = 0; i < GPR_ARRAY_SIZE(pull_thds); i++) {
diff --git a/test/core/support/ref_counted_ptr_test.cc b/test/core/support/ref_counted_ptr_test.cc
new file mode 100644
index 0000000..1830edc
--- /dev/null
+++ b/test/core/support/ref_counted_ptr_test.cc
@@ -0,0 +1,172 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/support/ref_counted_ptr.h"
+
+#include <gtest/gtest.h>
+
+#include <grpc/support/log.h>
+
+#include "src/core/lib/support/memory.h"
+#include "src/core/lib/support/ref_counted.h"
+#include "test/core/util/test_config.h"
+
+namespace grpc_core {
+namespace testing {
+namespace {
+
+class Foo : public RefCounted {
+ public:
+  Foo() : value_(0) {}
+
+  explicit Foo(int value) : value_(value) {}
+
+  int value() const { return value_; }
+
+ private:
+  int value_;
+};
+
+TEST(RefCountedPtr, DefaultConstructor) { RefCountedPtr<Foo> foo; }
+
+TEST(RefCountedPtr, ExplicitConstructorEmpty) {
+  RefCountedPtr<Foo> foo(nullptr);
+}
+
+TEST(RefCountedPtr, ExplicitConstructor) { RefCountedPtr<Foo> foo(New<Foo>()); }
+
+TEST(RefCountedPtr, MoveConstructor) {
+  RefCountedPtr<Foo> foo(New<Foo>());
+  RefCountedPtr<Foo> foo2(std::move(foo));
+  EXPECT_EQ(nullptr, foo.get());
+  EXPECT_NE(nullptr, foo2.get());
+}
+
+TEST(RefCountedPtr, MoveAssignment) {
+  RefCountedPtr<Foo> foo(New<Foo>());
+  RefCountedPtr<Foo> foo2 = std::move(foo);
+  EXPECT_EQ(nullptr, foo.get());
+  EXPECT_NE(nullptr, foo2.get());
+}
+
+TEST(RefCountedPtr, CopyConstructor) {
+  RefCountedPtr<Foo> foo(New<Foo>());
+  RefCountedPtr<Foo> foo2(foo);
+  EXPECT_NE(nullptr, foo.get());
+  EXPECT_EQ(foo.get(), foo2.get());
+}
+
+TEST(RefCountedPtr, CopyAssignment) {
+  RefCountedPtr<Foo> foo(New<Foo>());
+  RefCountedPtr<Foo> foo2 = foo;
+  EXPECT_NE(nullptr, foo.get());
+  EXPECT_EQ(foo.get(), foo2.get());
+}
+
+TEST(RefCountedPtr, CopyAssignmentWhenEmpty) {
+  RefCountedPtr<Foo> foo;
+  RefCountedPtr<Foo> foo2;
+  foo2 = foo;
+  EXPECT_EQ(nullptr, foo.get());
+  EXPECT_EQ(nullptr, foo2.get());
+}
+
+TEST(RefCountedPtr, CopyAssignmentToSelf) {
+  RefCountedPtr<Foo> foo(New<Foo>());
+  foo = foo;
+}
+
+TEST(RefCountedPtr, EnclosedScope) {
+  RefCountedPtr<Foo> foo(New<Foo>());
+  {
+    RefCountedPtr<Foo> foo2(std::move(foo));
+    EXPECT_EQ(nullptr, foo.get());
+    EXPECT_NE(nullptr, foo2.get());
+  }
+  EXPECT_EQ(nullptr, foo.get());
+}
+
+TEST(RefCountedPtr, ResetFromNullToNonNull) {
+  RefCountedPtr<Foo> foo;
+  EXPECT_EQ(nullptr, foo.get());
+  foo.reset(New<Foo>());
+  EXPECT_NE(nullptr, foo.get());
+}
+
+TEST(RefCountedPtr, ResetFromNonNullToNonNull) {
+  RefCountedPtr<Foo> foo(New<Foo>());
+  EXPECT_NE(nullptr, foo.get());
+  Foo* original = foo.get();
+  foo.reset(New<Foo>());
+  EXPECT_NE(nullptr, foo.get());
+  EXPECT_NE(original, foo.get());
+}
+
+TEST(RefCountedPtr, ResetFromNonNullToNull) {
+  RefCountedPtr<Foo> foo(New<Foo>());
+  EXPECT_NE(nullptr, foo.get());
+  foo.reset();
+  EXPECT_EQ(nullptr, foo.get());
+}
+
+TEST(RefCountedPtr, ResetFromNullToNull) {
+  RefCountedPtr<Foo> foo;
+  EXPECT_EQ(nullptr, foo.get());
+  foo.reset(nullptr);
+  EXPECT_EQ(nullptr, foo.get());
+}
+
+TEST(RefCountedPtr, DereferenceOperators) {
+  RefCountedPtr<Foo> foo(New<Foo>());
+  foo->value();
+  Foo& foo_ref = *foo;
+  foo_ref.value();
+}
+
+TEST(MakeRefCounted, NoArgs) {
+  RefCountedPtr<Foo> foo = MakeRefCounted<Foo>();
+  EXPECT_EQ(0, foo->value());
+}
+
+TEST(MakeRefCounted, Args) {
+  RefCountedPtr<Foo> foo = MakeRefCounted<Foo>(3);
+  EXPECT_EQ(3, foo->value());
+}
+
+TraceFlag foo_tracer(true, "foo");
+
+class FooWithTracing : public RefCountedWithTracing {
+ public:
+  FooWithTracing() : RefCountedWithTracing(&foo_tracer) {}
+};
+
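+// Ref(DEBUG_LOCATION, "reason") / Unref(DEBUG_LOCATION, "reason") additionally
+// log the refcount transition when the associated trace flag ("foo" here) is
+// enabled.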
+TEST(RefCountedPtr, RefCountedWithTracing) {
+  RefCountedPtr<FooWithTracing> foo(New<FooWithTracing>());
+  foo->Ref(DEBUG_LOCATION, "foo");
+  foo->Unref(DEBUG_LOCATION, "foo");
+}
+
+}  // namespace
+}  // namespace testing
+}  // namespace grpc_core
+
+int main(int argc, char** argv) {
+  grpc_test_init(argc, argv);
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/test/core/support/ref_counted_test.cc b/test/core/support/ref_counted_test.cc
new file mode 100644
index 0000000..be9b6ff
--- /dev/null
+++ b/test/core/support/ref_counted_test.cc
@@ -0,0 +1,72 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "src/core/lib/support/ref_counted.h"
+
+#include <gtest/gtest.h>
+
+#include "src/core/lib/support/memory.h"
+#include "test/core/util/test_config.h"
+
+namespace grpc_core {
+namespace testing {
+namespace {
+
+class Foo : public RefCounted {
+ public:
+  Foo() {}
+};
+
+TEST(RefCounted, Basic) {
+  Foo* foo = New<Foo>();
+  foo->Unref();
+}
+
+TEST(RefCounted, ExtraRef) {
+  Foo* foo = New<Foo>();
+  foo->Ref();
+  foo->Unref();
+  foo->Unref();
+}
+
+TraceFlag foo_tracer(true, "foo");
+
+class FooWithTracing : public RefCountedWithTracing {
+ public:
+  FooWithTracing() : RefCountedWithTracing(&foo_tracer) {}
+};
+
+TEST(RefCountedWithTracing, Basic) {
+  FooWithTracing* foo = New<FooWithTracing>();
+  foo->Ref(DEBUG_LOCATION, "extra_ref");
+  foo->Unref(DEBUG_LOCATION, "extra_ref");
+  // Can use the no-argument methods, too.
+  foo->Ref();
+  foo->Unref();
+  foo->Unref(DEBUG_LOCATION, "original_ref");
+}
+
+}  // namespace
+}  // namespace testing
+}  // namespace grpc_core
+
+int main(int argc, char** argv) {
+  grpc_test_init(argc, argv);
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/test/core/support/spinlock_test.cc b/test/core/support/spinlock_test.cc
index 3639802..58d5fcd 100644
--- a/test/core/support/spinlock_test.cc
+++ b/test/core/support/spinlock_test.cc
@@ -67,7 +67,8 @@
   for (i = 0; i != m->thread_count; i++) {
     gpr_thd_options opt = gpr_thd_options_default();
     gpr_thd_options_set_joinable(&opt);
-    GPR_ASSERT(gpr_thd_new(&m->threads[i], body, m, &opt));
+    GPR_ASSERT(
+        gpr_thd_new(&m->threads[i], "grpc_create_threads", body, m, &opt));
   }
 }
 
diff --git a/test/core/support/sync_test.cc b/test/core/support/sync_test.cc
index 86e78ce..fb7ec44 100644
--- a/test/core/support/sync_test.cc
+++ b/test/core/support/sync_test.cc
@@ -73,7 +73,7 @@
      corresponding condition variable.  The predicate must be on state
      protected by the lock.  */
   while (q->length == N) {
-    gpr_cv_wait(&q->non_full, &q->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&q->non_full, &q->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   if (q->length == 0) { /* Wake threads blocked in queue_remove(). */
     /* It's normal to use gpr_cv_broadcast() or gpr_signal() while
@@ -189,7 +189,7 @@
   gpr_thd_id id;
   int i;
   for (i = 0; i != m->threads; i++) {
-    GPR_ASSERT(gpr_thd_new(&id, body, m, nullptr));
+    GPR_ASSERT(gpr_thd_new(&id, "grpc_create_threads", body, m, nullptr));
   }
 }
 
@@ -197,7 +197,7 @@
 static void test_wait(struct test* m) {
   gpr_mu_lock(&m->mu);
   while (m->done != 0) {
-    gpr_cv_wait(&m->done_cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&m->done_cv, &m->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(&m->mu);
 }
@@ -244,7 +244,7 @@
     m = test_new(10, iterations, incr_step);
     if (extra != nullptr) {
       gpr_thd_id id;
-      GPR_ASSERT(gpr_thd_new(&id, extra, m, nullptr));
+      GPR_ASSERT(gpr_thd_new(&id, name, extra, m, nullptr));
       m->done++; /* one more thread to wait for */
     }
     test_create_threads(m, body);
@@ -297,7 +297,7 @@
   for (i = 0; i != m->iterations; i++) {
     gpr_mu_lock(&m->mu);
     while ((m->counter % m->threads) != id) {
-      gpr_cv_wait(&m->cv, &m->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
+      gpr_cv_wait(&m->cv, &m->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
     }
     m->counter++;
     gpr_cv_broadcast(&m->cv);
@@ -314,7 +314,7 @@
   for (i = 0; i != m->iterations; i++) {
     gpr_timespec deadline;
     gpr_mu_lock(&m->mu);
-    deadline = gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+    deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                             gpr_time_from_micros(1000, GPR_TIMESPAN));
     while (!gpr_cv_wait(&m->cv, &m->mu, deadline)) {
     }
@@ -370,14 +370,14 @@
   int64_t i;
   int value;
   for (i = 0; i != n; i++) {
-    queue_remove(&m->q, &value, gpr_inf_future(GPR_CLOCK_REALTIME));
+    queue_remove(&m->q, &value, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_lock(&m->mu);
   m->counter = n;
   gpr_mu_unlock(&m->mu);
   GPR_ASSERT(
       !queue_remove(&m->q, &value,
-                    gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
+                    gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                  gpr_time_from_micros(1000000, GPR_TIMESPAN))));
   mark_thread_done(m);
 }
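
The sync_test changes above move every condition-variable deadline from GPR_CLOCK_REALTIME to GPR_CLOCK_MONOTONIC, so waits are unaffected by wall-clock adjustments. A sketch of the resulting timed-wait pattern; the helper name and the done flag are illustrative, and the non-zero-means-expired reading of gpr_cv_wait is taken from the loop in the hunk above:

    #include <grpc/support/sync.h>
    #include <grpc/support/time.h>

    static void wait_until_done(gpr_mu* mu, gpr_cv* cv, const int* done) {
      gpr_mu_lock(mu);
      while (!*done) {
        // Deadline computed on the monotonic clock, as in the hunks above.
        gpr_timespec deadline =
            gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                         gpr_time_from_micros(1000, GPR_TIMESPAN));
        if (gpr_cv_wait(cv, mu, deadline)) break;  // deadline expired
      }
      gpr_mu_unlock(mu);
    }
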
diff --git a/test/core/support/thd_test.cc b/test/core/support/thd_test.cc
index 34befd8..b755bf1 100644
--- a/test/core/support/thd_test.cc
+++ b/test/core/support/thd_test.cc
@@ -74,7 +74,7 @@
   t.n = NUM_THREADS;
   t.is_done = 0;
   for (i = 0; i < NUM_THREADS; i++) {
-    GPR_ASSERT(gpr_thd_new(&thd, &thd_body, &t, nullptr));
+    GPR_ASSERT(gpr_thd_new(&thd, "grpc_thread_test", &thd_body, &t, nullptr));
   }
   gpr_mu_lock(&t.mu);
   while (!t.is_done) {
@@ -84,7 +84,8 @@
   GPR_ASSERT(t.n == 0);
   gpr_thd_options_set_joinable(&options);
   for (i = 0; i < NUM_THREADS; i++) {
-    GPR_ASSERT(gpr_thd_new(&thds[i], &thd_body_joinable, nullptr, &options));
+    GPR_ASSERT(gpr_thd_new(&thds[i], "grpc_joinable_thread_test",
+                           &thd_body_joinable, nullptr, &options));
   }
   for (i = 0; i < NUM_THREADS; i++) {
     gpr_thd_join(thds[i]);
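
Every gpr_thd_new call in these test files gains a thread-name string as its second argument. A sketch of the updated call shape, matching the call sites above; the body, name, and helper function are illustrative:

    #include <grpc/support/log.h>
    #include <grpc/support/thd.h>

    static void worker_body(void* arg) { /* test work goes here */ }

    static void spawn_named_worker(void) {
      gpr_thd_id tid;
      gpr_thd_options opt = gpr_thd_options_default();
      gpr_thd_options_set_joinable(&opt);
      // Argument order after the change: (id, name, body, arg, options).
      GPR_ASSERT(gpr_thd_new(&tid, "grpc_example_worker", worker_body, nullptr,
                             &opt));
      gpr_thd_join(tid);
    }
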
diff --git a/test/core/support/tls_test.cc b/test/core/support/tls_test.cc
index 0f64d2e..743b10f 100644
--- a/test/core/support/tls_test.cc
+++ b/test/core/support/tls_test.cc
@@ -56,7 +56,7 @@
   gpr_thd_options_set_joinable(&opt);
 
   for (i = 0; i < NUM_THREADS; i++) {
-    gpr_thd_new(&threads[i], thd_body, nullptr, &opt);
+    gpr_thd_new(&threads[i], "grpc_tls_test", thd_body, nullptr, &opt);
   }
   for (i = 0; i < NUM_THREADS; i++) {
     gpr_thd_join(threads[i]);
diff --git a/test/core/surface/byte_buffer_reader_test.cc b/test/core/surface/byte_buffer_reader_test.cc
index e5d2d7c..94a8615 100644
--- a/test/core/surface/byte_buffer_reader_test.cc
+++ b/test/core/surface/byte_buffer_reader_test.cc
@@ -132,10 +132,8 @@
   memset(GRPC_SLICE_START_PTR(input_slice), 'a', input_size);
   grpc_slice_buffer_add(&sliceb_in, input_slice); /* takes ownership */
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    GPR_ASSERT(
-        grpc_msg_compress(&exec_ctx, algorithm, &sliceb_in, &sliceb_out));
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    GPR_ASSERT(grpc_msg_compress(algorithm, &sliceb_in, &sliceb_out));
   }
 
   buffer = grpc_raw_compressed_byte_buffer_create(sliceb_out.slices,
@@ -267,6 +265,7 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   test_read_one_slice();
   test_read_one_slice_malloc();
   test_read_none_compressed_slice();
@@ -276,5 +275,6 @@
   test_byte_buffer_from_reader();
   test_byte_buffer_copy();
   test_readall();
+  grpc_shutdown();
   return 0;
 }
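
The recurring transformation in this section replaces explicit grpc_exec_ctx threading with a scoped grpc_core::ExecCtx, and core entry points drop their exec_ctx parameter. A before/after sketch using the compression call from the hunk above; that pending work is flushed when the scope closes is inferred from the removal of grpc_exec_ctx_finish and stated here only as an assumption:

    // Before: an explicit exec_ctx was created, passed, and finished by hand.
    //   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    //   GPR_ASSERT(grpc_msg_compress(&exec_ctx, algorithm, &sliceb_in, &sliceb_out));
    //   grpc_exec_ctx_finish(&exec_ctx);

    // After: a scoped ExecCtx is established for the current thread; core
    // calls pick it up implicitly and it is torn down at the closing brace.
    {
      grpc_core::ExecCtx exec_ctx;
      GPR_ASSERT(grpc_msg_compress(algorithm, &sliceb_in, &sliceb_out));
    }
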
diff --git a/test/core/surface/channel_create_test.cc b/test/core/surface/channel_create_test.cc
index f358b0f..37247f8 100644
--- a/test/core/surface/channel_create_test.cc
+++ b/test/core/surface/channel_create_test.cc
@@ -35,11 +35,10 @@
   chan = grpc_insecure_channel_create("blah://blah", nullptr, nullptr);
   GPR_ASSERT(chan != nullptr);
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_channel_element* elem =
       grpc_channel_stack_element(grpc_channel_get_channel_stack(chan), 0);
   GPR_ASSERT(0 == strcmp(elem->filter->name, "lame-client"));
-  grpc_exec_ctx_finish(&exec_ctx);
 
   grpc_channel_destroy(chan);
 }
diff --git a/test/core/surface/completion_queue_test.cc b/test/core/surface/completion_queue_test.cc
index c6e13d2..fefbb3c 100644
--- a/test/core/surface/completion_queue_test.cc
+++ b/test/core/surface/completion_queue_test.cc
@@ -121,8 +121,7 @@
   }
 }
 
-static void do_nothing_end_completion(grpc_exec_ctx* exec_ctx, void* arg,
-                                      grpc_cq_completion* c) {}
+static void do_nothing_end_completion(void* arg, grpc_cq_completion* c) {}
 
 static void test_cq_end_op(void) {
   grpc_event ev;
@@ -131,8 +130,6 @@
   grpc_cq_polling_type polling_types[] = {
       GRPC_CQ_DEFAULT_POLLING, GRPC_CQ_NON_LISTENING, GRPC_CQ_NON_POLLING};
   grpc_completion_queue_attributes attr;
-  grpc_exec_ctx init_exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_exec_ctx exec_ctx;
   void* tag = create_test_tag();
 
   LOG_TEST("test_cq_end_op");
@@ -140,14 +137,14 @@
   attr.version = 1;
   attr.cq_completion_type = GRPC_CQ_NEXT;
   for (size_t i = 0; i < GPR_ARRAY_SIZE(polling_types); i++) {
-    exec_ctx = init_exec_ctx;  // Reset exec_ctx
+    grpc_core::ExecCtx exec_ctx;
     attr.cq_polling_type = polling_types[i];
     cc = grpc_completion_queue_create(
         grpc_completion_queue_factory_lookup(&attr), &attr, nullptr);
 
     GPR_ASSERT(grpc_cq_begin_op(cc, tag));
-    grpc_cq_end_op(&exec_ctx, cc, tag, GRPC_ERROR_NONE,
-                   do_nothing_end_completion, nullptr, &completion);
+    grpc_cq_end_op(cc, tag, GRPC_ERROR_NONE, do_nothing_end_completion, nullptr,
+                   &completion);
 
     ev = grpc_completion_queue_next(cc, gpr_inf_past(GPR_CLOCK_REALTIME),
                                     nullptr);
@@ -156,7 +153,6 @@
     GPR_ASSERT(ev.success);
 
     shutdown_and_destroy(cc);
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 }
 
@@ -167,8 +163,6 @@
   grpc_cq_polling_type polling_types[] = {
       GRPC_CQ_DEFAULT_POLLING, GRPC_CQ_NON_LISTENING, GRPC_CQ_NON_POLLING};
   grpc_completion_queue_attributes attr;
-  grpc_exec_ctx init_exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_exec_ctx exec_ctx;
   void* tag = create_test_tag();
   void* res_tag;
   int ok;
@@ -178,15 +172,15 @@
   attr.version = 1;
   attr.cq_completion_type = GRPC_CQ_NEXT;
   for (size_t i = 0; i < GPR_ARRAY_SIZE(polling_types); i++) {
-    exec_ctx = init_exec_ctx;  // Reset exec_ctx
+    grpc_core::ExecCtx exec_ctx;  // fresh exec_ctx each iteration
     attr.cq_polling_type = polling_types[i];
     cc = grpc_completion_queue_create(
         grpc_completion_queue_factory_lookup(&attr), &attr, nullptr);
 
     grpc_completion_queue_thread_local_cache_init(cc);
     GPR_ASSERT(grpc_cq_begin_op(cc, tag));
-    grpc_cq_end_op(&exec_ctx, cc, tag, GRPC_ERROR_NONE,
-                   do_nothing_end_completion, nullptr, &completion);
+    grpc_cq_end_op(cc, tag, GRPC_ERROR_NONE, do_nothing_end_completion, nullptr,
+                   &completion);
 
     ev = grpc_completion_queue_next(cc, gpr_inf_past(GPR_CLOCK_REALTIME),
                                     nullptr);
@@ -202,7 +196,6 @@
     GPR_ASSERT(ev.type == GRPC_QUEUE_TIMEOUT);
 
     shutdown_and_destroy(cc);
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 }
 
@@ -211,8 +204,6 @@
   grpc_cq_polling_type polling_types[] = {
       GRPC_CQ_DEFAULT_POLLING, GRPC_CQ_NON_LISTENING, GRPC_CQ_NON_POLLING};
   grpc_completion_queue_attributes attr;
-  grpc_exec_ctx init_exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_exec_ctx exec_ctx;
   void* res_tag;
   int ok;
 
@@ -221,7 +212,7 @@
   attr.version = 1;
   attr.cq_completion_type = GRPC_CQ_NEXT;
   for (size_t i = 0; i < GPR_ARRAY_SIZE(polling_types); i++) {
-    exec_ctx = init_exec_ctx;  // Reset exec_ctx
+    grpc_core::ExecCtx exec_ctx;  // fresh exec_ctx each iteration
     attr.cq_polling_type = polling_types[i];
     cc = grpc_completion_queue_create(
         grpc_completion_queue_factory_lookup(&attr), &attr, nullptr);
@@ -232,7 +223,6 @@
     GPR_ASSERT(
         grpc_completion_queue_thread_local_cache_flush(cc, &res_tag, &ok) == 0);
     shutdown_and_destroy(cc);
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 }
 
@@ -289,8 +279,6 @@
   grpc_cq_polling_type polling_types[] = {
       GRPC_CQ_DEFAULT_POLLING, GRPC_CQ_NON_LISTENING, GRPC_CQ_NON_POLLING};
   grpc_completion_queue_attributes attr;
-  grpc_exec_ctx init_exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_exec_ctx exec_ctx;
   unsigned i, j;
 
   LOG_TEST("test_pluck");
@@ -305,15 +293,15 @@
   attr.version = 1;
   attr.cq_completion_type = GRPC_CQ_PLUCK;
   for (size_t pidx = 0; pidx < GPR_ARRAY_SIZE(polling_types); pidx++) {
-    exec_ctx = init_exec_ctx;  // reset exec_ctx
+    grpc_core::ExecCtx exec_ctx;  // fresh exec_ctx each iteration
     attr.cq_polling_type = polling_types[pidx];
     cc = grpc_completion_queue_create(
         grpc_completion_queue_factory_lookup(&attr), &attr, nullptr);
 
     for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
       GPR_ASSERT(grpc_cq_begin_op(cc, tags[i]));
-      grpc_cq_end_op(&exec_ctx, cc, tags[i], GRPC_ERROR_NONE,
-                     do_nothing_end_completion, nullptr, &completions[i]);
+      grpc_cq_end_op(cc, tags[i], GRPC_ERROR_NONE, do_nothing_end_completion,
+                     nullptr, &completions[i]);
     }
 
     for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
@@ -324,8 +312,8 @@
 
     for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
       GPR_ASSERT(grpc_cq_begin_op(cc, tags[i]));
-      grpc_cq_end_op(&exec_ctx, cc, tags[i], GRPC_ERROR_NONE,
-                     do_nothing_end_completion, nullptr, &completions[i]);
+      grpc_cq_end_op(cc, tags[i], GRPC_ERROR_NONE, do_nothing_end_completion,
+                     nullptr, &completions[i]);
     }
 
     for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
@@ -336,7 +324,6 @@
     }
 
     shutdown_and_destroy(cc);
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 }
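
Completion-queue callbacks and grpc_cq_end_op follow the same pattern: the leading grpc_exec_ctx* parameter disappears and the call runs under a scoped ExecCtx. A condensed sketch of one iteration from the loops above; the helper name is illustrative and the gRPC-internal headers are assumed included:

    // Callback signature after the change: no exec_ctx parameter.
    static void on_completion(void* arg, grpc_cq_completion* c) {}

    static void end_one_op(grpc_completion_queue* cc, void* tag,
                           grpc_cq_completion* storage) {
      grpc_core::ExecCtx exec_ctx;
      GPR_ASSERT(grpc_cq_begin_op(cc, tag));
      grpc_cq_end_op(cc, tag, GRPC_ERROR_NONE, on_completion, nullptr, storage);
    }
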
 
diff --git a/test/core/surface/completion_queue_threading_test.cc b/test/core/surface/completion_queue_threading_test.cc
index af54e00..4a9e818 100644
--- a/test/core/surface/completion_queue_threading_test.cc
+++ b/test/core/surface/completion_queue_threading_test.cc
@@ -59,8 +59,7 @@
   grpc_completion_queue_destroy(cc);
 }
 
-static void do_nothing_end_completion(grpc_exec_ctx* exec_ctx, void* arg,
-                                      grpc_cq_completion* c) {}
+static void do_nothing_end_completion(void* arg, grpc_cq_completion* c) {}
 
 struct thread_state {
   grpc_completion_queue* cc;
@@ -81,7 +80,7 @@
   gpr_thd_id thread_ids[GPR_ARRAY_SIZE(tags)];
   struct thread_state thread_states[GPR_ARRAY_SIZE(tags)];
   gpr_thd_options thread_options = gpr_thd_options_default();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   unsigned i, j;
 
   LOG_TEST("test_too_many_plucks");
@@ -96,7 +95,8 @@
     }
     thread_states[i].cc = cc;
     thread_states[i].tag = tags[i];
-    gpr_thd_new(thread_ids + i, pluck_one, thread_states + i, &thread_options);
+    gpr_thd_new(thread_ids + i, "grpc_pluck_test", pluck_one, thread_states + i,
+                &thread_options);
   }
 
   /* wait until all other threads are plucking */
@@ -108,8 +108,8 @@
 
   for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
     GPR_ASSERT(grpc_cq_begin_op(cc, tags[i]));
-    grpc_cq_end_op(&exec_ctx, cc, tags[i], GRPC_ERROR_NONE,
-                   do_nothing_end_completion, nullptr, &completions[i]);
+    grpc_cq_end_op(cc, tags[i], GRPC_ERROR_NONE, do_nothing_end_completion,
+                   nullptr, &completions[i]);
   }
 
   for (i = 0; i < GPR_ARRAY_SIZE(tags); i++) {
@@ -117,7 +117,6 @@
   }
 
   shutdown_and_destroy(cc);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 #define TEST_THREAD_EVENTS 10000
@@ -137,15 +136,13 @@
   return grpc_timeout_seconds_to_deadline(10);
 }
 
-static void free_completion(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_cq_completion* completion) {
+static void free_completion(void* arg, grpc_cq_completion* completion) {
   gpr_free(completion);
 }
 
 static void producer_thread(void* arg) {
   test_thread_options* opt = static_cast<test_thread_options*>(arg);
   int i;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
 
   gpr_log(GPR_INFO, "producer %d started", opt->id);
   gpr_event_set(&opt->on_started, (void*)(intptr_t)1);
@@ -162,17 +159,16 @@
 
   gpr_log(GPR_INFO, "producer %d phase 2", opt->id);
   for (i = 0; i < TEST_THREAD_EVENTS; i++) {
-    grpc_cq_end_op(&exec_ctx, opt->cc, (void*)(intptr_t)1, GRPC_ERROR_NONE,
+    grpc_core::ExecCtx exec_ctx;
+    grpc_cq_end_op(opt->cc, (void*)(intptr_t)1, GRPC_ERROR_NONE,
                    free_completion, nullptr,
                    static_cast<grpc_cq_completion*>(
                        gpr_malloc(sizeof(grpc_cq_completion))));
     opt->events_triggered++;
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 
   gpr_log(GPR_INFO, "producer %d phase 2 done", opt->id);
   gpr_event_set(&opt->on_finished, (void*)(intptr_t)1);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void consumer_thread(void* arg) {
@@ -234,6 +230,7 @@
     options[i].cc = cc;
     options[i].id = optid++;
     GPR_ASSERT(gpr_thd_new(&id,
+                           i < producers ? "grpc_producer" : "grpc_consumer",
                            i < producers ? producer_thread : consumer_thread,
                            options + i, nullptr));
     gpr_event_wait(&options[i].on_started, ten_seconds_time());
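
Note that the producer thread above now constructs a fresh grpc_core::ExecCtx inside its loop rather than holding one explicit exec_ctx for the whole thread, so queued work is presumably flushed once per iteration (inferred from the dropped grpc_exec_ctx_finish calls). The resulting loop shape, condensed from the hunk above:

    // One scoped ExecCtx per produced event.
    for (int i = 0; i < TEST_THREAD_EVENTS; i++) {
      grpc_core::ExecCtx exec_ctx;
      grpc_cq_end_op(opt->cc, (void*)(intptr_t)1, GRPC_ERROR_NONE,
                     free_completion, nullptr,
                     static_cast<grpc_cq_completion*>(
                         gpr_malloc(sizeof(grpc_cq_completion))));
      opt->events_triggered++;
    }
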
diff --git a/test/core/surface/concurrent_connectivity_test.cc b/test/core/surface/concurrent_connectivity_test.cc
index 8fa15ab..235d136 100644
--- a/test/core/surface/concurrent_connectivity_test.cc
+++ b/test/core/surface/concurrent_connectivity_test.cc
@@ -49,10 +49,11 @@
 #define NUM_OUTER_LOOPS_SHORT_TIMEOUTS 10
 #define NUM_INNER_LOOPS_SHORT_TIMEOUTS 100
 #define DELAY_MILLIS_SHORT_TIMEOUTS 1
-// in a successful test run, POLL_MILLIS should never be reached beause all runs
-// should
-// end after the shorter delay_millis
+// in a successful test run, POLL_MILLIS should never be reached because all
+// runs should end after the shorter delay_millis
 #define POLL_MILLIS_SHORT_TIMEOUTS 30000
+// it should never take longer than this to shut down the server
+#define SERVER_SHUTDOWN_TIMEOUT 30000
 
 static void* tag(int n) { return (void*)(uintptr_t)n; }
 static int detag(void* p) { return (int)(uintptr_t)p; }
@@ -95,35 +96,35 @@
 void server_thread(void* vargs) {
   struct server_thread_args* args = (struct server_thread_args*)vargs;
   grpc_event ev;
-  gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
+  gpr_timespec deadline =
+      grpc_timeout_milliseconds_to_deadline(SERVER_SHUTDOWN_TIMEOUT);
   ev = grpc_completion_queue_next(args->cq, deadline, nullptr);
   GPR_ASSERT(ev.type == GRPC_OP_COMPLETE);
   GPR_ASSERT(detag(ev.tag) == 0xd1e);
 }
 
-static void on_connect(grpc_exec_ctx* exec_ctx, void* vargs, grpc_endpoint* tcp,
+static void on_connect(void* vargs, grpc_endpoint* tcp,
                        grpc_pollset* accepting_pollset,
                        grpc_tcp_server_acceptor* acceptor) {
   gpr_free(acceptor);
   struct server_thread_args* args = (struct server_thread_args*)vargs;
-  grpc_endpoint_shutdown(exec_ctx, tcp,
+  grpc_endpoint_shutdown(tcp,
                          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Connected"));
-  grpc_endpoint_destroy(exec_ctx, tcp);
+  grpc_endpoint_destroy(tcp);
   gpr_mu_lock(args->mu);
-  GRPC_LOG_IF_ERROR("pollset_kick",
-                    grpc_pollset_kick(exec_ctx, args->pollset, nullptr));
+  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(args->pollset, nullptr));
   gpr_mu_unlock(args->mu);
 }
 
 void bad_server_thread(void* vargs) {
   struct server_thread_args* args = (struct server_thread_args*)vargs;
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_resolved_address resolved_addr;
   struct sockaddr_storage* addr = (struct sockaddr_storage*)resolved_addr.addr;
   int port;
   grpc_tcp_server* s;
-  grpc_error* error = grpc_tcp_server_create(&exec_ctx, nullptr, nullptr, &s);
+  grpc_error* error = grpc_tcp_server_create(nullptr, nullptr, &s);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   memset(&resolved_addr, 0, sizeof(resolved_addr));
   addr->ss_family = AF_INET;
@@ -132,35 +133,32 @@
   GPR_ASSERT(port > 0);
   gpr_asprintf(&args->addr, "localhost:%d", port);
 
-  grpc_tcp_server_start(&exec_ctx, s, &args->pollset, 1, on_connect, args);
+  grpc_tcp_server_start(s, &args->pollset, 1, on_connect, args);
   gpr_event_set(&args->ready, (void*)1);
 
   gpr_mu_lock(args->mu);
   while (gpr_atm_acq_load(&args->stop) == 0) {
-    grpc_millis deadline = grpc_exec_ctx_now(&exec_ctx) + 100;
+    grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 100;
 
     grpc_pollset_worker* worker = nullptr;
     if (!GRPC_LOG_IF_ERROR(
             "pollset_work",
-            grpc_pollset_work(&exec_ctx, args->pollset, &worker, deadline))) {
+            grpc_pollset_work(args->pollset, &worker, deadline))) {
       gpr_atm_rel_store(&args->stop, 1);
     }
     gpr_mu_unlock(args->mu);
-    grpc_exec_ctx_finish(&exec_ctx);
+
     gpr_mu_lock(args->mu);
   }
   gpr_mu_unlock(args->mu);
 
-  grpc_tcp_server_unref(&exec_ctx, s);
-
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_tcp_server_unref(s);
 
   gpr_free(args->addr);
 }
 
-static void done_pollset_shutdown(grpc_exec_ctx* exec_ctx, void* pollset,
-                                  grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(pollset));
+static void done_pollset_shutdown(void* pollset, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(pollset));
   gpr_free(pollset);
 }
 
@@ -180,7 +178,8 @@
   /* First round, no server */
   gpr_log(GPR_DEBUG, "Wave 1");
   for (size_t i = 0; i < NUM_THREADS; ++i) {
-    gpr_thd_new(&threads[i], create_loop_destroy, localhost, &options);
+    gpr_thd_new(&threads[i], "grpc_wave_1", create_loop_destroy, localhost,
+                &options);
   }
   for (size_t i = 0; i < NUM_THREADS; ++i) {
     gpr_thd_join(threads[i]);
@@ -196,10 +195,11 @@
   args.cq = grpc_completion_queue_create_for_next(nullptr);
   grpc_server_register_completion_queue(args.server, args.cq, nullptr);
   grpc_server_start(args.server);
-  gpr_thd_new(&server, server_thread, &args, &options);
+  gpr_thd_new(&server, "grpc_wave_2_server", server_thread, &args, &options);
 
   for (size_t i = 0; i < NUM_THREADS; ++i) {
-    gpr_thd_new(&threads[i], create_loop_destroy, args.addr, &options);
+    gpr_thd_new(&threads[i], "grpc_wave_2", create_loop_destroy, args.addr,
+                &options);
   }
   for (size_t i = 0; i < NUM_THREADS; ++i) {
     gpr_thd_join(threads[i]);
@@ -216,11 +216,13 @@
   args.pollset = static_cast<grpc_pollset*>(gpr_zalloc(grpc_pollset_size()));
   grpc_pollset_init(args.pollset, &args.mu);
   gpr_event_init(&args.ready);
-  gpr_thd_new(&server, bad_server_thread, &args, &options);
+  gpr_thd_new(&server, "grpc_wave_3_server", bad_server_thread, &args,
+              &options);
   gpr_event_wait(&args.ready, gpr_inf_future(GPR_CLOCK_MONOTONIC));
 
   for (size_t i = 0; i < NUM_THREADS; ++i) {
-    gpr_thd_new(&threads[i], create_loop_destroy, args.addr, &options);
+    gpr_thd_new(&threads[i], "grpc_wave_3", create_loop_destroy, args.addr,
+                &options);
   }
   for (size_t i = 0; i < NUM_THREADS; ++i) {
     gpr_thd_join(threads[i]);
@@ -228,11 +230,12 @@
 
   gpr_atm_rel_store(&args.stop, 1);
   gpr_thd_join(server);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_pollset_shutdown(&exec_ctx, args.pollset,
-                        GRPC_CLOSURE_CREATE(done_pollset_shutdown, args.pollset,
-                                            grpc_schedule_on_exec_ctx));
-  grpc_exec_ctx_finish(&exec_ctx);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    grpc_pollset_shutdown(
+        args.pollset, GRPC_CLOSURE_CREATE(done_pollset_shutdown, args.pollset,
+                                          grpc_schedule_on_exec_ctx));
+  }
 
   grpc_shutdown();
   return 0;
@@ -278,7 +281,8 @@
   gpr_thd_options_set_joinable(&options);
 
   for (size_t i = 0; i < NUM_THREADS; ++i) {
-    gpr_thd_new(&threads[i], watches_with_short_timeouts, localhost, &options);
+    gpr_thd_new(&threads[i], "grpc_short_watches", watches_with_short_timeouts,
+                localhost, &options);
   }
   for (size_t i = 0; i < NUM_THREADS; ++i) {
     gpr_thd_join(threads[i]);
diff --git a/test/core/surface/lame_client_test.cc b/test/core/surface/lame_client_test.cc
index f3df7f3..4bf4056 100644
--- a/test/core/surface/lame_client_test.cc
+++ b/test/core/surface/lame_client_test.cc
@@ -32,20 +32,19 @@
 
 static void* tag(intptr_t x) { return (void*)x; }
 
-void verify_connectivity(grpc_exec_ctx* exec_ctx, void* arg,
-                         grpc_error* error) {
+void verify_connectivity(void* arg, grpc_error* error) {
   grpc_connectivity_state* state = static_cast<grpc_connectivity_state*>(arg);
   GPR_ASSERT(GRPC_CHANNEL_SHUTDOWN == *state);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
 }
 
-void do_nothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+void do_nothing(void* arg, grpc_error* error) {}
 
 void test_transport_op(grpc_channel* channel) {
   grpc_transport_op* op;
   grpc_channel_element* elem;
   grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   GRPC_CLOSURE_INIT(&transport_op_cb, verify_connectivity, &state,
                     grpc_schedule_on_exec_ctx);
@@ -54,14 +53,12 @@
   op->on_connectivity_state_change = &transport_op_cb;
   op->connectivity_state = &state;
   elem = grpc_channel_stack_element(grpc_channel_get_channel_stack(channel), 0);
-  elem->filter->start_transport_op(&exec_ctx, elem, op);
-  grpc_exec_ctx_finish(&exec_ctx);
+  elem->filter->start_transport_op(elem, op);
 
   GRPC_CLOSURE_INIT(&transport_op_cb, do_nothing, nullptr,
                     grpc_schedule_on_exec_ctx);
   op = grpc_make_transport_op(&transport_op_cb);
-  elem->filter->start_transport_op(&exec_ctx, elem, op);
-  grpc_exec_ctx_finish(&exec_ctx);
+  elem->filter->start_transport_op(elem, op);
 }
 
 int main(int argc, char** argv) {
diff --git a/test/core/surface/num_external_connectivity_watchers_test.cc b/test/core/surface/num_external_connectivity_watchers_test.cc
index f78d333..9cdd299 100644
--- a/test/core/surface/num_external_connectivity_watchers_test.cc
+++ b/test/core/surface/num_external_connectivity_watchers_test.cc
@@ -178,9 +178,8 @@
   grpc_channel* channel =
       grpc_secure_channel_create(ssl_creds, addr, new_client_args, nullptr);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, new_client_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(new_client_args);
   }
   grpc_channel_credentials_release(ssl_creds);
   return channel;
diff --git a/test/core/surface/public_headers_must_be_c89.c b/test/core/surface/public_headers_must_be_c89.c
index 33dc70a..8d2384b 100644
--- a/test/core/surface/public_headers_must_be_c89.c
+++ b/test/core/surface/public_headers_must_be_c89.c
@@ -50,7 +50,6 @@
 #include <grpc/support/avl.h>
 #include <grpc/support/cmdline.h>
 #include <grpc/support/cpu.h>
-#include <grpc/support/histogram.h>
 #include <grpc/support/host_port.h>
 #include <grpc/support/log.h>
 #include <grpc/support/port_platform.h>
@@ -266,21 +265,6 @@
   printf("%lx", (unsigned long) gpr_cmdline_usage_string);
   printf("%lx", (unsigned long) gpr_cpu_num_cores);
   printf("%lx", (unsigned long) gpr_cpu_current_cpu);
-  printf("%lx", (unsigned long) gpr_histogram_create);
-  printf("%lx", (unsigned long) gpr_histogram_destroy);
-  printf("%lx", (unsigned long) gpr_histogram_add);
-  printf("%lx", (unsigned long) gpr_histogram_merge);
-  printf("%lx", (unsigned long) gpr_histogram_percentile);
-  printf("%lx", (unsigned long) gpr_histogram_mean);
-  printf("%lx", (unsigned long) gpr_histogram_stddev);
-  printf("%lx", (unsigned long) gpr_histogram_variance);
-  printf("%lx", (unsigned long) gpr_histogram_maximum);
-  printf("%lx", (unsigned long) gpr_histogram_minimum);
-  printf("%lx", (unsigned long) gpr_histogram_count);
-  printf("%lx", (unsigned long) gpr_histogram_sum);
-  printf("%lx", (unsigned long) gpr_histogram_sum_of_squares);
-  printf("%lx", (unsigned long) gpr_histogram_get_contents);
-  printf("%lx", (unsigned long) gpr_histogram_merge_contents);
   printf("%lx", (unsigned long) gpr_join_host_port);
   printf("%lx", (unsigned long) gpr_split_host_port);
   printf("%lx", (unsigned long) gpr_log_severity_string);
diff --git a/test/core/surface/secure_channel_create_test.cc b/test/core/surface/secure_channel_create_test.cc
index c31c614..fa22cd6 100644
--- a/test/core/surface/secure_channel_create_test.cc
+++ b/test/core/surface/secure_channel_create_test.cc
@@ -37,10 +37,9 @@
   grpc_channel_element* elem =
       grpc_channel_stack_element(grpc_channel_get_channel_stack(chan), 0);
   GPR_ASSERT(0 == strcmp(elem->filter->name, "lame-client"));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, chan, "test");
-  grpc_channel_credentials_unref(&exec_ctx, creds);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GRPC_CHANNEL_INTERNAL_UNREF(chan, "test");
+  grpc_channel_credentials_unref(creds);
 }
 
 void test_security_connector_already_in_arg(void) {
@@ -56,9 +55,8 @@
   grpc_channel_element* elem =
       grpc_channel_stack_element(grpc_channel_get_channel_stack(chan), 0);
   GPR_ASSERT(0 == strcmp(elem->filter->name, "lame-client"));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, chan, "test");
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GRPC_CHANNEL_INTERNAL_UNREF(chan, "test");
 }
 
 void test_null_creds(void) {
@@ -67,9 +65,8 @@
   grpc_channel_element* elem =
       grpc_channel_stack_element(grpc_channel_get_channel_stack(chan), 0);
   GPR_ASSERT(0 == strcmp(elem->filter->name, "lame-client"));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  GRPC_CHANNEL_INTERNAL_UNREF(&exec_ctx, chan, "test");
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  GRPC_CHANNEL_INTERNAL_UNREF(chan, "test");
 }
 
 int main(int argc, char** argv) {
diff --git a/test/core/surface/sequential_connectivity_test.cc b/test/core/surface/sequential_connectivity_test.cc
index 015db92..ac49bd9 100644
--- a/test/core/surface/sequential_connectivity_test.cc
+++ b/test/core/surface/sequential_connectivity_test.cc
@@ -70,7 +70,7 @@
   gpr_thd_id server_thread;
   gpr_thd_options thdopt = gpr_thd_options_default();
   gpr_thd_options_set_joinable(&thdopt);
-  gpr_thd_new(&server_thread, server_thread_func, &sta, &thdopt);
+  gpr_thd_new(&server_thread, "grpc_server", server_thread_func, &sta, &thdopt);
 
   grpc_completion_queue* cq = grpc_completion_queue_create_for_next(nullptr);
   grpc_channel* channels[NUM_CONNECTIONS];
@@ -156,9 +156,8 @@
   grpc_channel* channel =
       grpc_secure_channel_create(ssl_creds, addr, new_client_args, nullptr);
   {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_channel_args_destroy(&exec_ctx, new_client_args);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
+    grpc_channel_args_destroy(new_client_args);
   }
   grpc_channel_credentials_release(ssl_creds);
   return channel;
diff --git a/test/core/transport/bdp_estimator_test.cc b/test/core/transport/bdp_estimator_test.cc
index 2a6fa95..445823b 100644
--- a/test/core/transport/bdp_estimator_test.cc
+++ b/test/core/transport/bdp_estimator_test.cc
@@ -29,7 +29,7 @@
 #include "src/core/lib/support/string.h"
 #include "test/core/util/test_config.h"
 
-extern "C" gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
+extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
 
 namespace grpc_core {
 namespace testing {
@@ -58,7 +58,7 @@
 void AddSamples(BdpEstimator* estimator, int64_t* samples, size_t n) {
   estimator->AddIncomingBytes(1234567);
   inc_time();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   estimator->SchedulePing();
   estimator->StartPing();
   for (size_t i = 0; i < n; i++) {
@@ -66,9 +66,8 @@
   }
   gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                gpr_time_from_millis(1, GPR_TIMESPAN)));
-  grpc_exec_ctx_invalidate_now(&exec_ctx);
-  estimator->CompletePing(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->InvalidateNow();
+  estimator->CompletePing();
 }
 
 void AddSample(BdpEstimator* estimator, int64_t sample) {
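
The bdp_estimator test drives time by overriding the gpr_now_impl hook declared above (now plain extern rather than extern "C"). A sketch of such an override; the counter and fake_now helper names are illustrative, but the hook assignment matches the declaration in the hunk:

    #include <grpc/support/time.h>

    static int64_t g_fake_seconds = 0;

    static gpr_timespec fake_now(gpr_clock_type clock_type) {
      gpr_timespec ts = gpr_time_from_seconds(g_fake_seconds, GPR_TIMESPAN);
      ts.clock_type = clock_type;  // report the clock type the caller asked for
      return ts;
    }

    // Installed once at test startup; advancing g_fake_seconds then advances
    // every clock the library reads through gpr_now().
    static void install_fake_clock(void) { gpr_now_impl = fake_now; }
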
diff --git a/test/core/transport/byte_stream_test.cc b/test/core/transport/byte_stream_test.cc
index 0e34fd7..2aab6e9 100644
--- a/test/core/transport/byte_stream_test.cc
+++ b/test/core/transport/byte_stream_test.cc
@@ -18,6 +18,7 @@
 
 #include "src/core/lib/transport/byte_stream.h"
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include <grpc/support/useful.h>
@@ -30,14 +31,13 @@
 // grpc_slice_buffer_stream tests
 //
 
-static void not_called_closure(grpc_exec_ctx* exec_ctx, void* arg,
-                               grpc_error* error) {
+static void not_called_closure(void* arg, grpc_error* error) {
   GPR_ASSERT(false);
 }
 
 static void test_slice_buffer_stream_basic(void) {
   gpr_log(GPR_DEBUG, "test_slice_buffer_stream_basic");
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   // Create and populate slice buffer.
   grpc_slice_buffer buffer;
   grpc_slice_buffer_init(&buffer);
@@ -57,23 +57,21 @@
                     grpc_schedule_on_exec_ctx);
   // Read each slice.  Note that next() always returns synchronously.
   for (size_t i = 0; i < GPR_ARRAY_SIZE(input); ++i) {
-    GPR_ASSERT(
-        grpc_byte_stream_next(&exec_ctx, &stream.base, ~(size_t)0, &closure));
+    GPR_ASSERT(grpc_byte_stream_next(&stream.base, ~(size_t)0, &closure));
     grpc_slice output;
-    grpc_error* error = grpc_byte_stream_pull(&exec_ctx, &stream.base, &output);
+    grpc_error* error = grpc_byte_stream_pull(&stream.base, &output);
     GPR_ASSERT(error == GRPC_ERROR_NONE);
     GPR_ASSERT(grpc_slice_eq(input[i], output));
-    grpc_slice_unref_internal(&exec_ctx, output);
+    grpc_slice_unref_internal(output);
   }
   // Clean up.
-  grpc_byte_stream_destroy(&exec_ctx, &stream.base);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_byte_stream_destroy(&stream.base);
+  grpc_slice_buffer_destroy_internal(&buffer);
 }
 
 static void test_slice_buffer_stream_shutdown(void) {
   gpr_log(GPR_DEBUG, "test_slice_buffer_stream_shutdown");
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   // Create and populate slice buffer.
   grpc_slice_buffer buffer;
   grpc_slice_buffer_init(&buffer);
@@ -92,29 +90,25 @@
   GRPC_CLOSURE_INIT(&closure, not_called_closure, nullptr,
                     grpc_schedule_on_exec_ctx);
   // Read the first slice.
-  GPR_ASSERT(
-      grpc_byte_stream_next(&exec_ctx, &stream.base, ~(size_t)0, &closure));
+  GPR_ASSERT(grpc_byte_stream_next(&stream.base, ~(size_t)0, &closure));
   grpc_slice output;
-  grpc_error* error = grpc_byte_stream_pull(&exec_ctx, &stream.base, &output);
+  grpc_error* error = grpc_byte_stream_pull(&stream.base, &output);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   GPR_ASSERT(grpc_slice_eq(input[0], output));
-  grpc_slice_unref_internal(&exec_ctx, output);
+  grpc_slice_unref_internal(output);
   // Now shutdown.
   grpc_error* shutdown_error =
       GRPC_ERROR_CREATE_FROM_STATIC_STRING("shutdown error");
-  grpc_byte_stream_shutdown(&exec_ctx, &stream.base,
-                            GRPC_ERROR_REF(shutdown_error));
+  grpc_byte_stream_shutdown(&stream.base, GRPC_ERROR_REF(shutdown_error));
   // After shutdown, the next pull() should return the error.
-  GPR_ASSERT(
-      grpc_byte_stream_next(&exec_ctx, &stream.base, ~(size_t)0, &closure));
-  error = grpc_byte_stream_pull(&exec_ctx, &stream.base, &output);
+  GPR_ASSERT(grpc_byte_stream_next(&stream.base, ~(size_t)0, &closure));
+  error = grpc_byte_stream_pull(&stream.base, &output);
   GPR_ASSERT(error == shutdown_error);
   GRPC_ERROR_UNREF(error);
   GRPC_ERROR_UNREF(shutdown_error);
   // Clean up.
-  grpc_byte_stream_destroy(&exec_ctx, &stream.base);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_byte_stream_destroy(&stream.base);
+  grpc_slice_buffer_destroy_internal(&buffer);
 }
 
 //
@@ -123,7 +117,7 @@
 
 static void test_caching_byte_stream_basic(void) {
   gpr_log(GPR_DEBUG, "test_caching_byte_stream_basic");
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   // Create and populate slice buffer byte stream.
   grpc_slice_buffer buffer;
   grpc_slice_buffer_init(&buffer);
@@ -147,24 +141,22 @@
   // Read each slice.  Note that next() always returns synchronously,
   // because the underlying byte stream always does.
   for (size_t i = 0; i < GPR_ARRAY_SIZE(input); ++i) {
-    GPR_ASSERT(
-        grpc_byte_stream_next(&exec_ctx, &stream.base, ~(size_t)0, &closure));
+    GPR_ASSERT(grpc_byte_stream_next(&stream.base, ~(size_t)0, &closure));
     grpc_slice output;
-    grpc_error* error = grpc_byte_stream_pull(&exec_ctx, &stream.base, &output);
+    grpc_error* error = grpc_byte_stream_pull(&stream.base, &output);
     GPR_ASSERT(error == GRPC_ERROR_NONE);
     GPR_ASSERT(grpc_slice_eq(input[i], output));
-    grpc_slice_unref_internal(&exec_ctx, output);
+    grpc_slice_unref_internal(output);
   }
   // Clean up.
-  grpc_byte_stream_destroy(&exec_ctx, &stream.base);
-  grpc_byte_stream_cache_destroy(&exec_ctx, &cache);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_byte_stream_destroy(&stream.base);
+  grpc_byte_stream_cache_destroy(&cache);
+  grpc_slice_buffer_destroy_internal(&buffer);
 }
 
 static void test_caching_byte_stream_reset(void) {
   gpr_log(GPR_DEBUG, "test_caching_byte_stream_reset");
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   // Create and populate slice buffer byte stream.
   grpc_slice_buffer buffer;
   grpc_slice_buffer_init(&buffer);
@@ -186,34 +178,31 @@
   GRPC_CLOSURE_INIT(&closure, not_called_closure, nullptr,
                     grpc_schedule_on_exec_ctx);
   // Read one slice.
-  GPR_ASSERT(
-      grpc_byte_stream_next(&exec_ctx, &stream.base, ~(size_t)0, &closure));
+  GPR_ASSERT(grpc_byte_stream_next(&stream.base, ~(size_t)0, &closure));
   grpc_slice output;
-  grpc_error* error = grpc_byte_stream_pull(&exec_ctx, &stream.base, &output);
+  grpc_error* error = grpc_byte_stream_pull(&stream.base, &output);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   GPR_ASSERT(grpc_slice_eq(input[0], output));
-  grpc_slice_unref_internal(&exec_ctx, output);
+  grpc_slice_unref_internal(output);
   // Reset the caching stream.  The reads should start over from the
   // first slice.
   grpc_caching_byte_stream_reset(&stream);
   for (size_t i = 0; i < GPR_ARRAY_SIZE(input); ++i) {
-    GPR_ASSERT(
-        grpc_byte_stream_next(&exec_ctx, &stream.base, ~(size_t)0, &closure));
-    error = grpc_byte_stream_pull(&exec_ctx, &stream.base, &output);
+    GPR_ASSERT(grpc_byte_stream_next(&stream.base, ~(size_t)0, &closure));
+    error = grpc_byte_stream_pull(&stream.base, &output);
     GPR_ASSERT(error == GRPC_ERROR_NONE);
     GPR_ASSERT(grpc_slice_eq(input[i], output));
-    grpc_slice_unref_internal(&exec_ctx, output);
+    grpc_slice_unref_internal(output);
   }
   // Clean up.
-  grpc_byte_stream_destroy(&exec_ctx, &stream.base);
-  grpc_byte_stream_cache_destroy(&exec_ctx, &cache);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_byte_stream_destroy(&stream.base);
+  grpc_byte_stream_cache_destroy(&cache);
+  grpc_slice_buffer_destroy_internal(&buffer);
 }
 
 static void test_caching_byte_stream_shared_cache(void) {
   gpr_log(GPR_DEBUG, "test_caching_byte_stream_shared_cache");
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   // Create and populate slice buffer byte stream.
   grpc_slice_buffer buffer;
   grpc_slice_buffer_init(&buffer);
@@ -237,43 +226,41 @@
   GRPC_CLOSURE_INIT(&closure, not_called_closure, nullptr,
                     grpc_schedule_on_exec_ctx);
   // Read one slice from stream1.
-  GPR_ASSERT(
-      grpc_byte_stream_next(&exec_ctx, &stream1.base, ~(size_t)0, &closure));
+  GPR_ASSERT(grpc_byte_stream_next(&stream1.base, ~(size_t)0, &closure));
   grpc_slice output;
-  grpc_error* error = grpc_byte_stream_pull(&exec_ctx, &stream1.base, &output);
+  grpc_error* error = grpc_byte_stream_pull(&stream1.base, &output);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   GPR_ASSERT(grpc_slice_eq(input[0], output));
-  grpc_slice_unref_internal(&exec_ctx, output);
+  grpc_slice_unref_internal(output);
   // Read all slices from stream2.
   for (size_t i = 0; i < GPR_ARRAY_SIZE(input); ++i) {
-    GPR_ASSERT(
-        grpc_byte_stream_next(&exec_ctx, &stream2.base, ~(size_t)0, &closure));
-    error = grpc_byte_stream_pull(&exec_ctx, &stream2.base, &output);
+    GPR_ASSERT(grpc_byte_stream_next(&stream2.base, ~(size_t)0, &closure));
+    error = grpc_byte_stream_pull(&stream2.base, &output);
     GPR_ASSERT(error == GRPC_ERROR_NONE);
     GPR_ASSERT(grpc_slice_eq(input[i], output));
-    grpc_slice_unref_internal(&exec_ctx, output);
+    grpc_slice_unref_internal(output);
   }
   // Now read the second slice from stream1.
-  GPR_ASSERT(
-      grpc_byte_stream_next(&exec_ctx, &stream1.base, ~(size_t)0, &closure));
-  error = grpc_byte_stream_pull(&exec_ctx, &stream1.base, &output);
+  GPR_ASSERT(grpc_byte_stream_next(&stream1.base, ~(size_t)0, &closure));
+  error = grpc_byte_stream_pull(&stream1.base, &output);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   GPR_ASSERT(grpc_slice_eq(input[1], output));
-  grpc_slice_unref_internal(&exec_ctx, output);
+  grpc_slice_unref_internal(output);
   // Clean up.
-  grpc_byte_stream_destroy(&exec_ctx, &stream1.base);
-  grpc_byte_stream_destroy(&exec_ctx, &stream2.base);
-  grpc_byte_stream_cache_destroy(&exec_ctx, &cache);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &buffer);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_byte_stream_destroy(&stream1.base);
+  grpc_byte_stream_destroy(&stream2.base);
+  grpc_byte_stream_cache_destroy(&cache);
+  grpc_slice_buffer_destroy_internal(&buffer);
 }
 
 int main(int argc, char** argv) {
+  grpc_init();
   grpc_test_init(argc, argv);
   test_slice_buffer_stream_basic();
   test_slice_buffer_stream_shutdown();
   test_caching_byte_stream_basic();
   test_caching_byte_stream_reset();
   test_caching_byte_stream_shared_cache();
+  grpc_shutdown();
   return 0;
 }
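
The byte_stream tests apply the same signature change to grpc_byte_stream_next/pull/destroy. Draining a stream now looks like the following condensed restatement of the loop above, with stream, input, and closure assumed to be set up as in the test:

    for (size_t i = 0; i < GPR_ARRAY_SIZE(input); ++i) {
      GPR_ASSERT(grpc_byte_stream_next(&stream.base, ~(size_t)0, &closure));
      grpc_slice output;
      GPR_ASSERT(grpc_byte_stream_pull(&stream.base, &output) == GRPC_ERROR_NONE);
      GPR_ASSERT(grpc_slice_eq(input[i], output));
      grpc_slice_unref_internal(output);
    }
    grpc_byte_stream_destroy(&stream.base);
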
diff --git a/test/core/transport/chttp2/bin_decoder_test.cc b/test/core/transport/chttp2/bin_decoder_test.cc
index a29ec8a..6d70a42 100644
--- a/test/core/transport/chttp2/bin_decoder_test.cc
+++ b/test/core/transport/chttp2/bin_decoder_test.cc
@@ -20,6 +20,7 @@
 
 #include <string.h>
 
+#include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/log.h>
 #include "src/core/ext/transport/chttp2/transport/bin_encoder.h"
@@ -29,8 +30,8 @@
 
 static int all_ok = 1;
 
-static void expect_slice_eq(grpc_exec_ctx* exec_ctx, grpc_slice expected,
-                            grpc_slice slice, const char* debug, int line) {
+static void expect_slice_eq(grpc_slice expected, grpc_slice slice,
+                            const char* debug, int line) {
   if (!grpc_slice_eq(slice, expected)) {
     char* hs = grpc_dump_slice(slice, GPR_DUMP_HEX | GPR_DUMP_ASCII);
     char* he = grpc_dump_slice(expected, GPR_DUMP_HEX | GPR_DUMP_ASCII);
@@ -40,104 +41,97 @@
     gpr_free(he);
     all_ok = 0;
   }
-  grpc_slice_unref_internal(exec_ctx, expected);
-  grpc_slice_unref_internal(exec_ctx, slice);
+  grpc_slice_unref_internal(expected);
+  grpc_slice_unref_internal(slice);
 }
 
-static grpc_slice base64_encode(grpc_exec_ctx* exec_ctx, const char* s) {
+static grpc_slice base64_encode(const char* s) {
   grpc_slice ss = grpc_slice_from_copied_string(s);
   grpc_slice out = grpc_chttp2_base64_encode(ss);
-  grpc_slice_unref_internal(exec_ctx, ss);
+  grpc_slice_unref_internal(ss);
   return out;
 }
 
-static grpc_slice base64_decode(grpc_exec_ctx* exec_ctx, const char* s) {
+static grpc_slice base64_decode(const char* s) {
   grpc_slice ss = grpc_slice_from_copied_string(s);
-  grpc_slice out = grpc_chttp2_base64_decode(exec_ctx, ss);
-  grpc_slice_unref_internal(exec_ctx, ss);
+  grpc_slice out = grpc_chttp2_base64_decode(ss);
+  grpc_slice_unref_internal(ss);
   return out;
 }
 
-static grpc_slice base64_decode_with_length(grpc_exec_ctx* exec_ctx,
-                                            const char* s,
+static grpc_slice base64_decode_with_length(const char* s,
                                             size_t output_length) {
   grpc_slice ss = grpc_slice_from_copied_string(s);
-  grpc_slice out =
-      grpc_chttp2_base64_decode_with_length(exec_ctx, ss, output_length);
-  grpc_slice_unref_internal(exec_ctx, ss);
+  grpc_slice out = grpc_chttp2_base64_decode_with_length(ss, output_length);
+  grpc_slice_unref_internal(ss);
   return out;
 }
 
-#define EXPECT_SLICE_EQ(exec_ctx, expected, slice)                             \
-  expect_slice_eq(                                                             \
-      exec_ctx, grpc_slice_from_copied_buffer(expected, sizeof(expected) - 1), \
-      slice, #slice, __LINE__);
+#define EXPECT_SLICE_EQ(expected, slice)                                    \
+  expect_slice_eq(                                                          \
+      grpc_slice_from_copied_buffer(expected, sizeof(expected) - 1), slice, \
+      #slice, __LINE__);
 
-#define ENCODE_AND_DECODE(exec_ctx, s)                   \
-  EXPECT_SLICE_EQ(exec_ctx, s,                           \
-                  grpc_chttp2_base64_decode_with_length( \
-                      exec_ctx, base64_encode(exec_ctx, s), strlen(s)));
+#define ENCODE_AND_DECODE(s) \
+  EXPECT_SLICE_EQ(           \
+      s, grpc_chttp2_base64_decode_with_length(base64_encode(s), strlen(s)));
 
 int main(int argc, char** argv) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_init();
+  {
+    grpc_core::ExecCtx exec_ctx;
 
-  /* ENCODE_AND_DECODE tests grpc_chttp2_base64_decode_with_length(), which
-     takes encoded base64 strings without pad chars, but output length is
-     required. */
-  /* Base64 test vectors from RFC 4648 */
-  ENCODE_AND_DECODE(&exec_ctx, "");
-  ENCODE_AND_DECODE(&exec_ctx, "f");
-  ENCODE_AND_DECODE(&exec_ctx, "foo");
-  ENCODE_AND_DECODE(&exec_ctx, "fo");
-  ENCODE_AND_DECODE(&exec_ctx, "foob");
-  ENCODE_AND_DECODE(&exec_ctx, "fooba");
-  ENCODE_AND_DECODE(&exec_ctx, "foobar");
+    /* ENCODE_AND_DECODE tests grpc_chttp2_base64_decode_with_length(), which
+       takes encoded base64 strings without pad chars, but output length is
+       required. */
+    /* Base64 test vectors from RFC 4648 */
+    ENCODE_AND_DECODE("");
+    ENCODE_AND_DECODE("f");
+    ENCODE_AND_DECODE("foo");
+    ENCODE_AND_DECODE("fo");
+    ENCODE_AND_DECODE("foob");
+    ENCODE_AND_DECODE("fooba");
+    ENCODE_AND_DECODE("foobar");
 
-  ENCODE_AND_DECODE(&exec_ctx, "\xc0\xc1\xc2\xc3\xc4\xc5");
+    ENCODE_AND_DECODE("\xc0\xc1\xc2\xc3\xc4\xc5");
 
-  /* Base64 test vectors from RFC 4648, with pad chars */
-  /* BASE64("") = "" */
-  EXPECT_SLICE_EQ(&exec_ctx, "", base64_decode(&exec_ctx, ""));
-  /* BASE64("f") = "Zg==" */
-  EXPECT_SLICE_EQ(&exec_ctx, "f", base64_decode(&exec_ctx, "Zg=="));
-  /* BASE64("fo") = "Zm8=" */
-  EXPECT_SLICE_EQ(&exec_ctx, "fo", base64_decode(&exec_ctx, "Zm8="));
-  /* BASE64("foo") = "Zm9v" */
-  EXPECT_SLICE_EQ(&exec_ctx, "foo", base64_decode(&exec_ctx, "Zm9v"));
-  /* BASE64("foob") = "Zm9vYg==" */
-  EXPECT_SLICE_EQ(&exec_ctx, "foob", base64_decode(&exec_ctx, "Zm9vYg=="));
-  /* BASE64("fooba") = "Zm9vYmE=" */
-  EXPECT_SLICE_EQ(&exec_ctx, "fooba", base64_decode(&exec_ctx, "Zm9vYmE="));
-  /* BASE64("foobar") = "Zm9vYmFy" */
-  EXPECT_SLICE_EQ(&exec_ctx, "foobar", base64_decode(&exec_ctx, "Zm9vYmFy"));
+    /* Base64 test vectors from RFC 4648, with pad chars */
+    /* BASE64("") = "" */
+    EXPECT_SLICE_EQ("", base64_decode(""));
+    /* BASE64("f") = "Zg==" */
+    EXPECT_SLICE_EQ("f", base64_decode("Zg=="));
+    /* BASE64("fo") = "Zm8=" */
+    EXPECT_SLICE_EQ("fo", base64_decode("Zm8="));
+    /* BASE64("foo") = "Zm9v" */
+    EXPECT_SLICE_EQ("foo", base64_decode("Zm9v"));
+    /* BASE64("foob") = "Zm9vYg==" */
+    EXPECT_SLICE_EQ("foob", base64_decode("Zm9vYg=="));
+    /* BASE64("fooba") = "Zm9vYmE=" */
+    EXPECT_SLICE_EQ("fooba", base64_decode("Zm9vYmE="));
+    /* BASE64("foobar") = "Zm9vYmFy" */
+    EXPECT_SLICE_EQ("foobar", base64_decode("Zm9vYmFy"));
 
-  EXPECT_SLICE_EQ(&exec_ctx, "\xc0\xc1\xc2\xc3\xc4\xc5",
-                  base64_decode(&exec_ctx, "wMHCw8TF"));
+    EXPECT_SLICE_EQ("\xc0\xc1\xc2\xc3\xc4\xc5", base64_decode("wMHCw8TF"));
 
-  // Test illegal input length in grpc_chttp2_base64_decode
-  EXPECT_SLICE_EQ(&exec_ctx, "", base64_decode(&exec_ctx, "a"));
-  EXPECT_SLICE_EQ(&exec_ctx, "", base64_decode(&exec_ctx, "ab"));
-  EXPECT_SLICE_EQ(&exec_ctx, "", base64_decode(&exec_ctx, "abc"));
+    // Test illegal input length in grpc_chttp2_base64_decode
+    EXPECT_SLICE_EQ("", base64_decode("a"));
+    EXPECT_SLICE_EQ("", base64_decode("ab"));
+    EXPECT_SLICE_EQ("", base64_decode("abc"));
 
-  // Test illegal charactors in grpc_chttp2_base64_decode
-  EXPECT_SLICE_EQ(&exec_ctx, "", base64_decode(&exec_ctx, "Zm:v"));
-  EXPECT_SLICE_EQ(&exec_ctx, "", base64_decode(&exec_ctx, "Zm=v"));
+    // Test illegal characters in grpc_chttp2_base64_decode
+    EXPECT_SLICE_EQ("", base64_decode("Zm:v"));
+    EXPECT_SLICE_EQ("", base64_decode("Zm=v"));
 
-  // Test output_length longer than max possible output length in
-  // grpc_chttp2_base64_decode_with_length
-  EXPECT_SLICE_EQ(&exec_ctx, "", base64_decode_with_length(&exec_ctx, "Zg", 2));
-  EXPECT_SLICE_EQ(&exec_ctx, "",
-                  base64_decode_with_length(&exec_ctx, "Zm8", 3));
-  EXPECT_SLICE_EQ(&exec_ctx, "",
-                  base64_decode_with_length(&exec_ctx, "Zm9v", 4));
+    // Test output_length longer than max possible output length in
+    // grpc_chttp2_base64_decode_with_length
+    EXPECT_SLICE_EQ("", base64_decode_with_length("Zg", 2));
+    EXPECT_SLICE_EQ("", base64_decode_with_length("Zm8", 3));
+    EXPECT_SLICE_EQ("", base64_decode_with_length("Zm9v", 4));
 
-  // Test illegal charactors in grpc_chttp2_base64_decode_with_length
-  EXPECT_SLICE_EQ(&exec_ctx, "",
-                  base64_decode_with_length(&exec_ctx, "Zm:v", 3));
-  EXPECT_SLICE_EQ(&exec_ctx, "",
-                  base64_decode_with_length(&exec_ctx, "Zm=v", 3));
-
-  grpc_exec_ctx_finish(&exec_ctx);
-
+    // Test illegal characters in grpc_chttp2_base64_decode_with_length
+    EXPECT_SLICE_EQ("", base64_decode_with_length("Zm:v", 3));
+    EXPECT_SLICE_EQ("", base64_decode_with_length("Zm=v", 3));
+  }
+  grpc_shutdown();
   return all_ok ? 0 : 1;
 }
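
Several test mains in this section (byte_buffer_reader, byte_stream, bin_decoder, bin_encoder) now bracket their work with grpc_init()/grpc_shutdown(), and any scoped ExecCtx is nested inside that bracket, as in bin_decoder above. A skeleton of the pattern; the relative order of grpc_test_init and grpc_init varies between the tests, and the test-tree headers are assumed included:

    int main(int argc, char** argv) {
      grpc_test_init(argc, argv);
      grpc_init();
      {
        grpc_core::ExecCtx exec_ctx;
        // ... test body that needs an exec ctx ...
      }
      grpc_shutdown();
      return 0;
    }
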
diff --git a/test/core/transport/chttp2/bin_encoder_test.cc b/test/core/transport/chttp2/bin_encoder_test.cc
index 78b8808..44f5de8 100644
--- a/test/core/transport/chttp2/bin_encoder_test.cc
+++ b/test/core/transport/chttp2/bin_encoder_test.cc
@@ -99,6 +99,8 @@
 }
 
 int main(int argc, char** argv) {
+  grpc_init();
+
   /* Base64 test vectors from RFC 4648, with padding removed */
   /* BASE64("") = "" */
   EXPECT_SLICE_EQ("", B64(""));
@@ -169,5 +171,6 @@
   expect_binary_header("foo-bar", 0);
   expect_binary_header("-bin", 0);
 
+  grpc_shutdown();
   return all_ok ? 0 : 1;
 }
diff --git a/test/core/transport/chttp2/hpack_encoder_test.cc b/test/core/transport/chttp2/hpack_encoder_test.cc
index 2d18b72..d2dbd4a 100644
--- a/test/core/transport/chttp2/hpack_encoder_test.cc
+++ b/test/core/transport/chttp2/hpack_encoder_test.cc
@@ -51,8 +51,8 @@
 
 /* verify that the output generated by encoding the stream matches the
    hexstring passed in */
-static void verify(grpc_exec_ctx* exec_ctx, const verify_params params,
-                   const char* expected, size_t nheaders, ...) {
+static void verify(const verify_params params, const char* expected,
+                   size_t nheaders, ...) {
   grpc_slice_buffer output;
   grpc_slice merged;
   grpc_slice expect = parse_hexstring(expected);
@@ -77,8 +77,7 @@
       value_slice = grpc_slice_intern(value_slice);
     }
     e[i].md = grpc_mdelem_from_slices(
-        exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(key)),
-        value_slice);
+        grpc_slice_intern(grpc_slice_from_static_string(key)), value_slice);
   }
   e[0].prev = nullptr;
   e[nheaders - 1].next = nullptr;
@@ -106,11 +105,10 @@
       16384,                           /* max_frame_size */
       &stats                           /* stats */
   };
-  grpc_chttp2_encode_header(exec_ctx, &g_compressor, nullptr, 0, &b, &hopt,
-                            &output);
+  grpc_chttp2_encode_header(&g_compressor, nullptr, 0, &b, &hopt, &output);
   merged = grpc_slice_merge(output.slices, output.count);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &output);
-  grpc_metadata_batch_destroy(exec_ctx, &b);
+  grpc_slice_buffer_destroy_internal(&output);
+  grpc_metadata_batch_destroy(&b);
 
   if (!grpc_slice_eq(merged, expect)) {
     char* expect_str = grpc_dump_slice(expect, GPR_DUMP_HEX | GPR_DUMP_ASCII);
@@ -123,11 +121,11 @@
     g_failure = 1;
   }
 
-  grpc_slice_unref_internal(exec_ctx, merged);
-  grpc_slice_unref_internal(exec_ctx, expect);
+  grpc_slice_unref_internal(merged);
+  grpc_slice_unref_internal(expect);
 }
 
-static void test_basic_headers(grpc_exec_ctx* exec_ctx) {
+static void test_basic_headers() {
   int i;
 
   verify_params params = {
@@ -135,24 +133,22 @@
       false,
       false,
   };
-  verify(exec_ctx, params, "000005 0104 deadbeef 40 0161 0161", 1, "a", "a");
-  verify(exec_ctx, params, "000001 0104 deadbeef be", 1, "a", "a");
-  verify(exec_ctx, params, "000001 0104 deadbeef be", 1, "a", "a");
-  verify(exec_ctx, params, "000006 0104 deadbeef be 40 0162 0163", 2, "a", "a",
-         "b", "c");
-  verify(exec_ctx, params, "000002 0104 deadbeef bf be", 2, "a", "a", "b", "c");
-  verify(exec_ctx, params, "000004 0104 deadbeef 7f 00 0164", 1, "a", "d");
+  verify(params, "000005 0104 deadbeef 40 0161 0161", 1, "a", "a");
+  verify(params, "000001 0104 deadbeef be", 1, "a", "a");
+  verify(params, "000001 0104 deadbeef be", 1, "a", "a");
+  verify(params, "000006 0104 deadbeef be 40 0162 0163", 2, "a", "a", "b", "c");
+  verify(params, "000002 0104 deadbeef bf be", 2, "a", "a", "b", "c");
+  verify(params, "000004 0104 deadbeef 7f 00 0164", 1, "a", "d");
 
   /* flush out what's there to make a few values look very popular */
   for (i = 0; i < 350; i++) {
-    verify(exec_ctx, params, "000003 0104 deadbeef c0 bf be", 3, "a", "a", "b",
-           "c", "a", "d");
+    verify(params, "000003 0104 deadbeef c0 bf be", 3, "a", "a", "b", "c", "a",
+           "d");
   }
 
-  verify(exec_ctx, params, "000006 0104 deadbeef c0 00 016b 0176", 2, "a", "a",
-         "k", "v");
+  verify(params, "000006 0104 deadbeef c0 00 016b 0176", 2, "a", "a", "k", "v");
   /* this could be 000004 0104 deadbeef 0f 30 0176 also */
-  verify(exec_ctx, params, "000004 0104 deadbeef 0f 2f 0176", 1, "a", "v");
+  verify(params, "000004 0104 deadbeef 0f 2f 0176", 1, "a", "v");
 }
 
 static void encode_int_to_str(int i, char* p) {
@@ -163,7 +159,7 @@
   p[2] = 0;
 }
 
-static void test_decode_table_overflow(grpc_exec_ctx* exec_ctx) {
+static void test_decode_table_overflow() {
   int i;
   char key[3], value[3];
   char* expect;
@@ -192,26 +188,24 @@
     }
 
     if (i > 0) {
-      verify(exec_ctx, params, expect, 2, "aa", "ba", key, value);
+      verify(params, expect, 2, "aa", "ba", key, value);
     } else {
-      verify(exec_ctx, params, expect, 1, key, value);
+      verify(params, expect, 1, key, value);
     }
     gpr_free(expect);
   }
 
   /* if the above passes, then we must have just knocked this pair out of the
      decoder stack, and so we'll be forced to re-encode it */
-  verify(exec_ctx, params, "000007 0104 deadbeef 40 026161 026261", 1, "aa",
-         "ba");
+  verify(params, "000007 0104 deadbeef 40 026161 026261", 1, "aa", "ba");
 }
 
-static void verify_table_size_change_match_elem_size(grpc_exec_ctx* exec_ctx,
-                                                     const char* key,
+static void verify_table_size_change_match_elem_size(const char* key,
                                                      const char* value,
                                                      bool use_true_binary) {
   grpc_slice_buffer output;
   grpc_mdelem elem = grpc_mdelem_from_slices(
-      exec_ctx, grpc_slice_intern(grpc_slice_from_static_string(key)),
+      grpc_slice_intern(grpc_slice_from_static_string(key)),
       grpc_slice_intern(grpc_slice_from_static_string(value)));
   size_t elem_size = grpc_mdelem_get_size_in_hpack_table(elem, use_true_binary);
   size_t initial_table_size = g_compressor.table_size;
@@ -235,41 +229,38 @@
       use_true_binary, /* use_true_binary_metadata */
       16384,           /* max_frame_size */
       &stats /* stats */};
-  grpc_chttp2_encode_header(exec_ctx, &g_compressor, nullptr, 0, &b, &hopt,
-                            &output);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &output);
-  grpc_metadata_batch_destroy(exec_ctx, &b);
+  grpc_chttp2_encode_header(&g_compressor, nullptr, 0, &b, &hopt, &output);
+  grpc_slice_buffer_destroy_internal(&output);
+  grpc_metadata_batch_destroy(&b);
 
   GPR_ASSERT(g_compressor.table_size == elem_size + initial_table_size);
   gpr_free(e);
 }
 
-static void test_encode_header_size(grpc_exec_ctx* exec_ctx) {
-  verify_table_size_change_match_elem_size(exec_ctx, "hello", "world", false);
-  verify_table_size_change_match_elem_size(exec_ctx, "hello-bin", "world",
-                                           false);
-  verify_table_size_change_match_elem_size(exec_ctx, "true-binary-bin",
+static void test_encode_header_size() {
+  verify_table_size_change_match_elem_size("hello", "world", false);
+  verify_table_size_change_match_elem_size("hello-bin", "world", false);
+  verify_table_size_change_match_elem_size("true-binary-bin",
                                            "I_am_true_binary_value", true);
 }
 
-static void test_interned_key_indexed(grpc_exec_ctx* exec_ctx) {
+static void test_interned_key_indexed() {
   int i;
   verify_params params = {false, false, true};
-  verify(exec_ctx, params, "000009 0104 deadbeef 40 0161 0162 0f2f 0163", 2,
-         "a", "b", "a", "c");
+  verify(params, "000009 0104 deadbeef 40 0161 0162 0f2f 0163", 2, "a", "b",
+         "a", "c");
   for (i = 0; i < 10; i++) {
-    verify(exec_ctx, params, "000008 0104 deadbeef 0f2f 0162 0f2f 0163", 2, "a",
-           "b", "a", "c");
+    verify(params, "000008 0104 deadbeef 0f2f 0162 0f2f 0163", 2, "a", "b", "a",
+           "c");
   }
 }
 
-static void run_test(void (*test)(grpc_exec_ctx* exec_ctx), const char* name) {
+static void run_test(void (*test)(), const char* name) {
   gpr_log(GPR_INFO, "RUN TEST: %s", name);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_chttp2_hpack_compressor_init(&g_compressor);
-  test(&exec_ctx);
-  grpc_chttp2_hpack_compressor_destroy(&exec_ctx, &g_compressor);
-  grpc_exec_ctx_finish(&exec_ctx);
+  test();
+  grpc_chttp2_hpack_compressor_destroy(&g_compressor);
 }
 
 int main(int argc, char** argv) {
diff --git a/test/core/transport/chttp2/hpack_parser_fuzzer_test.cc b/test/core/transport/chttp2/hpack_parser_fuzzer_test.cc
index 942f25e..9a195da 100644
--- a/test/core/transport/chttp2/hpack_parser_fuzzer_test.cc
+++ b/test/core/transport/chttp2/hpack_parser_fuzzer_test.cc
@@ -29,9 +29,7 @@
 bool squelch = true;
 bool leak_check = true;
 
-static void onhdr(grpc_exec_ctx* exec_ctx, void* ud, grpc_mdelem md) {
-  GRPC_MDELEM_UNREF(exec_ctx, md);
-}
+static void onhdr(void* ud, grpc_mdelem md) { GRPC_MDELEM_UNREF(md); }
 static void dont_log(gpr_log_func_args* args) {}
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
@@ -39,13 +37,14 @@
   if (squelch) gpr_set_log_function(dont_log);
   grpc_init();
   grpc_chttp2_hpack_parser parser;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_chttp2_hpack_parser_init(&exec_ctx, &parser);
-  parser.on_header = onhdr;
-  GRPC_ERROR_UNREF(grpc_chttp2_hpack_parser_parse(
-      &exec_ctx, &parser, grpc_slice_from_static_buffer(data, size)));
-  grpc_chttp2_hpack_parser_destroy(&exec_ctx, &parser);
-  grpc_exec_ctx_finish(&exec_ctx);
+  {
+    grpc_core::ExecCtx exec_ctx;
+    grpc_chttp2_hpack_parser_init(&parser);
+    parser.on_header = onhdr;
+    GRPC_ERROR_UNREF(grpc_chttp2_hpack_parser_parse(
+        &parser, grpc_slice_from_static_buffer(data, size)));
+    grpc_chttp2_hpack_parser_destroy(&parser);
+  }
   grpc_shutdown();
   return 0;
 }
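
One detail in the fuzzer change above is worth calling out: the ExecCtx now lives in its own brace-delimited scope, presumably so that its destructor (and the flush it performs) runs before grpc_shutdown() tears the library down. Reduced to its skeleton, with the parser work elided:

    grpc_init();
    {
      grpc_core::ExecCtx exec_ctx;  // destroyed at the closing brace, before shutdown
      // ... parsing work that may schedule closures on the exec ctx ...
    }
    grpc_shutdown();
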
diff --git a/test/core/transport/chttp2/hpack_parser_test.cc b/test/core/transport/chttp2/hpack_parser_test.cc
index 82fb20a..9d3456a 100644
--- a/test/core/transport/chttp2/hpack_parser_test.cc
+++ b/test/core/transport/chttp2/hpack_parser_test.cc
@@ -32,7 +32,7 @@
   va_list args;
 } test_checker;
 
-static void onhdr(grpc_exec_ctx* exec_ctx, void* ud, grpc_mdelem md) {
+static void onhdr(void* ud, grpc_mdelem md) {
   const char *ekey, *evalue;
   test_checker* chk = static_cast<test_checker*>(ud);
   ekey = va_arg(chk->args, char*);
@@ -41,7 +41,7 @@
   GPR_ASSERT(evalue);
   GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDKEY(md), ekey) == 0);
   GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDVALUE(md), evalue) == 0);
-  GRPC_MDELEM_UNREF(exec_ctx, md);
+  GRPC_MDELEM_UNREF(md);
 }
 
 static void test_vector(grpc_chttp2_hpack_parser* parser,
@@ -62,10 +62,9 @@
   grpc_slice_unref(input);
 
   for (i = 0; i < nslices; i++) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    GPR_ASSERT(grpc_chttp2_hpack_parser_parse(&exec_ctx, parser, slices[i]) ==
+    grpc_core::ExecCtx exec_ctx;
+    GPR_ASSERT(grpc_chttp2_hpack_parser_parse(parser, slices[i]) ==
                GRPC_ERROR_NONE);
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 
   for (i = 0; i < nslices; i++) {
@@ -80,9 +79,9 @@
 
 static void test_vectors(grpc_slice_split_mode mode) {
   grpc_chttp2_hpack_parser parser;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
-  grpc_chttp2_hpack_parser_init(&exec_ctx, &parser);
+  grpc_chttp2_hpack_parser_init(&parser);
   /* D.2.1 */
   test_vector(&parser, mode,
               "400a 6375 7374 6f6d 2d6b 6579 0d63 7573"
@@ -98,9 +97,9 @@
               "password", "secret", NULL);
   /* D.2.4 */
   test_vector(&parser, mode, "82", ":method", "GET", NULL);
-  grpc_chttp2_hpack_parser_destroy(&exec_ctx, &parser);
+  grpc_chttp2_hpack_parser_destroy(&parser);
 
-  grpc_chttp2_hpack_parser_init(&exec_ctx, &parser);
+  grpc_chttp2_hpack_parser_init(&parser);
   /* D.3.1 */
   test_vector(&parser, mode,
               "8286 8441 0f77 7777 2e65 7861 6d70 6c65"
@@ -118,9 +117,9 @@
               ":method", "GET", ":scheme", "https", ":path", "/index.html",
               ":authority", "www.example.com", "custom-key", "custom-value",
               NULL);
-  grpc_chttp2_hpack_parser_destroy(&exec_ctx, &parser);
+  grpc_chttp2_hpack_parser_destroy(&parser);
 
-  grpc_chttp2_hpack_parser_init(&exec_ctx, &parser);
+  grpc_chttp2_hpack_parser_init(&parser);
   /* D.4.1 */
   test_vector(&parser, mode,
               "8286 8441 8cf1 e3c2 e5f2 3a6b a0ab 90f4"
@@ -138,11 +137,11 @@
               ":method", "GET", ":scheme", "https", ":path", "/index.html",
               ":authority", "www.example.com", "custom-key", "custom-value",
               NULL);
-  grpc_chttp2_hpack_parser_destroy(&exec_ctx, &parser);
+  grpc_chttp2_hpack_parser_destroy(&parser);
 
-  grpc_chttp2_hpack_parser_init(&exec_ctx, &parser);
-  grpc_chttp2_hptbl_set_max_bytes(&exec_ctx, &parser.table, 256);
-  grpc_chttp2_hptbl_set_current_table_size(&exec_ctx, &parser.table, 256);
+  grpc_chttp2_hpack_parser_init(&parser);
+  grpc_chttp2_hptbl_set_max_bytes(&parser.table, 256);
+  grpc_chttp2_hptbl_set_current_table_size(&parser.table, 256);
   /* D.5.1 */
   test_vector(&parser, mode,
               "4803 3330 3258 0770 7269 7661 7465 611d"
@@ -172,11 +171,11 @@
               "https://www.example.com", "content-encoding", "gzip",
               "set-cookie",
               "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", NULL);
-  grpc_chttp2_hpack_parser_destroy(&exec_ctx, &parser);
+  grpc_chttp2_hpack_parser_destroy(&parser);
 
-  grpc_chttp2_hpack_parser_init(&exec_ctx, &parser);
-  grpc_chttp2_hptbl_set_max_bytes(&exec_ctx, &parser.table, 256);
-  grpc_chttp2_hptbl_set_current_table_size(&exec_ctx, &parser.table, 256);
+  grpc_chttp2_hpack_parser_init(&parser);
+  grpc_chttp2_hptbl_set_max_bytes(&parser.table, 256);
+  grpc_chttp2_hptbl_set_current_table_size(&parser.table, 256);
   /* D.6.1 */
   test_vector(&parser, mode,
               "4882 6402 5885 aec3 771a 4b61 96d0 7abe"
@@ -203,9 +202,7 @@
               "https://www.example.com", "content-encoding", "gzip",
               "set-cookie",
               "foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1", NULL);
-  grpc_chttp2_hpack_parser_destroy(&exec_ctx, &parser);
-
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_hpack_parser_destroy(&parser);
 }
 
 int main(int argc, char** argv) {
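
The D.n.m comments in the vectors above track the worked examples in the HPACK specification's examples appendix (RFC 7541 numbers the same examples C.n.m). The shortest vector, "82" from D.2.4, decodes in one byte and is a handy sanity check when reading the longer ones:

    0x82 = 1000 0010
           ^          high bit set: indexed header field representation
            000 0010  index 2 -> static table entry 2, i.e. ":method: GET"

which is exactly the (":method", "GET") pair the test expects.
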
diff --git a/test/core/transport/chttp2/hpack_table_test.cc b/test/core/transport/chttp2/hpack_table_test.cc
index ff7c2de..3f3cb2e 100644
--- a/test/core/transport/chttp2/hpack_table_test.cc
+++ b/test/core/transport/chttp2/hpack_table_test.cc
@@ -44,10 +44,10 @@
 }
 
 static void test_static_lookup(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_chttp2_hptbl tbl;
 
-  grpc_chttp2_hptbl_init(&exec_ctx, &tbl);
+  grpc_chttp2_hptbl_init(&tbl);
 
   LOG_TEST("test_static_lookup");
   assert_index(&tbl, 1, ":authority", "");
@@ -112,8 +112,7 @@
   assert_index(&tbl, 60, "via", "");
   assert_index(&tbl, 61, "www-authenticate", "");
 
-  grpc_chttp2_hptbl_destroy(&exec_ctx, &tbl);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_hptbl_destroy(&tbl);
 }
 
 static void test_many_additions(void) {
@@ -124,18 +123,17 @@
 
   LOG_TEST("test_many_additions");
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_chttp2_hptbl_init(&exec_ctx, &tbl);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_chttp2_hptbl_init(&tbl);
 
   for (i = 0; i < 100000; i++) {
     grpc_mdelem elem;
     gpr_asprintf(&key, "K:%d", i);
     gpr_asprintf(&value, "VALUE:%d", i);
-    elem =
-        grpc_mdelem_from_slices(&exec_ctx, grpc_slice_from_copied_string(key),
-                                grpc_slice_from_copied_string(value));
-    GPR_ASSERT(grpc_chttp2_hptbl_add(&exec_ctx, &tbl, elem) == GRPC_ERROR_NONE);
-    GRPC_MDELEM_UNREF(&exec_ctx, elem);
+    elem = grpc_mdelem_from_slices(grpc_slice_from_copied_string(key),
+                                   grpc_slice_from_copied_string(value));
+    GPR_ASSERT(grpc_chttp2_hptbl_add(&tbl, elem) == GRPC_ERROR_NONE);
+    GRPC_MDELEM_UNREF(elem);
     assert_index(&tbl, 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY, key, value);
     gpr_free(key);
     gpr_free(value);
@@ -148,25 +146,23 @@
     }
   }
 
-  grpc_chttp2_hptbl_destroy(&exec_ctx, &tbl);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_hptbl_destroy(&tbl);
 }
 
 static grpc_chttp2_hptbl_find_result find_simple(grpc_chttp2_hptbl* tbl,
                                                  const char* key,
                                                  const char* value) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_mdelem md =
-      grpc_mdelem_from_slices(&exec_ctx, grpc_slice_from_copied_string(key),
-                              grpc_slice_from_copied_string(value));
+  grpc_core::ExecCtx exec_ctx;
+  grpc_mdelem md = grpc_mdelem_from_slices(
+      grpc_slice_from_copied_string(key), grpc_slice_from_copied_string(value));
   grpc_chttp2_hptbl_find_result r = grpc_chttp2_hptbl_find(tbl, md);
-  GRPC_MDELEM_UNREF(&exec_ctx, md);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(md);
+
   return r;
 }
 
 static void test_find(void) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_chttp2_hptbl tbl;
   uint32_t i;
   char buffer[32];
@@ -175,21 +171,19 @@
 
   LOG_TEST("test_find");
 
-  grpc_chttp2_hptbl_init(&exec_ctx, &tbl);
-  elem =
-      grpc_mdelem_from_slices(&exec_ctx, grpc_slice_from_static_string("abc"),
-                              grpc_slice_from_static_string("xyz"));
-  GPR_ASSERT(grpc_chttp2_hptbl_add(&exec_ctx, &tbl, elem) == GRPC_ERROR_NONE);
-  GRPC_MDELEM_UNREF(&exec_ctx, elem);
-  elem =
-      grpc_mdelem_from_slices(&exec_ctx, grpc_slice_from_static_string("abc"),
-                              grpc_slice_from_static_string("123"));
-  GPR_ASSERT(grpc_chttp2_hptbl_add(&exec_ctx, &tbl, elem) == GRPC_ERROR_NONE);
-  GRPC_MDELEM_UNREF(&exec_ctx, elem);
-  elem = grpc_mdelem_from_slices(&exec_ctx, grpc_slice_from_static_string("x"),
+  grpc_chttp2_hptbl_init(&tbl);
+  elem = grpc_mdelem_from_slices(grpc_slice_from_static_string("abc"),
+                                 grpc_slice_from_static_string("xyz"));
+  GPR_ASSERT(grpc_chttp2_hptbl_add(&tbl, elem) == GRPC_ERROR_NONE);
+  GRPC_MDELEM_UNREF(elem);
+  elem = grpc_mdelem_from_slices(grpc_slice_from_static_string("abc"),
+                                 grpc_slice_from_static_string("123"));
+  GPR_ASSERT(grpc_chttp2_hptbl_add(&tbl, elem) == GRPC_ERROR_NONE);
+  GRPC_MDELEM_UNREF(elem);
+  elem = grpc_mdelem_from_slices(grpc_slice_from_static_string("x"),
                                  grpc_slice_from_static_string("1"));
-  GPR_ASSERT(grpc_chttp2_hptbl_add(&exec_ctx, &tbl, elem) == GRPC_ERROR_NONE);
-  GRPC_MDELEM_UNREF(&exec_ctx, elem);
+  GPR_ASSERT(grpc_chttp2_hptbl_add(&tbl, elem) == GRPC_ERROR_NONE);
+  GRPC_MDELEM_UNREF(elem);
 
   r = find_simple(&tbl, "abc", "123");
   GPR_ASSERT(r.index == 2 + GRPC_CHTTP2_LAST_STATIC_ENTRY);
@@ -238,11 +232,10 @@
   /* overflow the string buffer, check find still works */
   for (i = 0; i < 10000; i++) {
     int64_ttoa(i, buffer);
-    elem = grpc_mdelem_from_slices(&exec_ctx,
-                                   grpc_slice_from_static_string("test"),
+    elem = grpc_mdelem_from_slices(grpc_slice_from_static_string("test"),
                                    grpc_slice_from_copied_string(buffer));
-    GPR_ASSERT(grpc_chttp2_hptbl_add(&exec_ctx, &tbl, elem) == GRPC_ERROR_NONE);
-    GRPC_MDELEM_UNREF(&exec_ctx, elem);
+    GPR_ASSERT(grpc_chttp2_hptbl_add(&tbl, elem) == GRPC_ERROR_NONE);
+    GRPC_MDELEM_UNREF(elem);
   }
 
   r = find_simple(&tbl, "abc", "123");
@@ -270,8 +263,7 @@
   GPR_ASSERT(r.index != 0);
   GPR_ASSERT(r.has_value == 0);
 
-  grpc_chttp2_hptbl_destroy(&exec_ctx, &tbl);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_hptbl_destroy(&tbl);
 }
 
 int main(int argc, char** argv) {
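
A note on the index arithmetic in the assertions above: HPACK addresses the static and dynamic tables through a single index space, with dynamic entries starting immediately after the last static entry and the most recently inserted entry taking the lowest dynamic index. A sketch of the expectation that test_find encodes (comments only, not a helper from the tree):

    // After inserting e1, e2, e3 (in that order) into an empty dynamic table:
    //   index(e3) == 1 + GRPC_CHTTP2_LAST_STATIC_ENTRY   // "x: 1"
    //   index(e2) == 2 + GRPC_CHTTP2_LAST_STATIC_ENTRY   // "abc: 123"
    //   index(e1) == 3 + GRPC_CHTTP2_LAST_STATIC_ENTRY   // "abc: xyz"
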
diff --git a/test/core/transport/chttp2/settings_timeout_test.cc b/test/core/transport/chttp2/settings_timeout_test.cc
index 670eae1..08473c7 100644
--- a/test/core/transport/chttp2/settings_timeout_test.cc
+++ b/test/core/transport/chttp2/settings_timeout_test.cc
@@ -97,7 +97,7 @@
       : server_address_(server_address) {}
 
   void Connect() {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_resolved_addresses* server_addresses = nullptr;
     grpc_error* error =
         grpc_blocking_resolve_address(server_address_, "80", &server_addresses);
@@ -106,56 +106,53 @@
     pollset_ = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
     grpc_pollset_init(pollset_, &mu_);
     grpc_pollset_set* pollset_set = grpc_pollset_set_create();
-    grpc_pollset_set_add_pollset(&exec_ctx, pollset_set, pollset_);
+    grpc_pollset_set_add_pollset(pollset_set, pollset_);
     EventState state;
-    grpc_tcp_client_connect(&exec_ctx, state.closure(), &endpoint_, pollset_set,
+    grpc_tcp_client_connect(state.closure(), &endpoint_, pollset_set,
                             nullptr /* channel_args */, server_addresses->addrs,
                             1000);
     ASSERT_TRUE(PollUntilDone(
-        &exec_ctx, &state,
+        &state,
         grpc_timespec_to_millis_round_up(gpr_inf_future(GPR_CLOCK_MONOTONIC))));
     ASSERT_EQ(GRPC_ERROR_NONE, state.error());
-    grpc_pollset_set_destroy(&exec_ctx, pollset_set);
-    grpc_endpoint_add_to_pollset(&exec_ctx, endpoint_, pollset_);
+    grpc_pollset_set_destroy(pollset_set);
+    grpc_endpoint_add_to_pollset(endpoint_, pollset_);
     grpc_resolved_addresses_destroy(server_addresses);
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 
   // Reads until an error is returned.
   // Returns true if an error was encountered before the deadline.
   bool ReadUntilError() {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_slice_buffer read_buffer;
     grpc_slice_buffer_init(&read_buffer);
     bool retval = true;
     // Use a deadline of 3 seconds, which is a lot more than we should
     // need for a 1-second timeout, but this helps avoid flakes.
-    grpc_millis deadline = grpc_exec_ctx_now(&exec_ctx) + 3000;
+    grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 3000;
     while (true) {
       EventState state;
-      grpc_endpoint_read(&exec_ctx, endpoint_, &read_buffer, state.closure());
-      if (!PollUntilDone(&exec_ctx, &state, deadline)) {
+      grpc_endpoint_read(endpoint_, &read_buffer, state.closure());
+      if (!PollUntilDone(&state, deadline)) {
         retval = false;
         break;
       }
       if (state.error() != GRPC_ERROR_NONE) break;
       gpr_log(GPR_INFO, "client read %" PRIuPTR " bytes", read_buffer.length);
-      grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, &read_buffer);
+      grpc_slice_buffer_reset_and_unref_internal(&read_buffer);
     }
-    grpc_endpoint_shutdown(&exec_ctx, endpoint_,
+    grpc_endpoint_shutdown(endpoint_,
                            GRPC_ERROR_CREATE_FROM_STATIC_STRING("shutdown"));
-    grpc_slice_buffer_destroy_internal(&exec_ctx, &read_buffer);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_slice_buffer_destroy_internal(&read_buffer);
     return retval;
   }
 
   void Shutdown() {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_endpoint_destroy(&exec_ctx, endpoint_);
-    grpc_pollset_shutdown(&exec_ctx, pollset_,
+    grpc_core::ExecCtx exec_ctx;
+    grpc_endpoint_destroy(endpoint_);
+    grpc_pollset_shutdown(pollset_,
                           GRPC_CLOSURE_CREATE(&Client::PollsetDestroy, pollset_,
                                               grpc_schedule_on_exec_ctx));
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 
  private:
@@ -177,8 +174,7 @@
     grpc_error* error() const { return error_; }
 
    private:
-    static void OnEventDone(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+    static void OnEventDone(void* arg, grpc_error* error) {
       gpr_log(GPR_INFO, "OnEventDone(): %s", grpc_error_string(error));
       EventState* state = (EventState*)arg;
       state->error_ = GRPC_ERROR_REF(error);
@@ -191,24 +187,23 @@
   };
 
   // Returns true if done, or false if deadline exceeded.
-  bool PollUntilDone(grpc_exec_ctx* exec_ctx, EventState* state,
-                     grpc_millis deadline) {
+  bool PollUntilDone(EventState* state, grpc_millis deadline) {
     while (true) {
       grpc_pollset_worker* worker = nullptr;
       gpr_mu_lock(mu_);
-      GRPC_LOG_IF_ERROR("grpc_pollset_work",
-                        grpc_pollset_work(exec_ctx, pollset_, &worker,
-                                          grpc_exec_ctx_now(exec_ctx) + 1000));
+      GRPC_LOG_IF_ERROR(
+          "grpc_pollset_work",
+          grpc_pollset_work(pollset_, &worker,
+                            grpc_core::ExecCtx::Get()->Now() + 1000));
       gpr_mu_unlock(mu_);
       if (state != nullptr && state->done()) return true;
-      if (grpc_exec_ctx_now(exec_ctx) >= deadline) return false;
+      if (grpc_core::ExecCtx::Get()->Now() >= deadline) return false;
     }
   }
 
-  static void PollsetDestroy(grpc_exec_ctx* exec_ctx, void* arg,
-                             grpc_error* error) {
+  static void PollsetDestroy(void* arg, grpc_error* error) {
     grpc_pollset* pollset = (grpc_pollset*)arg;
-    grpc_pollset_destroy(exec_ctx, pollset);
+    grpc_pollset_destroy(pollset);
     gpr_free(pollset);
   }
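
Time queries follow the same migration as everything else in this file: what used to be read through the explicit context is now read off the thread's registered one. The two deadline computations above, side by side (both taken from this file, before and after):

    // before
    grpc_millis deadline = grpc_exec_ctx_now(&exec_ctx) + 3000;
    // after
    grpc_millis deadline = grpc_core::ExecCtx::Get()->Now() + 3000;
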
 
diff --git a/test/core/transport/chttp2/varint_test.cc b/test/core/transport/chttp2/varint_test.cc
index 413b461..36760d0 100644
--- a/test/core/transport/chttp2/varint_test.cc
+++ b/test/core/transport/chttp2/varint_test.cc
@@ -18,6 +18,7 @@
 
 #include "src/core/ext/transport/chttp2/transport/varint.h"
 
+#include <grpc/grpc.h>
 #include <grpc/slice.h>
 #include <grpc/support/log.h>
 
@@ -44,11 +45,13 @@
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   TEST_VARINT(0, 1, 0, "\x00");
   TEST_VARINT(128, 1, 0, "\x7f\x01");
   TEST_VARINT(16384, 1, 0, "\x7f\x81\x7f");
   TEST_VARINT(2097152, 1, 0, "\x7f\x81\xff\x7f");
   TEST_VARINT(268435456, 1, 0, "\x7f\x81\xff\xff\x7f");
   TEST_VARINT(0xffffffff, 1, 0, "\x7f\x80\xff\xff\xff\x0f");
+  grpc_shutdown();
   return 0;
 }
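
The expected byte strings above follow HPACK's prefixed-integer encoding (RFC 7541, section 5.1). Working through the 16384 case with a 7-bit prefix (the TEST_VARINT macro body is not part of this diff; its second argument appears to be the count of reserved high bits, which would leave 7 prefix bits here):

    16384 >= 127          -> fill the prefix: first byte 0x7f
    16384 - 127 = 16257
    16257 % 128 = 1       -> continuation byte 0x01 | 0x80 = 0x81
    16257 / 128 = 127     -> final byte 0x7f (< 128, so no continuation bit)
    result: "\x7f\x81\x7f", matching the expected string

The 128 case works the same way: prefix 0x7f, remainder 1, giving "\x7f\x01".
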
diff --git a/test/core/transport/connectivity_state_test.cc b/test/core/transport/connectivity_state_test.cc
index 11046e1..f589459 100644
--- a/test/core/transport/connectivity_state_test.cc
+++ b/test/core/transport/connectivity_state_test.cc
@@ -29,14 +29,13 @@
 
 int g_counter;
 
-static void must_succeed(grpc_exec_ctx* exec_ctx, void* arg,
-                         grpc_error* error) {
+static void must_succeed(void* arg, grpc_error* error) {
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   GPR_ASSERT(arg == THE_ARG);
   g_counter++;
 }
 
-static void must_fail(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+static void must_fail(void* arg, grpc_error* error) {
   GPR_ASSERT(error != GRPC_ERROR_NONE);
   GPR_ASSERT(arg == THE_ARG);
   g_counter++;
@@ -59,7 +58,7 @@
 
 static void test_check(void) {
   grpc_connectivity_state_tracker tracker;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_error* error;
   gpr_log(GPR_DEBUG, "test_check");
   grpc_connectivity_state_init(&tracker, GRPC_CHANNEL_IDLE, "xxx");
@@ -67,8 +66,7 @@
              GRPC_CHANNEL_IDLE);
   GPR_ASSERT(grpc_connectivity_state_check(&tracker) == GRPC_CHANNEL_IDLE);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
-  grpc_connectivity_state_destroy(&exec_ctx, &tracker);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_connectivity_state_destroy(&tracker);
 }
 
 static void test_subscribe_then_unsubscribe(void) {
@@ -76,23 +74,21 @@
   grpc_closure* closure =
       GRPC_CLOSURE_CREATE(must_fail, THE_ARG, grpc_schedule_on_exec_ctx);
   grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_log(GPR_DEBUG, "test_subscribe_then_unsubscribe");
   g_counter = 0;
   grpc_connectivity_state_init(&tracker, GRPC_CHANNEL_IDLE, "xxx");
-  GPR_ASSERT(grpc_connectivity_state_notify_on_state_change(&exec_ctx, &tracker,
-                                                            &state, closure));
-  grpc_exec_ctx_flush(&exec_ctx);
+  GPR_ASSERT(grpc_connectivity_state_notify_on_state_change(&tracker, &state,
+                                                            closure));
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(state == GRPC_CHANNEL_IDLE);
   GPR_ASSERT(g_counter == 0);
-  grpc_connectivity_state_notify_on_state_change(&exec_ctx, &tracker, nullptr,
-                                                 closure);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_connectivity_state_notify_on_state_change(&tracker, nullptr, closure);
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(state == GRPC_CHANNEL_IDLE);
   GPR_ASSERT(g_counter == 1);
 
-  grpc_connectivity_state_destroy(&exec_ctx, &tracker);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_connectivity_state_destroy(&tracker);
 }
 
 static void test_subscribe_then_destroy(void) {
@@ -100,17 +96,18 @@
   grpc_closure* closure =
       GRPC_CLOSURE_CREATE(must_succeed, THE_ARG, grpc_schedule_on_exec_ctx);
   grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_log(GPR_DEBUG, "test_subscribe_then_destroy");
   g_counter = 0;
   grpc_connectivity_state_init(&tracker, GRPC_CHANNEL_IDLE, "xxx");
-  GPR_ASSERT(grpc_connectivity_state_notify_on_state_change(&exec_ctx, &tracker,
-                                                            &state, closure));
-  grpc_exec_ctx_flush(&exec_ctx);
+  GPR_ASSERT(grpc_connectivity_state_notify_on_state_change(&tracker, &state,
+                                                            closure));
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(state == GRPC_CHANNEL_IDLE);
   GPR_ASSERT(g_counter == 0);
-  grpc_connectivity_state_destroy(&exec_ctx, &tracker);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_connectivity_state_destroy(&tracker);
+
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(state == GRPC_CHANNEL_SHUTDOWN);
   GPR_ASSERT(g_counter == 1);
 }
@@ -120,28 +117,30 @@
   grpc_closure* closure =
       GRPC_CLOSURE_CREATE(must_fail, THE_ARG, grpc_schedule_on_exec_ctx);
   grpc_connectivity_state state = GRPC_CHANNEL_SHUTDOWN;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_log(GPR_DEBUG, "test_subscribe_with_failure_then_destroy");
   g_counter = 0;
   grpc_connectivity_state_init(&tracker, GRPC_CHANNEL_SHUTDOWN, "xxx");
   GPR_ASSERT(0 == grpc_connectivity_state_notify_on_state_change(
-                      &exec_ctx, &tracker, &state, closure));
-  grpc_exec_ctx_flush(&exec_ctx);
+                      &tracker, &state, closure));
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(state == GRPC_CHANNEL_SHUTDOWN);
   GPR_ASSERT(g_counter == 0);
-  grpc_connectivity_state_destroy(&exec_ctx, &tracker);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_connectivity_state_destroy(&tracker);
+  grpc_core::ExecCtx::Get()->Flush();
   GPR_ASSERT(state == GRPC_CHANNEL_SHUTDOWN);
   GPR_ASSERT(g_counter == 1);
 }
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   grpc_core::testing::grpc_tracer_enable_flag(&grpc_connectivity_state_trace);
   test_connectivity_state_name();
   test_check();
   test_subscribe_then_unsubscribe();
   test_subscribe_then_destroy();
   test_subscribe_with_failure_then_destroy();
+  grpc_shutdown();
   return 0;
 }
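
The Flush() calls inserted above are the behavioral heart of this change: notification closures created with grpc_schedule_on_exec_ctx are only scheduled on the thread's ExecCtx, so the tests must run grpc_core::ExecCtx::Get()->Flush() (or let the ExecCtx destructor do it) before asserting that a callback fired. The tail of test_subscribe_then_destroy, annotated:

    grpc_connectivity_state_destroy(&tracker);  // schedules the pending notification
    grpc_core::ExecCtx::Get()->Flush();         // runs it; only now do state/g_counter change
    GPR_ASSERT(state == GRPC_CHANNEL_SHUTDOWN);
    GPR_ASSERT(g_counter == 1);
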
diff --git a/test/core/transport/metadata_test.cc b/test/core/transport/metadata_test.cc
index b60a961..5c52ae8 100644
--- a/test/core/transport/metadata_test.cc
+++ b/test/core/transport/metadata_test.cc
@@ -60,15 +60,15 @@
           intern_keys, intern_values);
 
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   m1 = grpc_mdelem_from_slices(
-      &exec_ctx, maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
+      maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
       maybe_intern(grpc_slice_from_static_string("b"), intern_values));
   m2 = grpc_mdelem_from_slices(
-      &exec_ctx, maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
+      maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
       maybe_intern(grpc_slice_from_static_string("b"), intern_values));
   m3 = grpc_mdelem_from_slices(
-      &exec_ctx, maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
+      maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
       maybe_intern(grpc_slice_from_static_string("c"), intern_values));
   GPR_ASSERT(grpc_mdelem_eq(m1, m2));
   GPR_ASSERT(!grpc_mdelem_eq(m3, m1));
@@ -77,10 +77,10 @@
   GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDKEY(m1), "a") == 0);
   GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDVALUE(m1), "b") == 0);
   GPR_ASSERT(grpc_slice_str_cmp(GRPC_MDVALUE(m3), "c") == 0);
-  GRPC_MDELEM_UNREF(&exec_ctx, m1);
-  GRPC_MDELEM_UNREF(&exec_ctx, m2);
-  GRPC_MDELEM_UNREF(&exec_ctx, m3);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(m1);
+  GRPC_MDELEM_UNREF(m2);
+  GRPC_MDELEM_UNREF(m3);
+
   grpc_shutdown();
 }
 
@@ -95,19 +95,15 @@
       intern_keys, intern_values);
 
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   /* add, and immediately delete a bunch of different elements */
   for (i = 0; i < MANY; i++) {
     gpr_ltoa(i, buffer);
-    GRPC_MDELEM_UNREF(
-        &exec_ctx,
-        grpc_mdelem_from_slices(
-            &exec_ctx,
-            maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
-            maybe_intern(grpc_slice_from_copied_string(buffer),
-                         intern_values)));
+    GRPC_MDELEM_UNREF(grpc_mdelem_from_slices(
+        maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
+        maybe_intern(grpc_slice_from_copied_string(buffer), intern_values)));
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_shutdown();
 }
 
@@ -121,28 +117,28 @@
   gpr_log(GPR_INFO, "test_create_many_persistant_metadata");
 
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   /* add phase */
   for (i = 0; i < MANY; i++) {
     gpr_ltoa(i, buffer);
     created[i] = grpc_mdelem_from_slices(
-        &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("a")),
+        grpc_slice_intern(grpc_slice_from_static_string("a")),
         grpc_slice_intern(grpc_slice_from_static_string(buffer)));
   }
   /* verify phase */
   for (i = 0; i < MANY; i++) {
     gpr_ltoa(i, buffer);
     md = grpc_mdelem_from_slices(
-        &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("a")),
+        grpc_slice_intern(grpc_slice_from_static_string("a")),
         grpc_slice_intern(grpc_slice_from_static_string(buffer)));
     GPR_ASSERT(grpc_mdelem_eq(md, created[i]));
-    GRPC_MDELEM_UNREF(&exec_ctx, md);
+    GRPC_MDELEM_UNREF(md);
   }
   /* cleanup phase */
   for (i = 0; i < MANY; i++) {
-    GRPC_MDELEM_UNREF(&exec_ctx, created[i]);
+    GRPC_MDELEM_UNREF(created[i]);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_shutdown();
 
   gpr_free(created);
@@ -155,31 +151,25 @@
           intern_keys, intern_values);
 
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem a, b, c;
   GRPC_MDELEM_UNREF(
-      &exec_ctx,
       a = grpc_mdelem_from_slices(
-          &exec_ctx,
           maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
           maybe_intern(grpc_slice_from_static_string("b"), intern_values)));
   GRPC_MDELEM_UNREF(
-      &exec_ctx,
       b = grpc_mdelem_from_slices(
-          &exec_ctx,
           maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
           maybe_intern(grpc_slice_from_static_string("b"), intern_values)));
   GRPC_MDELEM_UNREF(
-      &exec_ctx,
       c = grpc_mdelem_from_slices(
-          &exec_ctx,
           maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
           maybe_intern(grpc_slice_from_static_string("b"), intern_values)));
   if (intern_keys && intern_values) {
     GPR_ASSERT(a.payload == b.payload);
     GPR_ASSERT(a.payload == c.payload);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_shutdown();
 }
 
@@ -188,16 +178,16 @@
           intern_keys, intern_values);
 
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem a, b, c;
   a = grpc_mdelem_from_slices(
-      &exec_ctx, maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
+      maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
       maybe_intern(grpc_slice_from_static_string("b"), intern_values));
   b = grpc_mdelem_from_slices(
-      &exec_ctx, maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
+      maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
       maybe_intern(grpc_slice_from_static_string("b"), intern_values));
   c = grpc_mdelem_from_slices(
-      &exec_ctx, maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
+      maybe_intern(grpc_slice_from_static_string("a"), intern_keys),
       maybe_intern(grpc_slice_from_static_string("b"), intern_values));
   GPR_ASSERT(grpc_mdelem_eq(a, a));
   GPR_ASSERT(grpc_mdelem_eq(b, b));
@@ -216,10 +206,10 @@
     GPR_ASSERT(a.payload != c.payload);
     GPR_ASSERT(b.payload != c.payload);
   }
-  GRPC_MDELEM_UNREF(&exec_ctx, a);
-  GRPC_MDELEM_UNREF(&exec_ctx, b);
-  GRPC_MDELEM_UNREF(&exec_ctx, c);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(a);
+  GRPC_MDELEM_UNREF(b);
+  GRPC_MDELEM_UNREF(c);
+
   grpc_shutdown();
 }
 
@@ -235,7 +225,7 @@
   gpr_log(GPR_INFO, "test_things_stick_around");
 
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   for (i = 0; i < nstrs; i++) {
     gpr_asprintf(&buffer, "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx%" PRIuPTR "x", i);
@@ -246,7 +236,7 @@
 
   for (i = 0; i < nstrs; i++) {
     grpc_slice_ref_internal(strs[i]);
-    grpc_slice_unref_internal(&exec_ctx, strs[i]);
+    grpc_slice_unref_internal(strs[i]);
   }
 
   for (i = 0; i < nstrs; i++) {
@@ -258,18 +248,17 @@
   }
 
   for (i = 0; i < nstrs; i++) {
-    grpc_slice_unref_internal(&exec_ctx, strs[shuf[i]]);
+    grpc_slice_unref_internal(strs[shuf[i]]);
     for (j = i + 1; j < nstrs; j++) {
       gpr_asprintf(&buffer, "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx%" PRIuPTR "x",
                    shuf[j]);
       test = grpc_slice_intern(grpc_slice_from_static_string(buffer));
       GPR_ASSERT(grpc_slice_is_equivalent(test, strs[shuf[j]]));
-      grpc_slice_unref_internal(&exec_ctx, test);
+      grpc_slice_unref_internal(test);
       gpr_free(buffer);
     }
   }
 
-  grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
   gpr_free(strs);
   gpr_free(shuf);
@@ -282,39 +271,38 @@
   gpr_log(GPR_INFO, "test_user_data_works");
 
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   ud1 = static_cast<int*>(gpr_malloc(sizeof(int)));
   *ud1 = 1;
   ud2 = static_cast<int*>(gpr_malloc(sizeof(int)));
   *ud2 = 2;
   md = grpc_mdelem_from_slices(
-      &exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc")),
+      grpc_slice_intern(grpc_slice_from_static_string("abc")),
       grpc_slice_intern(grpc_slice_from_static_string("123")));
   grpc_mdelem_set_user_data(md, gpr_free, ud1);
   grpc_mdelem_set_user_data(md, gpr_free, ud2);
   GPR_ASSERT(grpc_mdelem_get_user_data(md, gpr_free) == ud1);
-  GRPC_MDELEM_UNREF(&exec_ctx, md);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(md);
+
   grpc_shutdown();
 }
 
-static void verify_ascii_header_size(grpc_exec_ctx* exec_ctx, const char* key,
-                                     const char* value, bool intern_key,
-                                     bool intern_value) {
+static void verify_ascii_header_size(const char* key, const char* value,
+                                     bool intern_key, bool intern_value) {
   grpc_mdelem elem = grpc_mdelem_from_slices(
-      exec_ctx, maybe_intern(grpc_slice_from_static_string(key), intern_key),
+      maybe_intern(grpc_slice_from_static_string(key), intern_key),
       maybe_intern(grpc_slice_from_static_string(value), intern_value));
   size_t elem_size = grpc_mdelem_get_size_in_hpack_table(elem, false);
   size_t expected_size = 32 + strlen(key) + strlen(value);
   GPR_ASSERT(expected_size == elem_size);
-  GRPC_MDELEM_UNREF(exec_ctx, elem);
+  GRPC_MDELEM_UNREF(elem);
 }
 
-static void verify_binary_header_size(grpc_exec_ctx* exec_ctx, const char* key,
-                                      const uint8_t* value, size_t value_len,
-                                      bool intern_key, bool intern_value) {
+static void verify_binary_header_size(const char* key, const uint8_t* value,
+                                      size_t value_len, bool intern_key,
+                                      bool intern_value) {
   grpc_mdelem elem = grpc_mdelem_from_slices(
-      exec_ctx, maybe_intern(grpc_slice_from_static_string(key), intern_key),
+      maybe_intern(grpc_slice_from_static_string(key), intern_key),
       maybe_intern(grpc_slice_from_static_buffer(value, value_len),
                    intern_value));
   GPR_ASSERT(grpc_is_binary_header(GRPC_MDKEY(elem)));
@@ -324,9 +312,9 @@
   grpc_slice base64_encoded = grpc_chttp2_base64_encode(value_slice);
   size_t expected_size = 32 + strlen(key) + GRPC_SLICE_LENGTH(base64_encoded);
   GPR_ASSERT(expected_size == elem_size);
-  grpc_slice_unref_internal(exec_ctx, value_slice);
-  grpc_slice_unref_internal(exec_ctx, base64_encoded);
-  GRPC_MDELEM_UNREF(exec_ctx, elem);
+  grpc_slice_unref_internal(value_slice);
+  grpc_slice_unref_internal(base64_encoded);
+  GRPC_MDELEM_UNREF(elem);
 }
 
 #define BUFFER_SIZE 64
@@ -334,27 +322,23 @@
   gpr_log(GPR_INFO, "test_mdelem_size: intern_key=%d intern_value=%d",
           intern_key, intern_value);
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   uint8_t binary_value[BUFFER_SIZE] = {0};
   for (uint8_t i = 0; i < BUFFER_SIZE; i++) {
     binary_value[i] = i;
   }
 
-  verify_ascii_header_size(&exec_ctx, "hello", "world", intern_key,
-                           intern_value);
-  verify_ascii_header_size(&exec_ctx, "hello",
-                           "worldxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx", intern_key,
-                           intern_value);
-  verify_ascii_header_size(&exec_ctx, ":scheme", "http", intern_key,
-                           intern_value);
+  verify_ascii_header_size("hello", "world", intern_key, intern_value);
+  verify_ascii_header_size("hello", "worldxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
+                           intern_key, intern_value);
+  verify_ascii_header_size(":scheme", "http", intern_key, intern_value);
 
   for (uint8_t i = 0; i < BUFFER_SIZE; i++) {
-    verify_binary_header_size(&exec_ctx, "hello-bin", binary_value, i,
-                              intern_key, intern_value);
+    verify_binary_header_size("hello-bin", binary_value, i, intern_key,
+                              intern_value);
   }
 
-  grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
 }
 
@@ -362,13 +346,13 @@
   gpr_log(GPR_INFO, "test_static_metadata: dup_key=%d dup_value=%d", dup_key,
           dup_value);
   grpc_init();
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   for (size_t i = 0; i < GRPC_STATIC_MDELEM_COUNT; i++) {
     grpc_mdelem p = GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[i],
                                      GRPC_MDELEM_STORAGE_STATIC);
     grpc_mdelem q =
-        grpc_mdelem_from_slices(&exec_ctx, maybe_dup(GRPC_MDKEY(p), dup_key),
+        grpc_mdelem_from_slices(maybe_dup(GRPC_MDKEY(p), dup_key),
                                 maybe_dup(GRPC_MDVALUE(p), dup_value));
     GPR_ASSERT(grpc_mdelem_eq(p, q));
     if (dup_key || dup_value) {
@@ -376,16 +360,16 @@
     } else {
       GPR_ASSERT(p.payload == q.payload);
     }
-    GRPC_MDELEM_UNREF(&exec_ctx, p);
-    GRPC_MDELEM_UNREF(&exec_ctx, q);
+    GRPC_MDELEM_UNREF(p);
+    GRPC_MDELEM_UNREF(q);
   }
 
-  grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
 }
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
   test_no_op();
   for (int k = 0; k <= 1; k++) {
     for (int v = 0; v <= 1; v++) {
@@ -400,5 +384,6 @@
   test_create_many_persistant_metadata();
   test_things_stick_around();
   test_user_data_works();
+  grpc_shutdown();
   return 0;
 }
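
The calls exercised above are essentially the whole surface of the interned-metadata cache, and after this change none of them take a context argument. A minimal lifecycle sketch using the same identifiers as the tests (the tests create a grpc_core::ExecCtx on the thread first, so the sketch does too; grpc_init()/grpc_shutdown() bracketing is assumed):

    grpc_core::ExecCtx exec_ctx;
    grpc_mdelem md = grpc_mdelem_from_slices(
        grpc_slice_intern(grpc_slice_from_static_string("abc")),
        grpc_slice_intern(grpc_slice_from_static_string("123")));
    // GRPC_MDKEY(md) now compares equal to "abc", GRPC_MDVALUE(md) to "123".
    GRPC_MDELEM_UNREF(md);  // drop the ref; interned entries are reclaimed once unused
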
diff --git a/test/core/transport/status_conversion_test.cc b/test/core/transport/status_conversion_test.cc
index 7af5d12..1ed6ccf 100644
--- a/test/core/transport/status_conversion_test.cc
+++ b/test/core/transport/status_conversion_test.cc
@@ -22,12 +22,11 @@
 
 #define GRPC_STATUS_TO_HTTP2_ERROR(a, b) \
   GPR_ASSERT(grpc_status_to_http2_error(a) == (b))
-#define HTTP2_ERROR_TO_GRPC_STATUS(a, deadline, b)                           \
-  do {                                                                       \
-    grpc_exec_ctx my_exec_ctx = GRPC_EXEC_CTX_INIT;                          \
-    GPR_ASSERT(grpc_http2_error_to_grpc_status(&my_exec_ctx, a, deadline) == \
-               (b));                                                         \
-    grpc_exec_ctx_finish(&my_exec_ctx);                                      \
+#define HTTP2_ERROR_TO_GRPC_STATUS(a, deadline, b)                   \
+  do {                                                               \
+    grpc_core::ExecCtx exec_ctx;                                     \
+    GPR_ASSERT(grpc_http2_error_to_grpc_status(a, deadline) == (b)); \
+                                                                     \
   } while (0)
 #define GRPC_STATUS_TO_HTTP2_STATUS(a, b) \
   GPR_ASSERT(grpc_status_to_http2_status(a) == (b))
diff --git a/test/core/transport/stream_owned_slice_test.cc b/test/core/transport/stream_owned_slice_test.cc
index e82df21..7831f67 100644
--- a/test/core/transport/stream_owned_slice_test.cc
+++ b/test/core/transport/stream_owned_slice_test.cc
@@ -20,12 +20,14 @@
 
 #include "test/core/util/test_config.h"
 
+#include <grpc/grpc.h>
 #include <grpc/support/log.h>
 
-static void do_nothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+static void do_nothing(void* arg, grpc_error* error) {}
 
 int main(int argc, char** argv) {
   grpc_test_init(argc, argv);
+  grpc_init();
 
   uint8_t buffer[] = "abc123";
   grpc_stream_refcount r;
@@ -39,5 +41,6 @@
   grpc_slice_unref(slice);
   GPR_ASSERT(r.refs.count == 1);
 
+  grpc_shutdown();
   return 0;
 }
diff --git a/test/core/util/BUILD b/test/core/util/BUILD
index 6443553..2237cfc 100644
--- a/test/core/util/BUILD
+++ b/test/core/util/BUILD
@@ -16,7 +16,10 @@
 
 licenses(["notice"])  # Apache v2
 
-grpc_package(name = "test/core/util", visibility = "public")
+grpc_package(
+    name = "test/core/util",
+    visibility = "public",
+)
 
 grpc_cc_library(
     name = "gpr_test_util",
@@ -49,10 +52,12 @@
     name = "grpc_test_util_base",
     srcs = [
         "grpc_profiler.cc",
+        "histogram.cc",
         "mock_endpoint.cc",
         "parse_hexstring.cc",
         "passthru_endpoint.cc",
         "port.cc",
+        "port_isolated_runtime_environment.cc",
         "port_server_client.cc",
         "reconnect_server.cc",
         "slice_splitter.cc",
@@ -62,6 +67,7 @@
     ],
     hdrs = [
         "grpc_profiler.h",
+        "histogram.h",
         "mock_endpoint.h",
         "parse_hexstring.h",
         "passthru_endpoint.h",
@@ -76,8 +82,8 @@
     language = "C++",
     deps = [
         ":gpr_test_util",
+        ":grpc_debugger_macros",
         "//:grpc_common",
-        ":grpc_debugger_macros"
     ],
 )
 
@@ -107,13 +113,23 @@
     name = "fuzzer_corpus_test",
     testonly = 1,
     srcs = ["fuzzer_corpus_test.cc"],
+    external_deps = [
+        "gtest",
+        "gflags",
+    ],
     deps = [
         ":gpr_test_util",
         "//:grpc",
     ],
-    external_deps = [
-        "gtest",
-        "gflags",
+)
+
+grpc_cc_test(
+    name = "histogram_test",
+    srcs = ["histogram_test.cc"],
+    language = "C++",
+    deps = [
+        ":grpc_test_util",
+        "//:gpr",
     ],
 )
 
@@ -121,3 +137,8 @@
     name = "fuzzer_one_entry_runner",
     srcs = ["fuzzer_one_entry_runner.sh"],
 )
+
+sh_library(
+    name = "run_with_poller_sh",
+    srcs = ["run_with_poller.sh"],
+)
diff --git a/test/core/util/fuzzer_corpus_test.cc b/test/core/util/fuzzer_corpus_test.cc
index d7aea54..7849321 100644
--- a/test/core/util/fuzzer_corpus_test.cc
+++ b/test/core/util/fuzzer_corpus_test.cc
@@ -29,8 +29,8 @@
 #include "test/core/util/test_config.h"
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
-extern "C" bool squelch;
-extern "C" bool leak_check;
+extern bool squelch;
+extern bool leak_check;
 
 // In some distros, gflags is in the namespace google, and in some others,
 // in gflags. This hack is enabling us to find both.
diff --git a/src/core/lib/support/histogram.cc b/test/core/util/histogram.cc
similarity index 72%
rename from src/core/lib/support/histogram.cc
rename to test/core/util/histogram.cc
index 73c821a..2f916f8 100644
--- a/src/core/lib/support/histogram.cc
+++ b/test/core/util/histogram.cc
@@ -16,8 +16,6 @@
  *
  */
 
-#include <grpc/support/histogram.h>
-
 #include <math.h>
 #include <stddef.h>
 #include <string.h>
@@ -27,12 +25,14 @@
 #include <grpc/support/port_platform.h>
 #include <grpc/support/useful.h>
 
+#include "test/core/util/histogram.h"
+
 /* Histograms are stored with exponentially increasing bucket sizes.
    The first bucket is [0, m) where m = 1 + resolution
    Bucket n (n>=1) contains [m**n, m**(n+1))
    There are sufficient buckets to reach max_bucket_start */
 
-struct gpr_histogram {
+struct grpc_histogram {
   /* Sum of all values seen so far */
   double sum;
   /* Sum of squares of all values seen so far */
@@ -55,25 +55,25 @@
 };
 
 /* determine a bucket index given a value - does no bounds checking */
-static size_t bucket_for_unchecked(gpr_histogram* h, double x) {
+static size_t bucket_for_unchecked(grpc_histogram* h, double x) {
   return (size_t)(log(x) * h->one_on_log_multiplier);
 }
 
 /* bounds checked version of the above */
-static size_t bucket_for(gpr_histogram* h, double x) {
+static size_t bucket_for(grpc_histogram* h, double x) {
   size_t bucket = bucket_for_unchecked(h, GPR_CLAMP(x, 1.0, h->max_possible));
   GPR_ASSERT(bucket < h->num_buckets);
   return bucket;
 }
 
 /* at what value does a bucket start? */
-static double bucket_start(gpr_histogram* h, double x) {
+static double bucket_start(grpc_histogram* h, double x) {
   return pow(h->multiplier, x);
 }
 
-gpr_histogram* gpr_histogram_create(double resolution,
-                                    double max_bucket_start) {
-  gpr_histogram* h = (gpr_histogram*)gpr_malloc(sizeof(gpr_histogram));
+grpc_histogram* grpc_histogram_create(double resolution,
+                                      double max_bucket_start) {
+  grpc_histogram* h = (grpc_histogram*)gpr_malloc(sizeof(grpc_histogram));
   GPR_ASSERT(resolution > 0.0);
   GPR_ASSERT(max_bucket_start > resolution);
   h->sum = 0.0;
@@ -91,12 +91,12 @@
   return h;
 }
 
-void gpr_histogram_destroy(gpr_histogram* h) {
+void grpc_histogram_destroy(grpc_histogram* h) {
   gpr_free(h->buckets);
   gpr_free(h);
 }
 
-void gpr_histogram_add(gpr_histogram* h, double x) {
+void grpc_histogram_add(grpc_histogram* h, double x) {
   h->sum += x;
   h->sum_of_squares += x * x;
   h->count++;
@@ -109,22 +109,22 @@
   h->buckets[bucket_for(h, x)]++;
 }
 
-int gpr_histogram_merge(gpr_histogram* dst, const gpr_histogram* src) {
+int grpc_histogram_merge(grpc_histogram* dst, const grpc_histogram* src) {
   if ((dst->num_buckets != src->num_buckets) ||
       (dst->multiplier != src->multiplier)) {
     /* Fail because these histograms don't match */
     return 0;
   }
-  gpr_histogram_merge_contents(dst, src->buckets, src->num_buckets,
-                               src->min_seen, src->max_seen, src->sum,
-                               src->sum_of_squares, src->count);
+  grpc_histogram_merge_contents(dst, src->buckets, src->num_buckets,
+                                src->min_seen, src->max_seen, src->sum,
+                                src->sum_of_squares, src->count);
   return 1;
 }
 
-void gpr_histogram_merge_contents(gpr_histogram* dst, const uint32_t* data,
-                                  size_t data_count, double min_seen,
-                                  double max_seen, double sum,
-                                  double sum_of_squares, double count) {
+void grpc_histogram_merge_contents(grpc_histogram* dst, const uint32_t* data,
+                                   size_t data_count, double min_seen,
+                                   double max_seen, double sum,
+                                   double sum_of_squares, double count) {
   size_t i;
   GPR_ASSERT(dst->num_buckets == data_count);
   dst->sum += sum;
@@ -141,7 +141,7 @@
   }
 }
 
-static double threshold_for_count_below(gpr_histogram* h, double count_below) {
+static double threshold_for_count_below(grpc_histogram* h, double count_below) {
   double count_so_far;
   double lower_bound;
   double upper_bound;
@@ -190,38 +190,38 @@
   }
 }
 
-double gpr_histogram_percentile(gpr_histogram* h, double percentile) {
+double grpc_histogram_percentile(grpc_histogram* h, double percentile) {
   return threshold_for_count_below(h, h->count * percentile / 100.0);
 }
 
-double gpr_histogram_mean(gpr_histogram* h) {
+double grpc_histogram_mean(grpc_histogram* h) {
   GPR_ASSERT(h->count != 0);
   return h->sum / h->count;
 }
 
-double gpr_histogram_stddev(gpr_histogram* h) {
-  return sqrt(gpr_histogram_variance(h));
+double grpc_histogram_stddev(grpc_histogram* h) {
+  return sqrt(grpc_histogram_variance(h));
 }
 
-double gpr_histogram_variance(gpr_histogram* h) {
+double grpc_histogram_variance(grpc_histogram* h) {
   if (h->count == 0) return 0.0;
   return (h->sum_of_squares * h->count - h->sum * h->sum) /
          (h->count * h->count);
 }
 
-double gpr_histogram_maximum(gpr_histogram* h) { return h->max_seen; }
+double grpc_histogram_maximum(grpc_histogram* h) { return h->max_seen; }
 
-double gpr_histogram_minimum(gpr_histogram* h) { return h->min_seen; }
+double grpc_histogram_minimum(grpc_histogram* h) { return h->min_seen; }
 
-double gpr_histogram_count(gpr_histogram* h) { return h->count; }
+double grpc_histogram_count(grpc_histogram* h) { return h->count; }
 
-double gpr_histogram_sum(gpr_histogram* h) { return h->sum; }
+double grpc_histogram_sum(grpc_histogram* h) { return h->sum; }
 
-double gpr_histogram_sum_of_squares(gpr_histogram* h) {
+double grpc_histogram_sum_of_squares(grpc_histogram* h) {
   return h->sum_of_squares;
 }
 
-const uint32_t* gpr_histogram_get_contents(gpr_histogram* h, size_t* size) {
+const uint32_t* grpc_histogram_get_contents(grpc_histogram* h, size_t* size) {
   *size = h->num_buckets;
   return h->buckets;
 }
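
The bucket layout described in the comment at the top of this file is easiest to see with numbers plugged in. With the 0.01 resolution the tests use, the multiplier is m = 1.01 and the bucket index for a value x is floor(ln x / ln 1.01):

    x = 10000:  ln 10000 / ln 1.01  =  9.2103 / 0.00995  ~=  925.6  ->  bucket 925
    bucket 925 spans [1.01^925, 1.01^926)  ~=  [9938, 10037)

so the bucket containing 10000 is about 100 wide, roughly 1% of the value, which is what the resolution parameter promises. This is the same computation bucket_for_unchecked performs, assuming one_on_log_multiplier is 1 / ln(m) (illustrative only):

    size_t bucket = (size_t)(log(10000.0) / log(1.01));  // == 925
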
diff --git a/test/core/util/histogram.h b/test/core/util/histogram.h
new file mode 100644
index 0000000..9d4985e
--- /dev/null
+++ b/test/core/util/histogram.h
@@ -0,0 +1,62 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#ifndef GRPC_TEST_CORE_UTIL_HISTOGRAM_H
+#define GRPC_TEST_CORE_UTIL_HISTOGRAM_H
+
+#include <grpc/support/port_platform.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct grpc_histogram grpc_histogram;
+
+grpc_histogram* grpc_histogram_create(double resolution,
+                                      double max_bucket_start);
+void grpc_histogram_destroy(grpc_histogram* h);
+void grpc_histogram_add(grpc_histogram* h, double x);
+
+/** The following merges the second histogram into the first. It only works
+   if they have the same buckets and resolution. Returns 0 on failure, 1
+   on success */
+int grpc_histogram_merge(grpc_histogram* dst, const grpc_histogram* src);
+
+double grpc_histogram_percentile(grpc_histogram* histogram, double percentile);
+double grpc_histogram_mean(grpc_histogram* histogram);
+double grpc_histogram_stddev(grpc_histogram* histogram);
+double grpc_histogram_variance(grpc_histogram* histogram);
+double grpc_histogram_maximum(grpc_histogram* histogram);
+double grpc_histogram_minimum(grpc_histogram* histogram);
+double grpc_histogram_count(grpc_histogram* histogram);
+double grpc_histogram_sum(grpc_histogram* histogram);
+double grpc_histogram_sum_of_squares(grpc_histogram* histogram);
+
+const uint32_t* grpc_histogram_get_contents(grpc_histogram* histogram,
+                                            size_t* count);
+void grpc_histogram_merge_contents(grpc_histogram* histogram,
+                                   const uint32_t* data, size_t data_count,
+                                   double min_seen, double max_seen, double sum,
+                                   double sum_of_squares, double count);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GRPC_TEST_CORE_UTIL_HISTOGRAM_H */
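
A compact usage sketch of the interface declared above (the values mirror test_simple in the new histogram_test.cc below; the functions are exactly the ones in this header):

    #include "test/core/util/histogram.h"

    grpc_histogram* h = grpc_histogram_create(0.01 /* resolution */, 60e9 /* max_bucket_start */);
    grpc_histogram_add(h, 10000);
    grpc_histogram_add(h, 10000);
    grpc_histogram_add(h, 11000);
    grpc_histogram_add(h, 11000);
    double p50 = grpc_histogram_percentile(h, 50);  // lands between the two sample values
    double mean = grpc_histogram_mean(h);           // 10500
    grpc_histogram_destroy(h);
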
diff --git a/test/core/util/histogram_test.cc b/test/core/util/histogram_test.cc
new file mode 100644
index 0000000..b96ac7d
--- /dev/null
+++ b/test/core/util/histogram_test.cc
@@ -0,0 +1,163 @@
+/*
+ *
+ * Copyright 2015 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+#include "test/core/util/histogram.h"
+#include <grpc/support/log.h>
+
+#define LOG_TEST(x) gpr_log(GPR_INFO, "%s", x);
+
+static void test_no_op(void) {
+  grpc_histogram_destroy(grpc_histogram_create(0.01, 60e9));
+}
+
+static void expect_percentile(grpc_histogram* h, double percentile,
+                              double min_expect, double max_expect) {
+  double got = grpc_histogram_percentile(h, percentile);
+  gpr_log(GPR_INFO, "@%f%%, expect %f <= %f <= %f", percentile, min_expect, got,
+          max_expect);
+  GPR_ASSERT(min_expect <= got);
+  GPR_ASSERT(got <= max_expect);
+}
+
+static void test_simple(void) {
+  grpc_histogram* h;
+
+  LOG_TEST("test_simple");
+
+  h = grpc_histogram_create(0.01, 60e9);
+  grpc_histogram_add(h, 10000);
+  grpc_histogram_add(h, 10000);
+  grpc_histogram_add(h, 11000);
+  grpc_histogram_add(h, 11000);
+
+  expect_percentile(h, 50, 10001, 10999);
+  GPR_ASSERT(grpc_histogram_mean(h) == 10500);
+
+  grpc_histogram_destroy(h);
+}
+
+static void test_percentile(void) {
+  grpc_histogram* h;
+  double last;
+  double i;
+  double cur;
+
+  LOG_TEST("test_percentile");
+
+  h = grpc_histogram_create(0.05, 1e9);
+  grpc_histogram_add(h, 2.5);
+  grpc_histogram_add(h, 2.5);
+  grpc_histogram_add(h, 8);
+  grpc_histogram_add(h, 4);
+
+  GPR_ASSERT(grpc_histogram_count(h) == 4);
+  GPR_ASSERT(grpc_histogram_minimum(h) == 2.5);
+  GPR_ASSERT(grpc_histogram_maximum(h) == 8);
+  GPR_ASSERT(grpc_histogram_sum(h) == 17);
+  GPR_ASSERT(grpc_histogram_sum_of_squares(h) == 92.5);
+  GPR_ASSERT(grpc_histogram_mean(h) == 4.25);
+  GPR_ASSERT(grpc_histogram_variance(h) == 5.0625);
+  GPR_ASSERT(grpc_histogram_stddev(h) == 2.25);
+
+  expect_percentile(h, -10, 2.5, 2.5);
+  expect_percentile(h, 0, 2.5, 2.5);
+  expect_percentile(h, 12.5, 2.5, 2.5);
+  expect_percentile(h, 25, 2.5, 2.5);
+  expect_percentile(h, 37.5, 2.5, 2.8);
+  expect_percentile(h, 50, 3.0, 3.5);
+  expect_percentile(h, 62.5, 3.5, 4.5);
+  expect_percentile(h, 75, 5, 7.9);
+  expect_percentile(h, 100, 8, 8);
+  expect_percentile(h, 110, 8, 8);
+
+  /* test monotonicity */
+  last = 0.0;
+  for (i = 0; i < 100.0; i += 0.01) {
+    cur = grpc_histogram_percentile(h, i);
+    GPR_ASSERT(cur >= last);
+    last = cur;
+  }
+
+  grpc_histogram_destroy(h);
+}
+
+static void test_merge(void) {
+  grpc_histogram *h1, *h2;
+  double last;
+  double i;
+  double cur;
+
+  LOG_TEST("test_merge");
+
+  h1 = grpc_histogram_create(0.05, 1e9);
+  grpc_histogram_add(h1, 2.5);
+  grpc_histogram_add(h1, 2.5);
+  grpc_histogram_add(h1, 8);
+  grpc_histogram_add(h1, 4);
+
+  h2 = grpc_histogram_create(0.01, 1e9);
+  GPR_ASSERT(grpc_histogram_merge(h1, h2) == 0);
+  grpc_histogram_destroy(h2);
+
+  h2 = grpc_histogram_create(0.05, 1e10);
+  GPR_ASSERT(grpc_histogram_merge(h1, h2) == 0);
+  grpc_histogram_destroy(h2);
+
+  h2 = grpc_histogram_create(0.05, 1e9);
+  GPR_ASSERT(grpc_histogram_merge(h1, h2) == 1);
+  GPR_ASSERT(grpc_histogram_count(h1) == 4);
+  GPR_ASSERT(grpc_histogram_minimum(h1) == 2.5);
+  GPR_ASSERT(grpc_histogram_maximum(h1) == 8);
+  GPR_ASSERT(grpc_histogram_sum(h1) == 17);
+  GPR_ASSERT(grpc_histogram_sum_of_squares(h1) == 92.5);
+  GPR_ASSERT(grpc_histogram_mean(h1) == 4.25);
+  GPR_ASSERT(grpc_histogram_variance(h1) == 5.0625);
+  GPR_ASSERT(grpc_histogram_stddev(h1) == 2.25);
+  grpc_histogram_destroy(h2);
+
+  h2 = grpc_histogram_create(0.05, 1e9);
+  grpc_histogram_add(h2, 7.0);
+  grpc_histogram_add(h2, 17.0);
+  grpc_histogram_add(h2, 1.0);
+  GPR_ASSERT(grpc_histogram_merge(h1, h2) == 1);
+  GPR_ASSERT(grpc_histogram_count(h1) == 7);
+  GPR_ASSERT(grpc_histogram_minimum(h1) == 1.0);
+  GPR_ASSERT(grpc_histogram_maximum(h1) == 17.0);
+  GPR_ASSERT(grpc_histogram_sum(h1) == 42.0);
+  GPR_ASSERT(grpc_histogram_sum_of_squares(h1) == 431.5);
+  GPR_ASSERT(grpc_histogram_mean(h1) == 6.0);
+
+  /* test monotonicity */
+  last = 0.0;
+  for (i = 0; i < 100.0; i += 0.01) {
+    cur = grpc_histogram_percentile(h1, i);
+    GPR_ASSERT(cur >= last);
+    last = cur;
+  }
+
+  grpc_histogram_destroy(h1);
+  grpc_histogram_destroy(h2);
+}
+
+int main(void) {
+  test_no_op();
+  test_simple();
+  test_percentile();
+  test_merge();
+  return 0;
+}
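
The exact constants asserted in test_percentile and test_merge fall straight out of the population-moment formulas the histogram implements (variance = (n * sum_sq - sum^2) / n^2, as in grpc_histogram_variance above):

    test_percentile: samples {2.5, 2.5, 8, 4}
      n = 4, sum = 17, sum_sq = 6.25 + 6.25 + 64 + 16 = 92.5
      mean     = 17 / 4                 = 4.25
      variance = (4 * 92.5 - 17^2) / 16 = 81 / 16 = 5.0625
      stddev   = sqrt(5.0625)           = 2.25

    test_merge: after folding {7, 17, 1} into the same samples
      n = 7, sum = 42, sum_sq = 92.5 + 49 + 289 + 1 = 431.5, mean = 6.0, min = 1, max = 17
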
diff --git a/test/core/util/mock_endpoint.cc b/test/core/util/mock_endpoint.cc
index d9545ef..4b35a58 100644
--- a/test/core/util/mock_endpoint.cc
+++ b/test/core/util/mock_endpoint.cc
@@ -40,13 +40,13 @@
   grpc_resource_user* resource_user;
 } grpc_mock_endpoint;
 
-static void me_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                    grpc_slice_buffer* slices, grpc_closure* cb) {
+static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                    grpc_closure* cb) {
   grpc_mock_endpoint* m = (grpc_mock_endpoint*)ep;
   gpr_mu_lock(&m->mu);
   if (m->read_buffer.count > 0) {
     grpc_slice_buffer_swap(&m->read_buffer, slices);
-    GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
   } else {
     m->on_read = cb;
     m->on_read_out = slices;
@@ -54,44 +54,41 @@
   gpr_mu_unlock(&m->mu);
 }
 
-static void me_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                     grpc_slice_buffer* slices, grpc_closure* cb) {
+static void me_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                     grpc_closure* cb) {
   grpc_mock_endpoint* m = (grpc_mock_endpoint*)ep;
   for (size_t i = 0; i < slices->count; i++) {
     m->on_write(slices->slices[i]);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
 }
 
-static void me_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                              grpc_pollset* pollset) {}
+static void me_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {}
 
-static void me_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+static void me_add_to_pollset_set(grpc_endpoint* ep,
                                   grpc_pollset_set* pollset) {}
 
-static void me_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                       grpc_endpoint* ep,
+static void me_delete_from_pollset_set(grpc_endpoint* ep,
                                        grpc_pollset_set* pollset) {}
 
-static void me_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                        grpc_error* why) {
+static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
   grpc_mock_endpoint* m = (grpc_mock_endpoint*)ep;
   gpr_mu_lock(&m->mu);
   if (m->on_read) {
-    GRPC_CLOSURE_SCHED(exec_ctx, m->on_read,
+    GRPC_CLOSURE_SCHED(m->on_read,
                        GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                            "Endpoint Shutdown", &why, 1));
     m->on_read = nullptr;
   }
   gpr_mu_unlock(&m->mu);
-  grpc_resource_user_shutdown(exec_ctx, m->resource_user);
+  grpc_resource_user_shutdown(m->resource_user);
   GRPC_ERROR_UNREF(why);
 }
 
-static void me_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+static void me_destroy(grpc_endpoint* ep) {
   grpc_mock_endpoint* m = (grpc_mock_endpoint*)ep;
   grpc_slice_buffer_destroy(&m->read_buffer);
-  grpc_resource_user_unref(exec_ctx, m->resource_user);
+  grpc_resource_user_unref(m->resource_user);
   gpr_free(m);
 }
 
@@ -134,13 +131,12 @@
   return &m->base;
 }
 
-void grpc_mock_endpoint_put_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                                 grpc_slice slice) {
+void grpc_mock_endpoint_put_read(grpc_endpoint* ep, grpc_slice slice) {
   grpc_mock_endpoint* m = (grpc_mock_endpoint*)ep;
   gpr_mu_lock(&m->mu);
   if (m->on_read != nullptr) {
     grpc_slice_buffer_add(m->on_read_out, slice);
-    GRPC_CLOSURE_SCHED(exec_ctx, m->on_read, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(m->on_read, GRPC_ERROR_NONE);
     m->on_read = nullptr;
   } else {
     grpc_slice_buffer_add(&m->read_buffer, slice);
diff --git a/test/core/util/mock_endpoint.h b/test/core/util/mock_endpoint.h
index ccabaf7..6521d3e 100644
--- a/test/core/util/mock_endpoint.h
+++ b/test/core/util/mock_endpoint.h
@@ -23,8 +23,7 @@
 
 grpc_endpoint* grpc_mock_endpoint_create(void (*on_write)(grpc_slice slice),
                                          grpc_resource_quota* resource_quota);
-void grpc_mock_endpoint_put_read(grpc_exec_ctx* exec_ctx,
-                                 grpc_endpoint* mock_endpoint,
+void grpc_mock_endpoint_put_read(grpc_endpoint* mock_endpoint,
                                  grpc_slice slice);
 
 #endif
diff --git a/test/core/util/one_corpus_entry_fuzzer.cc b/test/core/util/one_corpus_entry_fuzzer.cc
index c0b67da..c745eb5 100644
--- a/test/core/util/one_corpus_entry_fuzzer.cc
+++ b/test/core/util/one_corpus_entry_fuzzer.cc
@@ -18,7 +18,10 @@
 
 #include <stdbool.h>
 
+#include <grpc/grpc.h>
+
 #include <grpc/support/log.h>
+#include "src/core/lib/iomgr/exec_ctx.h"
 #include "src/core/lib/iomgr/load_file.h"
 
 extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size);
@@ -30,10 +33,15 @@
   grpc_slice buffer;
   squelch = false;
   leak_check = false;
+  /* TODO(yashkt) Calling grpc_init breaks tests. Fix the tests and replace
+   * grpc_core::ExecCtx::GlobalInit with grpc_init and GlobalShutdown with
+   * grpc_shutdown */
   GPR_ASSERT(
       GRPC_LOG_IF_ERROR("load_file", grpc_load_file(argv[1], 0, &buffer)));
   LLVMFuzzerTestOneInput(GRPC_SLICE_START_PTR(buffer),
                          GRPC_SLICE_LENGTH(buffer));
+  grpc_core::ExecCtx::GlobalInit();
   grpc_slice_unref(buffer);
+  grpc_core::ExecCtx::GlobalShutdown();
   return 0;
 }
diff --git a/test/core/util/passthru_endpoint.cc b/test/core/util/passthru_endpoint.cc
index a9efe22..5f127cb 100644
--- a/test/core/util/passthru_endpoint.cc
+++ b/test/core/util/passthru_endpoint.cc
@@ -49,22 +49,22 @@
   int halves;
   grpc_passthru_endpoint_stats* stats;
   grpc_passthru_endpoint_stats
-      dummy_stats;  // used if constructor stats == NULL
+      dummy_stats;  // used if constructor stats == nullptr
   bool shutdown;
   half client;
   half server;
 };
 
-static void me_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                    grpc_slice_buffer* slices, grpc_closure* cb) {
+static void me_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                    grpc_closure* cb) {
   half* m = (half*)ep;
   gpr_mu_lock(&m->parent->mu);
   if (m->parent->shutdown) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx, cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already shutdown"));
+        cb, GRPC_ERROR_CREATE_FROM_STATIC_STRING("Already shutdown"));
   } else if (m->read_buffer.count > 0) {
     grpc_slice_buffer_swap(&m->read_buffer, slices);
-    GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
   } else {
     m->on_read = cb;
     m->on_read_out = slices;
@@ -77,8 +77,8 @@
   return &h->parent->client;
 }
 
-static void me_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                     grpc_slice_buffer* slices, grpc_closure* cb) {
+static void me_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                     grpc_closure* cb) {
   half* m = other_half((half*)ep);
   gpr_mu_lock(&m->parent->mu);
   grpc_error* error = GRPC_ERROR_NONE;
@@ -89,7 +89,7 @@
     for (size_t i = 0; i < slices->count; i++) {
       grpc_slice_buffer_add(m->on_read_out, grpc_slice_copy(slices->slices[i]));
     }
-    GRPC_CLOSURE_SCHED(exec_ctx, m->on_read, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(m->on_read, GRPC_ERROR_NONE);
     m->on_read = nullptr;
   } else {
     for (size_t i = 0; i < slices->count; i++) {
@@ -98,52 +98,49 @@
     }
   }
   gpr_mu_unlock(&m->parent->mu);
-  GRPC_CLOSURE_SCHED(exec_ctx, cb, error);
+  GRPC_CLOSURE_SCHED(cb, error);
 }
 
-static void me_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                              grpc_pollset* pollset) {}
+static void me_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {}
 
-static void me_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+static void me_add_to_pollset_set(grpc_endpoint* ep,
                                   grpc_pollset_set* pollset) {}
 
-static void me_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                       grpc_endpoint* ep,
+static void me_delete_from_pollset_set(grpc_endpoint* ep,
                                        grpc_pollset_set* pollset) {}
 
-static void me_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                        grpc_error* why) {
+static void me_shutdown(grpc_endpoint* ep, grpc_error* why) {
   half* m = (half*)ep;
   gpr_mu_lock(&m->parent->mu);
   m->parent->shutdown = true;
   if (m->on_read) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx, m->on_read,
+        m->on_read,
         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Shutdown", &why, 1));
     m->on_read = nullptr;
   }
   m = other_half(m);
   if (m->on_read) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx, m->on_read,
+        m->on_read,
         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING("Shutdown", &why, 1));
     m->on_read = nullptr;
   }
   gpr_mu_unlock(&m->parent->mu);
-  grpc_resource_user_shutdown(exec_ctx, m->resource_user);
+  grpc_resource_user_shutdown(m->resource_user);
   GRPC_ERROR_UNREF(why);
 }
 
-static void me_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+static void me_destroy(grpc_endpoint* ep) {
   passthru_endpoint* p = ((half*)ep)->parent;
   gpr_mu_lock(&p->mu);
   if (0 == --p->halves) {
     gpr_mu_unlock(&p->mu);
     gpr_mu_destroy(&p->mu);
-    grpc_slice_buffer_destroy_internal(exec_ctx, &p->client.read_buffer);
-    grpc_slice_buffer_destroy_internal(exec_ctx, &p->server.read_buffer);
-    grpc_resource_user_unref(exec_ctx, p->client.resource_user);
-    grpc_resource_user_unref(exec_ctx, p->server.resource_user);
+    grpc_slice_buffer_destroy_internal(&p->client.read_buffer);
+    grpc_slice_buffer_destroy_internal(&p->server.read_buffer);
+    grpc_resource_user_unref(p->client.resource_user);
+    grpc_resource_user_unref(p->server.resource_user);
     gpr_free(p);
   } else {
     gpr_mu_unlock(&p->mu);
diff --git a/test/core/util/port.h b/test/core/util/port.h
index 602099d..3a4cf44 100644
--- a/test/core/util/port.h
+++ b/test/core/util/port.h
@@ -19,10 +19,6 @@
 #ifndef GRPC_TEST_CORE_UTIL_PORT_H
 #define GRPC_TEST_CORE_UTIL_PORT_H
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 typedef struct grpc_pick_port_functions {
   int (*pick_unused_port_fn)(void);
   int (*pick_unused_port_or_die_fn)(void);
@@ -45,8 +41,4 @@
 /** Request the family of pick_port functions in \a functions be used. */
 void grpc_set_pick_port_functions(grpc_pick_port_functions functions);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_TEST_CORE_UTIL_PORT_H */
diff --git a/test/core/util/port_isolated_runtime_environment.cc b/test/core/util/port_isolated_runtime_environment.cc
new file mode 100644
index 0000000..5f0585e
--- /dev/null
+++ b/test/core/util/port_isolated_runtime_environment.cc
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/* When running tests on remote machines, the framework takes a round-robin pick
+ * of a port within a certain range. There is no need to recycle ports.
+ */
+#include "src/core/lib/iomgr/port.h"
+#include "test/core/util/test_config.h"
+#if defined(GRPC_PORT_ISOLATED_RUNTIME)
+
+#include "test/core/util/port.h"
+
+#define LOWER_PORT 49152
+static int s_allocated_port = LOWER_PORT;
+
+int grpc_pick_unused_port_or_die(void) {
+  int allocated_port = s_allocated_port++;
+  if (s_allocated_port == 65536) {
+    s_allocated_port = LOWER_PORT;
+  }
+
+  return allocated_port;
+}
+
+void grpc_recycle_unused_port(int port) { (void)port; }
+
+#endif /* GRPC_PORT_ISOLATED_RUNTIME */
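The picker above hands out ports sequentially from the IANA dynamic/private range (49152-65535) and simply wraps around instead of recycling, which is safe as long as isolated runs do not contend for the same ports. A standalone sketch of just the wraparound behavior (a copy of the logic above, for illustration only and not part of the tree):

#include <cassert>

static int next_port = 49152;

static int pick_port(void) {
  int p = next_port++;
  if (next_port == 65536) next_port = 49152; /* wrap back to the start of the range */
  return p;
}

int main(void) {
  next_port = 65535;
  assert(pick_port() == 65535); /* last port of the range... */
  assert(pick_port() == 49152); /* ...then wrap around */
  return 0;
}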
diff --git a/test/core/util/port_server_client.cc b/test/core/util/port_server_client.cc
index edec50b..7e76c80 100644
--- a/test/core/util/port_server_client.cc
+++ b/test/core/util/port_server_client.cc
@@ -40,22 +40,19 @@
   int done;
 } freereq;
 
-static void destroy_pops_and_shutdown(grpc_exec_ctx* exec_ctx, void* p,
-                                      grpc_error* error) {
+static void destroy_pops_and_shutdown(void* p, grpc_error* error) {
   grpc_pollset* pollset = grpc_polling_entity_pollset((grpc_polling_entity*)p);
-  grpc_pollset_destroy(exec_ctx, pollset);
+  grpc_pollset_destroy(pollset);
   gpr_free(pollset);
 }
 
-static void freed_port_from_server(grpc_exec_ctx* exec_ctx, void* arg,
-                                   grpc_error* error) {
+static void freed_port_from_server(void* arg, grpc_error* error) {
   freereq* pr = (freereq*)arg;
   gpr_mu_lock(pr->mu);
   pr->done = 1;
   GRPC_LOG_IF_ERROR(
       "pollset_kick",
-      grpc_pollset_kick(exec_ctx, grpc_polling_entity_pollset(&pr->pops),
-                        nullptr));
+      grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), nullptr));
   gpr_mu_unlock(pr->mu);
 }
 
@@ -65,7 +62,7 @@
   grpc_httpcli_response rsp;
   freereq pr;
   char* path;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_closure* shutdown_closure;
 
   grpc_init();
@@ -87,30 +84,30 @@
   grpc_httpcli_context_init(&context);
   grpc_resource_quota* resource_quota =
       grpc_resource_quota_create("port_server_client/free");
-  grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
-                   grpc_exec_ctx_now(&exec_ctx) + 30 * GPR_MS_PER_SEC,
+  grpc_httpcli_get(&context, &pr.pops, resource_quota, &req,
+                   grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC,
                    GRPC_CLOSURE_CREATE(freed_port_from_server, &pr,
                                        grpc_schedule_on_exec_ctx),
                    &rsp);
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_resource_quota_unref_internal(resource_quota);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_mu_lock(pr.mu);
   while (!pr.done) {
     grpc_pollset_worker* worker = nullptr;
     if (!GRPC_LOG_IF_ERROR(
             "pollset_work",
-            grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
-                              &worker,
-                              grpc_exec_ctx_now(&exec_ctx) + GPR_MS_PER_SEC))) {
+            grpc_pollset_work(
+                grpc_polling_entity_pollset(&pr.pops), &worker,
+                grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC))) {
       pr.done = 1;
     }
   }
   gpr_mu_unlock(pr.mu);
 
-  grpc_httpcli_context_destroy(&exec_ctx, &context);
-  grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
+  grpc_httpcli_context_destroy(&context);
+  grpc_pollset_shutdown(grpc_polling_entity_pollset(&pr.pops),
                         shutdown_closure);
-  grpc_exec_ctx_finish(&exec_ctx);
+
   gpr_free(path);
   grpc_http_response_destroy(&rsp);
 
@@ -127,8 +124,7 @@
   grpc_httpcli_response response;
 } portreq;
 
-static void got_port_from_server(grpc_exec_ctx* exec_ctx, void* arg,
-                                 grpc_error* error) {
+static void got_port_from_server(void* arg, grpc_error* error) {
   size_t i;
   int port = 0;
   portreq* pr = (portreq*)arg;
@@ -154,8 +150,7 @@
       pr->port = 0;
       GRPC_LOG_IF_ERROR(
           "pollset_kick",
-          grpc_pollset_kick(exec_ctx, grpc_polling_entity_pollset(&pr->pops),
-                            nullptr));
+          grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), nullptr));
       gpr_mu_unlock(pr->mu);
       return;
     }
@@ -172,12 +167,12 @@
     memset(&pr->response, 0, sizeof(pr->response));
     grpc_resource_quota* resource_quota =
         grpc_resource_quota_create("port_server_client/pick_retry");
-    grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, resource_quota, &req,
-                     grpc_exec_ctx_now(exec_ctx) + 30 * GPR_MS_PER_SEC,
+    grpc_httpcli_get(pr->ctx, &pr->pops, resource_quota, &req,
+                     grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC,
                      GRPC_CLOSURE_CREATE(got_port_from_server, pr,
                                          grpc_schedule_on_exec_ctx),
                      &pr->response);
-    grpc_resource_quota_unref_internal(exec_ctx, resource_quota);
+    grpc_resource_quota_unref_internal(resource_quota);
     return;
   }
   GPR_ASSERT(response);
@@ -191,8 +186,7 @@
   pr->port = port;
   GRPC_LOG_IF_ERROR(
       "pollset_kick",
-      grpc_pollset_kick(exec_ctx, grpc_polling_entity_pollset(&pr->pops),
-                        nullptr));
+      grpc_pollset_kick(grpc_polling_entity_pollset(&pr->pops), nullptr));
   gpr_mu_unlock(pr->mu);
 }
 
@@ -200,53 +194,55 @@
   grpc_httpcli_context context;
   grpc_httpcli_request req;
   portreq pr;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_closure* shutdown_closure;
 
   grpc_init();
+  {
+    grpc_core::ExecCtx exec_ctx;
+    memset(&pr, 0, sizeof(pr));
+    memset(&req, 0, sizeof(req));
+    grpc_pollset* pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
+    grpc_pollset_init(pollset, &pr.mu);
+    pr.pops = grpc_polling_entity_create_from_pollset(pollset);
+    shutdown_closure = GRPC_CLOSURE_CREATE(destroy_pops_and_shutdown, &pr.pops,
+                                           grpc_schedule_on_exec_ctx);
+    pr.port = -1;
+    pr.server = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS);
+    pr.ctx = &context;
 
-  memset(&pr, 0, sizeof(pr));
-  memset(&req, 0, sizeof(req));
-  grpc_pollset* pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
-  grpc_pollset_init(pollset, &pr.mu);
-  pr.pops = grpc_polling_entity_create_from_pollset(pollset);
-  shutdown_closure = GRPC_CLOSURE_CREATE(destroy_pops_and_shutdown, &pr.pops,
-                                         grpc_schedule_on_exec_ctx);
-  pr.port = -1;
-  pr.server = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS);
-  pr.ctx = &context;
+    req.host = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS);
+    req.http.path = const_cast<char*>("/get");
 
-  req.host = const_cast<char*>(GRPC_PORT_SERVER_ADDRESS);
-  req.http.path = const_cast<char*>("/get");
-
-  grpc_httpcli_context_init(&context);
-  grpc_resource_quota* resource_quota =
-      grpc_resource_quota_create("port_server_client/pick");
-  grpc_httpcli_get(
-      &exec_ctx, &context, &pr.pops, resource_quota, &req,
-      grpc_exec_ctx_now(&exec_ctx) + 30 * GPR_MS_PER_SEC,
-      GRPC_CLOSURE_CREATE(got_port_from_server, &pr, grpc_schedule_on_exec_ctx),
-      &pr.response);
-  grpc_resource_quota_unref_internal(&exec_ctx, resource_quota);
-  grpc_exec_ctx_flush(&exec_ctx);
-  gpr_mu_lock(pr.mu);
-  while (pr.port == -1) {
-    grpc_pollset_worker* worker = nullptr;
-    if (!GRPC_LOG_IF_ERROR(
-            "pollset_work",
-            grpc_pollset_work(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
-                              &worker,
-                              grpc_exec_ctx_now(&exec_ctx) + GPR_MS_PER_SEC))) {
-      pr.port = 0;
+    grpc_httpcli_context_init(&context);
+    grpc_resource_quota* resource_quota =
+        grpc_resource_quota_create("port_server_client/pick");
+    grpc_httpcli_get(&context, &pr.pops, resource_quota, &req,
+                     grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC,
+                     GRPC_CLOSURE_CREATE(got_port_from_server, &pr,
+                                         grpc_schedule_on_exec_ctx),
+                     &pr.response);
+    grpc_resource_quota_unref_internal(resource_quota);
+    grpc_core::ExecCtx::Get()->Flush();
+    gpr_mu_lock(pr.mu);
+    while (pr.port == -1) {
+      grpc_pollset_worker* worker = nullptr;
+      if (!GRPC_LOG_IF_ERROR(
+              "pollset_work",
+              grpc_pollset_work(
+                  grpc_polling_entity_pollset(&pr.pops), &worker,
+                  grpc_core::ExecCtx::Get()->Now() + GPR_MS_PER_SEC))) {
+        pr.port = 0;
+      }
     }
-  }
-  gpr_mu_unlock(pr.mu);
+    gpr_mu_unlock(pr.mu);
 
-  grpc_http_response_destroy(&pr.response);
-  grpc_httpcli_context_destroy(&exec_ctx, &context);
-  grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&pr.pops),
-                        shutdown_closure);
-  grpc_exec_ctx_finish(&exec_ctx);
+    grpc_http_response_destroy(&pr.response);
+    grpc_httpcli_context_destroy(&context);
+    grpc_pollset_shutdown(grpc_polling_entity_pollset(&pr.pops),
+                          shutdown_closure);
+
+    grpc_core::ExecCtx::Get()->Flush();
+  }
   grpc_shutdown();
 
   return pr.port;
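The port_server_client changes above follow the exec_ctx migration that runs through this whole merge: instead of threading a grpc_exec_ctx* through every call and finishing it by hand, callers create a scoped grpc_core::ExecCtx, read deadlines via grpc_core::ExecCtx::Get()->Now(), and drain pending closures with Flush(). A minimal sketch of the before/after shape, assuming the scoped object also flushes remaining closures when it is destroyed (do_io_work is a hypothetical placeholder, not a real API):

#include <grpc/grpc.h>
#include "src/core/lib/iomgr/exec_ctx.h"

static void do_io_work() { /* hypothetical stand-in for real iomgr calls */ }

// Old shape (removed in this merge):
//   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
//   do_io_work(&exec_ctx);
//   grpc_exec_ctx_finish(&exec_ctx);

// New shape: a scoped, thread-local context picked up implicitly by iomgr.
static void run_io_work() {
  grpc_core::ExecCtx exec_ctx;         // installs the context for this thread
  do_io_work();                        // no exec_ctx argument any more
  grpc_millis deadline =
      grpc_core::ExecCtx::Get()->Now() + 30 * GPR_MS_PER_SEC;
  (void)deadline;                      // deadlines are now derived from the ExecCtx
  grpc_core::ExecCtx::Get()->Flush();  // run closures scheduled so far
}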
diff --git a/test/core/util/reconnect_server.cc b/test/core/util/reconnect_server.cc
index 4775b07..bcafc4e 100644
--- a/test/core/util/reconnect_server.cc
+++ b/test/core/util/reconnect_server.cc
@@ -55,7 +55,7 @@
   }
 }
 
-static void on_connect(grpc_exec_ctx* exec_ctx, void* arg, grpc_endpoint* tcp,
+static void on_connect(void* arg, grpc_endpoint* tcp,
                        grpc_pollset* accepting_pollset,
                        grpc_tcp_server_acceptor* acceptor) {
   gpr_free(acceptor);
@@ -65,9 +65,9 @@
   gpr_timespec now = gpr_now(GPR_CLOCK_REALTIME);
   timestamp_list* new_tail;
   peer = grpc_endpoint_get_peer(tcp);
-  grpc_endpoint_shutdown(exec_ctx, tcp,
+  grpc_endpoint_shutdown(tcp,
                          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Connected"));
-  grpc_endpoint_destroy(exec_ctx, tcp);
+  grpc_endpoint_destroy(tcp);
   if (peer) {
     last_colon = strrchr(peer, ':');
     if (server->peer == nullptr) {
diff --git a/test/core/util/run_with_poller.sh b/test/core/util/run_with_poller.sh
new file mode 100755
index 0000000..0579145
--- /dev/null
+++ b/test/core/util/run_with_poller.sh
@@ -0,0 +1,19 @@
+#!/bin/sh
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+export GRPC_POLL_STRATEGY=$1
+shift
+"$@"
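Usage follows directly from the script: the first argument is exported as GRPC_POLL_STRATEGY and the remaining arguments are run as the test command, e.g. (the strategy name and test path here are only examples):

    test/core/util/run_with_poller.sh poll path/to/some_test --some_flag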
diff --git a/test/core/util/test_config.h b/test/core/util/test_config.h
index 4383fbf..5b3d347 100644
--- a/test/core/util/test_config.h
+++ b/test/core/util/test_config.h
@@ -21,10 +21,6 @@
 
 #include <grpc/support/time.h>
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
 extern int64_t g_fixture_slowdown_factor;
 extern int64_t g_poller_slowdown_factor;
 
@@ -37,14 +33,10 @@
 /* Converts a given timeout (in milliseconds) to a deadline. */
 gpr_timespec grpc_timeout_milliseconds_to_deadline(int64_t time_ms);
 
-#ifndef GRPC_TEST_CUSTOM_PICK_PORT
+#if !defined(GRPC_TEST_CUSTOM_PICK_PORT) && !defined(GRPC_PORT_ISOLATED_RUNTIME)
 #define GRPC_TEST_PICK_PORT
 #endif
 
 void grpc_test_init(int argc, char** argv);
 
-#ifdef __cplusplus
-}
-#endif
-
 #endif /* GRPC_TEST_CORE_UTIL_TEST_CONFIG_H */
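The guard above means GRPC_TEST_PICK_PORT is now suppressed not only when a custom picker is configured (GRPC_TEST_CUSTOM_PICK_PORT) but also under GRPC_PORT_ISOLATED_RUNTIME, so isolated-runtime builds fall through to the sequential picker added earlier in this diff instead of the default dynamic one. An illustrative (not in-tree) snippet that reports which mode a build selected:

#include <stdio.h>

#include "test/core/util/test_config.h"

int main(void) {
#if defined(GRPC_PORT_ISOLATED_RUNTIME)
  puts("isolated runtime: sequential picker from port_isolated_runtime_environment.cc");
#elif defined(GRPC_TEST_CUSTOM_PICK_PORT)
  puts("custom pick-port implementation supplied by the build");
#else
  puts("GRPC_TEST_PICK_PORT: default dynamic port picking");
#endif
  return 0;
}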
diff --git a/test/core/util/test_tcp_server.cc b/test/core/util/test_tcp_server.cc
index da34da6..5f6af4e 100644
--- a/test/core/util/test_tcp_server.cc
+++ b/test/core/util/test_tcp_server.cc
@@ -33,8 +33,7 @@
 #include "test/core/util/port.h"
 #include "test/core/util/test_config.h"
 
-static void on_server_destroyed(grpc_exec_ctx* exec_ctx, void* data,
-                                grpc_error* error) {
+static void on_server_destroyed(void* data, grpc_error* error) {
   test_tcp_server* server = static_cast<test_tcp_server*>(data);
   server->shutdown = 1;
 }
@@ -56,51 +55,46 @@
   grpc_resolved_address resolved_addr;
   struct sockaddr_in* addr = (struct sockaddr_in*)resolved_addr.addr;
   int port_added;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   addr->sin_family = AF_INET;
   addr->sin_port = htons((uint16_t)port);
   memset(&addr->sin_addr, 0, sizeof(addr->sin_addr));
 
-  grpc_error* error = grpc_tcp_server_create(
-      &exec_ctx, &server->shutdown_complete, nullptr, &server->tcp_server);
+  grpc_error* error = grpc_tcp_server_create(&server->shutdown_complete,
+                                             nullptr, &server->tcp_server);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   error =
       grpc_tcp_server_add_port(server->tcp_server, &resolved_addr, &port_added);
   GPR_ASSERT(error == GRPC_ERROR_NONE);
   GPR_ASSERT(port_added == port);
 
-  grpc_tcp_server_start(&exec_ctx, server->tcp_server, &server->pollset, 1,
+  grpc_tcp_server_start(server->tcp_server, &server->pollset, 1,
                         server->on_connect, server->cb_data);
   gpr_log(GPR_INFO, "test tcp server listening on 0.0.0.0:%d", port);
-
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 void test_tcp_server_poll(test_tcp_server* server, int seconds) {
   grpc_pollset_worker* worker = nullptr;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_millis deadline = grpc_timespec_to_millis_round_up(
       grpc_timeout_seconds_to_deadline(seconds));
   gpr_mu_lock(server->mu);
-  GRPC_LOG_IF_ERROR(
-      "pollset_work",
-      grpc_pollset_work(&exec_ctx, server->pollset, &worker, deadline));
+  GRPC_LOG_IF_ERROR("pollset_work",
+                    grpc_pollset_work(server->pollset, &worker, deadline));
   gpr_mu_unlock(server->mu);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
-static void do_nothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
-static void finish_pollset(grpc_exec_ctx* exec_ctx, void* arg,
-                           grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(arg));
+static void do_nothing(void* arg, grpc_error* error) {}
+static void finish_pollset(void* arg, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(arg));
 }
 
 void test_tcp_server_destroy(test_tcp_server* server) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_timespec shutdown_deadline;
   grpc_closure do_nothing_cb;
-  grpc_tcp_server_unref(&exec_ctx, server->tcp_server);
+  grpc_tcp_server_unref(server->tcp_server);
   GRPC_CLOSURE_INIT(&do_nothing_cb, do_nothing, nullptr,
                     grpc_schedule_on_exec_ctx);
   shutdown_deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
@@ -109,10 +103,10 @@
          gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), shutdown_deadline) < 0) {
     test_tcp_server_poll(server, 1);
   }
-  grpc_pollset_shutdown(&exec_ctx, server->pollset,
+  grpc_pollset_shutdown(server->pollset,
                         GRPC_CLOSURE_CREATE(finish_pollset, server->pollset,
                                             grpc_schedule_on_exec_ctx));
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_free(server->pollset);
   grpc_shutdown();
 }
diff --git a/test/core/util/trickle_endpoint.cc b/test/core/util/trickle_endpoint.cc
index 4544fb7..f95ed62 100644
--- a/test/core/util/trickle_endpoint.cc
+++ b/test/core/util/trickle_endpoint.cc
@@ -45,24 +45,23 @@
   grpc_closure* write_cb;
 } trickle_endpoint;
 
-static void te_read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                    grpc_slice_buffer* slices, grpc_closure* cb) {
+static void te_read(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                    grpc_closure* cb) {
   trickle_endpoint* te = (trickle_endpoint*)ep;
-  grpc_endpoint_read(exec_ctx, te->wrapped, slices, cb);
+  grpc_endpoint_read(te->wrapped, slices, cb);
 }
 
-static void maybe_call_write_cb_locked(grpc_exec_ctx* exec_ctx,
-                                       trickle_endpoint* te) {
+static void maybe_call_write_cb_locked(trickle_endpoint* te) {
   if (te->write_cb != nullptr &&
       (te->error != GRPC_ERROR_NONE ||
        te->write_buffer.length <= WRITE_BUFFER_SIZE)) {
-    GRPC_CLOSURE_SCHED(exec_ctx, te->write_cb, GRPC_ERROR_REF(te->error));
+    GRPC_CLOSURE_SCHED(te->write_cb, GRPC_ERROR_REF(te->error));
     te->write_cb = nullptr;
   }
 }
 
-static void te_write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                     grpc_slice_buffer* slices, grpc_closure* cb) {
+static void te_write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                     grpc_closure* cb) {
   trickle_endpoint* te = (trickle_endpoint*)ep;
   gpr_mu_lock(&te->mu);
   GPR_ASSERT(te->write_cb == nullptr);
@@ -74,47 +73,44 @@
                           grpc_slice_copy(slices->slices[i]));
   }
   te->write_cb = cb;
-  maybe_call_write_cb_locked(exec_ctx, te);
+  maybe_call_write_cb_locked(te);
   gpr_mu_unlock(&te->mu);
 }
 
-static void te_add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                              grpc_pollset* pollset) {
+static void te_add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {
   trickle_endpoint* te = (trickle_endpoint*)ep;
-  grpc_endpoint_add_to_pollset(exec_ctx, te->wrapped, pollset);
+  grpc_endpoint_add_to_pollset(te->wrapped, pollset);
 }
 
-static void te_add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
+static void te_add_to_pollset_set(grpc_endpoint* ep,
                                   grpc_pollset_set* pollset_set) {
   trickle_endpoint* te = (trickle_endpoint*)ep;
-  grpc_endpoint_add_to_pollset_set(exec_ctx, te->wrapped, pollset_set);
+  grpc_endpoint_add_to_pollset_set(te->wrapped, pollset_set);
 }
 
-static void te_delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                       grpc_endpoint* ep,
+static void te_delete_from_pollset_set(grpc_endpoint* ep,
                                        grpc_pollset_set* pollset_set) {
   trickle_endpoint* te = (trickle_endpoint*)ep;
-  grpc_endpoint_delete_from_pollset_set(exec_ctx, te->wrapped, pollset_set);
+  grpc_endpoint_delete_from_pollset_set(te->wrapped, pollset_set);
 }
 
-static void te_shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                        grpc_error* why) {
+static void te_shutdown(grpc_endpoint* ep, grpc_error* why) {
   trickle_endpoint* te = (trickle_endpoint*)ep;
   gpr_mu_lock(&te->mu);
   if (te->error == GRPC_ERROR_NONE) {
     te->error = GRPC_ERROR_REF(why);
   }
-  maybe_call_write_cb_locked(exec_ctx, te);
+  maybe_call_write_cb_locked(te);
   gpr_mu_unlock(&te->mu);
-  grpc_endpoint_shutdown(exec_ctx, te->wrapped, why);
+  grpc_endpoint_shutdown(te->wrapped, why);
 }
 
-static void te_destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
+static void te_destroy(grpc_endpoint* ep) {
   trickle_endpoint* te = (trickle_endpoint*)ep;
-  grpc_endpoint_destroy(exec_ctx, te->wrapped);
+  grpc_endpoint_destroy(te->wrapped);
   gpr_mu_destroy(&te->mu);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &te->write_buffer);
-  grpc_slice_buffer_destroy_internal(exec_ctx, &te->writing_buffer);
+  grpc_slice_buffer_destroy_internal(&te->write_buffer);
+  grpc_slice_buffer_destroy_internal(&te->writing_buffer);
   GRPC_ERROR_UNREF(te->error);
   gpr_free(te);
 }
@@ -134,8 +130,7 @@
   return grpc_endpoint_get_fd(te->wrapped);
 }
 
-static void te_finish_write(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+static void te_finish_write(void* arg, grpc_error* error) {
   trickle_endpoint* te = (trickle_endpoint*)arg;
   gpr_mu_lock(&te->mu);
   te->writing = false;
@@ -173,8 +168,7 @@
   return (double)s.tv_sec + 1e-9 * (double)s.tv_nsec;
 }
 
-size_t grpc_trickle_endpoint_trickle(grpc_exec_ctx* exec_ctx,
-                                     grpc_endpoint* ep) {
+size_t grpc_trickle_endpoint_trickle(grpc_endpoint* ep) {
   trickle_endpoint* te = (trickle_endpoint*)ep;
   gpr_mu_lock(&te->mu);
   if (!te->writing && te->write_buffer.length > 0) {
@@ -189,9 +183,9 @@
       te->writing = true;
       te->last_write = now;
       grpc_endpoint_write(
-          exec_ctx, te->wrapped, &te->writing_buffer,
+          te->wrapped, &te->writing_buffer,
           GRPC_CLOSURE_CREATE(te_finish_write, te, grpc_schedule_on_exec_ctx));
-      maybe_call_write_cb_locked(exec_ctx, te);
+      maybe_call_write_cb_locked(te);
     }
   }
   size_t backlog = te->write_buffer.length;
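With the exec_ctx parameter dropped from the trickle helpers, a caller only needs a scoped grpc_core::ExecCtx in place before invoking them. A minimal sketch of a driver loop under that assumption (endpoint creation is elided; ep is assumed to come from the trickle endpoint factory declared in the header below, and a real test would also poll or sleep between iterations rather than spin):

#include "src/core/lib/iomgr/exec_ctx.h"
#include "test/core/util/trickle_endpoint.h"

// Illustrative only: let buffered bytes dribble out until the backlog is empty.
static void drain_trickle(grpc_endpoint* ep) {
  grpc_core::ExecCtx exec_ctx;
  while (grpc_trickle_get_backlog(ep) > 0) {
    grpc_trickle_endpoint_trickle(ep);   // may kick off a rate-limited write
    grpc_core::ExecCtx::Get()->Flush();  // run any closures scheduled by the call
  }
}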
diff --git a/test/core/util/trickle_endpoint.h b/test/core/util/trickle_endpoint.h
index 11c113b..cd07de9 100644
--- a/test/core/util/trickle_endpoint.h
+++ b/test/core/util/trickle_endpoint.h
@@ -25,8 +25,7 @@
                                             double bytes_per_second);
 
 /* Allow up to \a bytes through the endpoint. Returns the new backlog. */
-size_t grpc_trickle_endpoint_trickle(grpc_exec_ctx* exec_ctx,
-                                     grpc_endpoint* endpoint);
+size_t grpc_trickle_endpoint_trickle(grpc_endpoint* endpoint);
 
 size_t grpc_trickle_get_backlog(grpc_endpoint* endpoint);
 
diff --git a/test/cpp/client/client_channel_stress_test.cc b/test/cpp/client/client_channel_stress_test.cc
index 8940f6f..e829d52 100644
--- a/test/cpp/client/client_channel_stress_test.cc
+++ b/test/cpp/client/client_channel_stress_test.cc
@@ -34,10 +34,8 @@
 #include <grpc/support/thd.h>
 #include <grpc/support/time.h>
 
-extern "C" {
 #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
 #include "src/core/lib/iomgr/sockaddr.h"
-}
 
 #include "test/core/util/port.h"
 #include "test/core/util/test_config.h"
@@ -214,13 +212,13 @@
   };
 
   void SetNextResolution(const std::vector<AddressData>& address_data) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_lb_addresses* addresses =
         grpc_lb_addresses_create(address_data.size(), nullptr);
     for (size_t i = 0; i < address_data.size(); ++i) {
       char* lb_uri_str;
       gpr_asprintf(&lb_uri_str, "ipv4:127.0.0.1:%d", address_data[i].port);
-      grpc_uri* lb_uri = grpc_uri_parse(&exec_ctx, lb_uri_str, true);
+      grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str, true);
       GPR_ASSERT(lb_uri != nullptr);
       grpc_lb_addresses_set_address_from_uri(
           addresses, i, lb_uri, address_data[i].is_balancer,
@@ -230,10 +228,9 @@
     }
     grpc_arg fake_addresses = grpc_lb_addresses_create_channel_arg(addresses);
     grpc_channel_args fake_result = {1, &fake_addresses};
-    grpc_fake_resolver_response_generator_set_response(
-        &exec_ctx, response_generator_, &fake_result);
-    grpc_lb_addresses_destroy(&exec_ctx, addresses);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_fake_resolver_response_generator_set_response(response_generator_,
+                                                       &fake_result);
+    grpc_lb_addresses_destroy(addresses);
   }
 
   void KeepSendingRequests() {
diff --git a/test/cpp/codegen/BUILD b/test/cpp/codegen/BUILD
index 6cc81e3..1388dbc 100644
--- a/test/cpp/codegen/BUILD
+++ b/test/cpp/codegen/BUILD
@@ -14,7 +14,7 @@
 
 licenses(["notice"])  # Apache v2
 
-load("//bazel:grpc_build_system.bzl", "grpc_cc_test", "grpc_package")
+load("//bazel:grpc_build_system.bzl", "grpc_cc_test", "grpc_package", "grpc_cc_binary", "grpc_sh_test")
 
 grpc_package(name = "test/cpp/codegen")
 
@@ -55,14 +55,10 @@
     ],
 )
 
-grpc_cc_test(
+grpc_cc_binary(
     name = "golden_file_test",
+    testonly = True,
     srcs = ["golden_file_test.cc"],
-    args = ["--generated_file_path=$(GENDIR)/src/proto/grpc/testing/"],
-    data = [
-        ":compiler_test_golden",
-        "//src/proto/grpc/testing:_compiler_test_proto_grpc_codegen",
-    ],
     deps = [
         "//:grpc++",
         "//src/proto/grpc/testing:compiler_test_proto",
@@ -73,3 +69,14 @@
         "gflags",
     ],
 )
+
+grpc_sh_test(
+    name = "run_golden_file_test",
+    srcs = ["run_golden_file_test.sh"],
+    data = [
+        ":golden_file_test",
+        ":compiler_test_golden",
+        ":compiler_test_mock_golden",
+        "//src/proto/grpc/testing:_compiler_test_proto_grpc_codegen",
+    ],
+)
diff --git a/test/cpp/codegen/golden_file_test.cc b/test/cpp/codegen/golden_file_test.cc
index 1488098..7e4d15a 100644
--- a/test/cpp/codegen/golden_file_test.cc
+++ b/test/cpp/codegen/golden_file_test.cc
@@ -22,6 +22,13 @@
 #include <gflags/gflags.h>
 #include <gtest/gtest.h>
 
+// In some distros, gflags lives in the google namespace, and in others in the
+// gflags namespace. This hack lets us find it in either case.
+namespace google {}
+namespace gflags {}
+using namespace google;
+using namespace gflags;
+
 DEFINE_string(
     generated_file_path, "",
     "path to the directory containing generated files compiler_test.grpc.pb.h"
@@ -60,7 +67,7 @@
 
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
-  ::google::ParseCommandLineFlags(&argc, &argv, true);
+  ParseCommandLineFlags(&argc, &argv, true);
   if (FLAGS_generated_file_path.empty()) {
     FLAGS_generated_file_path = "gens/src/proto/grpc/testing/";
   }
diff --git a/test/cpp/codegen/run_golden_file_test.sh b/test/cpp/codegen/run_golden_file_test.sh
new file mode 100755
index 0000000..cdfaa96
--- /dev/null
+++ b/test/cpp/codegen/run_golden_file_test.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+GENERATED_FILES_PATH="$TEST_SRCDIR/../../../../../genfiles/src/proto/grpc/testing/"
+test/cpp/codegen/golden_file_test --generated_file_path="$GENERATED_FILES_PATH"
diff --git a/test/cpp/common/channel_arguments_test.cc b/test/cpp/common/channel_arguments_test.cc
index d6ed2e5..f330c01 100644
--- a/test/cpp/common/channel_arguments_test.cc
+++ b/test/cpp/common/channel_arguments_test.cc
@@ -249,5 +249,8 @@
 
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
+  grpc_init();
+  int ret = RUN_ALL_TESTS();
+  grpc_shutdown();
+  return ret;
 }
diff --git a/test/cpp/common/channel_filter_test.cc b/test/cpp/common/channel_filter_test.cc
index 6385181..7bdd53f 100644
--- a/test/cpp/common/channel_filter_test.cc
+++ b/test/cpp/common/channel_filter_test.cc
@@ -28,7 +28,7 @@
  public:
   MyChannelData() {}
 
-  grpc_error* Init(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+  grpc_error* Init(grpc_channel_element* elem,
                    grpc_channel_element_args* args) override {
     (void)args->channel_args;  // Make sure field is available.
     return GRPC_ERROR_NONE;
@@ -39,7 +39,7 @@
  public:
   MyCallData() {}
 
-  grpc_error* Init(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+  grpc_error* Init(grpc_call_element* elem,
                    const grpc_call_element_args* args) override {
     (void)args->path;  // Make sure field is available.
     return GRPC_ERROR_NONE;
diff --git a/test/cpp/end2end/BUILD b/test/cpp/end2end/BUILD
index b29a13d..fa77c30 100644
--- a/test/cpp/end2end/BUILD
+++ b/test/cpp/end2end/BUILD
@@ -14,7 +14,7 @@
 
 licenses(["notice"])  # Apache v2
 
-load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_package")
+load("//bazel:grpc_build_system.bzl", "grpc_cc_library", "grpc_cc_test", "grpc_package", "grpc_cc_binary")
 
 grpc_package(name = "test/cpp/end2end", visibility = "public") # Allows external users to implement end2end tests.
 
@@ -66,13 +66,17 @@
         "//test/core/util:grpc_test_util",
         "//test/cpp/util:test_util",
     ],
+    data = [
+        ":client_crash_test_server",
+    ],
     external_deps = [
         "gtest",
     ],
 )
 
-grpc_cc_test(
+grpc_cc_binary(
     name = "client_crash_test_server",
+    testonly = True,
     srcs = ["client_crash_test_server.cc"],
     deps = [
         "//:gpr",
@@ -301,10 +305,14 @@
     external_deps = [
         "gtest",
     ],
+    data = [
+        ":server_crash_test_client",
+    ],
 )
 
-grpc_cc_test(
+grpc_cc_binary(
     name = "server_crash_test_client",
+    testonly = True,
     srcs = ["server_crash_test_client.cc"],
     deps = [
         "//:gpr",
diff --git a/test/cpp/end2end/client_lb_end2end_test.cc b/test/cpp/end2end/client_lb_end2end_test.cc
index f8bb12f..c6e9577 100644
--- a/test/cpp/end2end/client_lb_end2end_test.cc
+++ b/test/cpp/end2end/client_lb_end2end_test.cc
@@ -28,6 +28,7 @@
 #include <grpc++/server_builder.h>
 #include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
+#include <grpc/support/atm.h>
 #include <grpc/support/log.h>
 #include <grpc/support/string_util.h>
 #include <grpc/support/thd.h>
@@ -35,6 +36,7 @@
 
 #include "src/core/ext/filters/client_channel/resolver/fake/fake_resolver.h"
 #include "src/core/ext/filters/client_channel/subchannel_index.h"
+#include "src/core/lib/backoff/backoff.h"
 #include "src/core/lib/support/env.h"
 
 #include "src/proto/grpc/testing/echo.grpc.pb.h"
@@ -48,10 +50,33 @@
 using grpc::testing::EchoResponse;
 using std::chrono::system_clock;
 
+// defined in tcp_client_posix.c
+extern void (*grpc_tcp_client_connect_impl)(
+    grpc_closure* closure, grpc_endpoint** ep,
+    grpc_pollset_set* interested_parties, const grpc_channel_args* channel_args,
+    const grpc_resolved_address* addr, grpc_millis deadline);
+
+const auto original_tcp_connect_fn = grpc_tcp_client_connect_impl;
+
 namespace grpc {
 namespace testing {
 namespace {
 
+gpr_atm g_connection_delay_ms;
+
+void tcp_client_connect_with_delay(grpc_closure* closure, grpc_endpoint** ep,
+                                   grpc_pollset_set* interested_parties,
+                                   const grpc_channel_args* channel_args,
+                                   const grpc_resolved_address* addr,
+                                   grpc_millis deadline) {
+  const int delay_ms = gpr_atm_acq_load(&g_connection_delay_ms);
+  if (delay_ms > 0) {
+    gpr_sleep_until(grpc_timeout_milliseconds_to_deadline(delay_ms));
+  }
+  original_tcp_connect_fn(closure, ep, interested_parties, channel_args, addr,
+                          deadline + delay_ms);
+}
+
 // Subclass of TestServiceImpl that increments a request counter for
 // every call to the Echo RPC.
 class MyTestServiceImpl : public TestServiceImpl {
@@ -112,13 +137,13 @@
   }
 
   void SetNextResolution(const std::vector<int>& ports) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_lb_addresses* addresses =
         grpc_lb_addresses_create(ports.size(), nullptr);
     for (size_t i = 0; i < ports.size(); ++i) {
       char* lb_uri_str;
       gpr_asprintf(&lb_uri_str, "ipv4:127.0.0.1:%d", ports[i]);
-      grpc_uri* lb_uri = grpc_uri_parse(&exec_ctx, lb_uri_str, true);
+      grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str, true);
       GPR_ASSERT(lb_uri != nullptr);
       grpc_lb_addresses_set_address_from_uri(addresses, i, lb_uri,
                                              false /* is balancer */,
@@ -130,33 +155,32 @@
         grpc_lb_addresses_create_channel_arg(addresses);
     grpc_channel_args* fake_result =
         grpc_channel_args_copy_and_add(nullptr, &fake_addresses, 1);
-    grpc_fake_resolver_response_generator_set_response(
-        &exec_ctx, response_generator_, fake_result);
-    grpc_channel_args_destroy(&exec_ctx, fake_result);
-    grpc_lb_addresses_destroy(&exec_ctx, addresses);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_fake_resolver_response_generator_set_response(response_generator_,
+                                                       fake_result);
+    grpc_channel_args_destroy(fake_result);
+    grpc_lb_addresses_destroy(addresses);
   }
 
-  void ResetStub(const grpc::string& lb_policy_name = "") {
-    ChannelArguments args;
+  std::vector<int> GetServersPorts() {
+    std::vector<int> ports;
+    for (const auto& server : servers_) ports.push_back(server->port_);
+    return ports;
+  }
+
+  void ResetStub(const std::vector<int>& ports,
+                 const grpc::string& lb_policy_name,
+                 ChannelArguments args = ChannelArguments()) {
     if (lb_policy_name.size() > 0) {
       args.SetLoadBalancingPolicyName(lb_policy_name);
     }  // else, default to pick first
     args.SetPointer(GRPC_ARG_FAKE_RESOLVER_RESPONSE_GENERATOR,
                     response_generator_);
-    args.SetInt("grpc.testing.fixed_reconnect_backoff_ms", 2000);
-    std::ostringstream uri;
-    uri << "fake:///";
-    for (size_t i = 0; i < servers_.size() - 1; ++i) {
-      uri << "127.0.0.1:" << servers_[i]->port_ << ",";
-    }
-    uri << "127.0.0.1:" << servers_[servers_.size() - 1]->port_;
     channel_ =
-        CreateCustomChannel(uri.str(), InsecureChannelCredentials(), args);
+        CreateCustomChannel("fake:///", InsecureChannelCredentials(), args);
     stub_ = grpc::testing::EchoTestService::NewStub(channel_);
   }
 
-  Status SendRpc(EchoResponse* response = nullptr) {
+  bool SendRpc(EchoResponse* response = nullptr) {
     const bool local_response = (response == nullptr);
     if (local_response) response = new EchoResponse;
     EchoRequest request;
@@ -164,19 +188,19 @@
     ClientContext context;
     Status status = stub_->Echo(&context, request, response);
     if (local_response) delete response;
-    return status;
+    return status.ok();
   }
 
   void CheckRpcSendOk() {
     EchoResponse response;
-    const Status status = SendRpc(&response);
-    EXPECT_TRUE(status.ok());
+    const bool success = SendRpc(&response);
+    EXPECT_TRUE(success);
     EXPECT_EQ(response.message(), kRequestMessage_);
   }
 
   void CheckRpcSendFailure() {
-    const Status status = SendRpc();
-    EXPECT_FALSE(status.ok());
+    const bool success = SendRpc();
+    EXPECT_FALSE(success);
   }
 
   struct ServerData {
@@ -267,7 +291,7 @@
   // Start servers and send one RPC per server.
   const int kNumServers = 3;
   StartServers(kNumServers);
-  ResetStub();  // implicit pick first
+  ResetStub(GetServersPorts(), "");  // test that pick first is the default.
   std::vector<int> ports;
   for (size_t i = 0; i < servers_.size(); ++i) {
     ports.emplace_back(servers_[i]->port_);
@@ -291,11 +315,63 @@
   EXPECT_EQ("pick_first", channel_->GetLoadBalancingPolicyName());
 }
 
+TEST_F(ClientLbEnd2endTest, PickFirstBackOffInitialReconnect) {
+  ChannelArguments args;
+  constexpr int kInitialBackOffMs = 100;
+  args.SetInt(GRPC_ARG_INITIAL_RECONNECT_BACKOFF_MS, kInitialBackOffMs);
+  const std::vector<int> ports = {grpc_pick_unused_port_or_die()};
+  const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
+  ResetStub(ports, "pick_first", args);
+  SetNextResolution(ports);
+  // The channel won't become connected (there's no server).
+  ASSERT_FALSE(channel_->WaitForConnected(
+      grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 2)));
+  // Bring up a server on the chosen port.
+  StartServers(1, ports);
+  // Now it will.
+  ASSERT_TRUE(channel_->WaitForConnected(
+      grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 2)));
+  const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
+  const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
+  gpr_log(GPR_DEBUG, "Waited %ld milliseconds", waited_ms);
+  // We should have waited at least kInitialBackOffMs. We subtract one to
+  // account for test timing and precision drift.
+  EXPECT_GE(waited_ms, kInitialBackOffMs - 1);
+  // But not much more.
+  EXPECT_GT(
+      gpr_time_cmp(
+          grpc_timeout_milliseconds_to_deadline(kInitialBackOffMs * 1.10), t1),
+      0);
+}
+
+TEST_F(ClientLbEnd2endTest, PickFirstBackOffMinReconnect) {
+  ChannelArguments args;
+  constexpr int kMinReconnectBackOffMs = 1000;
+  args.SetInt(GRPC_ARG_MIN_RECONNECT_BACKOFF_MS, kMinReconnectBackOffMs);
+  const std::vector<int> ports = {grpc_pick_unused_port_or_die()};
+  ResetStub(ports, "pick_first", args);
+  SetNextResolution(ports);
+  // Make the connection delay 10% longer than the min reconnect backoff to make
+  // sure we hit the codepath that waits for the min reconnect backoff.
+  gpr_atm_rel_store(&g_connection_delay_ms, kMinReconnectBackOffMs * 1.10);
+  grpc_tcp_client_connect_impl = tcp_client_connect_with_delay;
+  const gpr_timespec t0 = gpr_now(GPR_CLOCK_MONOTONIC);
+  channel_->WaitForConnected(
+      grpc_timeout_milliseconds_to_deadline(kMinReconnectBackOffMs * 2));
+  const gpr_timespec t1 = gpr_now(GPR_CLOCK_MONOTONIC);
+  const grpc_millis waited_ms = gpr_time_to_millis(gpr_time_sub(t1, t0));
+  gpr_log(GPR_DEBUG, "Waited %ld ms", waited_ms);
+  // We should have waited at least kMinReconnectBackOffMs. We subtract one to
+  // account for test timing and precision drift.
+  EXPECT_GE(waited_ms, kMinReconnectBackOffMs - 1);
+  gpr_atm_rel_store(&g_connection_delay_ms, 0);
+}
+
 TEST_F(ClientLbEnd2endTest, PickFirstUpdates) {
   // Start servers and send one RPC per server.
   const int kNumServers = 3;
   StartServers(kNumServers);
-  ResetStub();  // implicit pick first
+  ResetStub(GetServersPorts(), "pick_first");
   std::vector<int> ports;
 
   // Perform one RPC against the first server.
@@ -341,7 +417,7 @@
   // Start servers and send one RPC per server.
   const int kNumServers = 3;
   StartServers(kNumServers);
-  ResetStub();  // implicit pick first
+  ResetStub(GetServersPorts(), "pick_first");
   std::vector<int> ports;
 
   // Perform one RPC against the first server.
@@ -371,7 +447,7 @@
   // Start servers and send one RPC per server.
   const int kNumServers = 3;
   StartServers(kNumServers);
-  ResetStub();  // implicit pick first
+  ResetStub(GetServersPorts(), "pick_first");
   std::vector<int> ports;
   for (size_t i = 0; i < servers_.size(); ++i) {
     ports.emplace_back(servers_[i]->port_);
@@ -393,7 +469,7 @@
   // Start servers and send one RPC per server.
   const int kNumServers = 3;
   StartServers(kNumServers);
-  ResetStub("round_robin");
+  ResetStub(GetServersPorts(), "round_robin");
   std::vector<int> ports;
   for (const auto& server : servers_) {
     ports.emplace_back(server->port_);
@@ -424,7 +500,7 @@
   // Start servers and send one RPC per server.
   const int kNumServers = 3;
   StartServers(kNumServers);
-  ResetStub("round_robin");
+  ResetStub(GetServersPorts(), "round_robin");
   std::vector<int> ports;
 
   // Start with a single server.
@@ -507,7 +583,7 @@
 TEST_F(ClientLbEnd2endTest, RoundRobinUpdateInError) {
   const int kNumServers = 3;
   StartServers(kNumServers);
-  ResetStub("round_robin");
+  ResetStub(GetServersPorts(), "round_robin");
   std::vector<int> ports;
 
   // Start with a single server.
@@ -539,7 +615,7 @@
   // Start servers and send one RPC per server.
   const int kNumServers = 3;
   StartServers(kNumServers);
-  ResetStub("round_robin");
+  ResetStub(GetServersPorts(), "round_robin");
   std::vector<int> ports;
   for (size_t i = 0; i < servers_.size(); ++i) {
     ports.emplace_back(servers_[i]->port_);
@@ -566,22 +642,35 @@
     ports.push_back(grpc_pick_unused_port_or_die());
   }
   StartServers(kNumServers, ports);
-  ResetStub("round_robin");
+  ResetStub(GetServersPorts(), "round_robin");
   SetNextResolution(ports);
   // Send a number of RPCs, which succeed.
   for (size_t i = 0; i < 100; ++i) {
     CheckRpcSendOk();
   }
   // Kill all servers
+  gpr_log(GPR_INFO, "****** ABOUT TO KILL SERVERS *******");
   for (size_t i = 0; i < servers_.size(); ++i) {
     servers_[i]->Shutdown(false);
   }
-  // Client request should fail.
-  CheckRpcSendFailure();
+  gpr_log(GPR_INFO, "****** SERVERS KILLED *******");
+  gpr_log(GPR_INFO, "****** SENDING DOOMED REQUESTS *******");
+  // Client requests should fail. Send enough to tickle all subchannels.
+  for (size_t i = 0; i < servers_.size(); ++i) CheckRpcSendFailure();
+  gpr_log(GPR_INFO, "****** DOOMED REQUESTS SENT *******");
   // Bring servers back up on the same port (we aren't recreating the channel).
+  gpr_log(GPR_INFO, "****** RESTARTING SERVERS *******");
   StartServers(kNumServers, ports);
-  // Client request should succeed.
-  CheckRpcSendOk();
+  gpr_log(GPR_INFO, "****** SERVERS RESTARTED *******");
+  gpr_log(GPR_INFO, "****** SENDING REQUEST TO SUCCEED *******");
+  // Client request should eventually (but still fairly soon) succeed.
+  const gpr_timespec deadline = grpc_timeout_seconds_to_deadline(5);
+  gpr_timespec now = gpr_now(GPR_CLOCK_MONOTONIC);
+  while (gpr_time_cmp(deadline, now) > 0) {
+    if (SendRpc()) break;
+    now = gpr_now(GPR_CLOCK_MONOTONIC);
+  }
+  GPR_ASSERT(gpr_time_cmp(deadline, now) > 0);
 }
 
 }  // namespace
diff --git a/test/cpp/end2end/end2end_test.cc b/test/cpp/end2end/end2end_test.cc
index c71034b..4c8dfe0 100644
--- a/test/cpp/end2end/end2end_test.cc
+++ b/test/cpp/end2end/end2end_test.cc
@@ -741,6 +741,7 @@
   Status s = stream->Finish();
   EXPECT_EQ(response.message(), request.message());
   EXPECT_TRUE(s.ok());
+  EXPECT_TRUE(context.debug_error_string().empty());
 }
 
 TEST_P(End2endTest, RequestStreamOneRequestWithCoalescingApi) {
@@ -1258,6 +1259,13 @@
     EXPECT_EQ(iter->code(), s.error_code());
     EXPECT_EQ(iter->error_message(), s.error_message());
     EXPECT_EQ(iter->binary_error_details(), s.error_details());
+    EXPECT_TRUE(context.debug_error_string().find("created") !=
+                std::string::npos);
+    EXPECT_TRUE(context.debug_error_string().find("file") != std::string::npos);
+    EXPECT_TRUE(context.debug_error_string().find("line") != std::string::npos);
+    EXPECT_TRUE(context.debug_error_string().find("status") !=
+                std::string::npos);
+    EXPECT_TRUE(context.debug_error_string().find("13") != std::string::npos);
   }
 }
 
@@ -1300,12 +1308,19 @@
   EchoResponse response;
   request.set_message("Hello");
   request.mutable_param()->set_skip_cancelled_check(true);
-  // Let server sleep for 2 ms first to guarantee expiry
-  request.mutable_param()->set_server_sleep_us(2 * 1000);
+  // Let server sleep for 40 ms first to guarantee expiry.
+  // 40 ms might seem a bit extreme but the timer manager would have been just
+  // initialized (when ResetStub() was called) and there are some warmup costs
+  // i.e., the timer thread may not have even started. There might also be other
+  // delays in the timer manager thread (in acquiring locks, timer data
+  // structure manipulations, starting backup timer threads) that add to the
+  // delays. 40 ms is still not enough in some cases, but it significantly
+  // reduces test flakiness.
+  request.mutable_param()->set_server_sleep_us(40 * 1000);
 
   ClientContext context;
   std::chrono::system_clock::time_point deadline =
-      std::chrono::system_clock::now() + std::chrono::microseconds(10);
+      std::chrono::system_clock::now() + std::chrono::milliseconds(1);
   context.set_deadline(deadline);
   Status s = stub_->Echo(&context, request, &response);
   EXPECT_EQ(StatusCode::DEADLINE_EXCEEDED, s.error_code());
diff --git a/test/cpp/end2end/filter_end2end_test.cc b/test/cpp/end2end/filter_end2end_test.cc
index f260ea0..c443037 100644
--- a/test/cpp/end2end/filter_end2end_test.cc
+++ b/test/cpp/end2end/filter_end2end_test.cc
@@ -100,7 +100,7 @@
 
 class ChannelDataImpl : public ChannelData {
  public:
-  grpc_error* Init(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+  grpc_error* Init(grpc_channel_element* elem,
                    grpc_channel_element_args* args) {
     IncrementConnectionCounter();
     return GRPC_ERROR_NONE;
@@ -109,13 +109,12 @@
 
 class CallDataImpl : public CallData {
  public:
-  void StartTransportStreamOpBatch(grpc_exec_ctx* exec_ctx,
-                                   grpc_call_element* elem,
+  void StartTransportStreamOpBatch(grpc_call_element* elem,
                                    TransportStreamOpBatch* op) override {
     // Incrementing the counter could be done from Init(), but we want
     // to test that the individual methods are actually called correctly.
     if (op->recv_initial_metadata() != nullptr) IncrementCallCounter();
-    grpc_call_next_op(exec_ctx, elem, op->op());
+    grpc_call_next_op(elem, op->op());
   }
 };
 
diff --git a/test/cpp/end2end/grpclb_end2end_test.cc b/test/cpp/end2end/grpclb_end2end_test.cc
index c15ab88..d4ee6b4 100644
--- a/test/cpp/end2end/grpclb_end2end_test.cc
+++ b/test/cpp/end2end/grpclb_end2end_test.cc
@@ -353,11 +353,6 @@
           "balancer", server_host_, balancers_.back().get()));
     }
     ResetStub();
-    std::vector<AddressData> addresses;
-    for (size_t i = 0; i < balancer_servers_.size(); ++i) {
-      addresses.emplace_back(AddressData{balancer_servers_[i].port_, true, ""});
-    }
-    SetNextResolution(addresses);
   }
 
   void TearDown() override {
@@ -370,6 +365,14 @@
     grpc_fake_resolver_response_generator_unref(response_generator_);
   }
 
+  void SetNextResolutionAllBalancers() {
+    std::vector<AddressData> addresses;
+    for (size_t i = 0; i < balancer_servers_.size(); ++i) {
+      addresses.emplace_back(AddressData{balancer_servers_[i].port_, true, ""});
+    }
+    SetNextResolution(addresses);
+  }
+
   void ResetStub(int fallback_timeout = 0) {
     ChannelArguments args;
     args.SetGrpclbFallbackTimeout(fallback_timeout);
@@ -451,13 +454,13 @@
   };
 
   void SetNextResolution(const std::vector<AddressData>& address_data) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     grpc_lb_addresses* addresses =
         grpc_lb_addresses_create(address_data.size(), nullptr);
     for (size_t i = 0; i < address_data.size(); ++i) {
       char* lb_uri_str;
       gpr_asprintf(&lb_uri_str, "ipv4:127.0.0.1:%d", address_data[i].port);
-      grpc_uri* lb_uri = grpc_uri_parse(&exec_ctx, lb_uri_str, true);
+      grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str, true);
       GPR_ASSERT(lb_uri != nullptr);
       grpc_lb_addresses_set_address_from_uri(
           addresses, i, lb_uri, address_data[i].is_balancer,
@@ -467,10 +470,9 @@
     }
     grpc_arg fake_addresses = grpc_lb_addresses_create_channel_arg(addresses);
     grpc_channel_args fake_result = {1, &fake_addresses};
-    grpc_fake_resolver_response_generator_set_response(
-        &exec_ctx, response_generator_, &fake_result);
-    grpc_lb_addresses_destroy(&exec_ctx, addresses);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_fake_resolver_response_generator_set_response(response_generator_,
+                                                       &fake_result);
+    grpc_lb_addresses_destroy(addresses);
   }
 
   const std::vector<int> GetBackendPorts(const size_t start_index = 0) const {
@@ -581,6 +583,7 @@
 };
 
 TEST_F(SingleBalancerTest, Vanilla) {
+  SetNextResolutionAllBalancers();
   const size_t kNumRpcsPerAddress = 100;
   ScheduleResponseForBalancer(
       0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
@@ -608,6 +611,7 @@
 }
 
 TEST_F(SingleBalancerTest, InitiallyEmptyServerlist) {
+  SetNextResolutionAllBalancers();
   const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
   const int kCallDeadlineMs = 1000 * grpc_test_slowdown_factor();
 
@@ -645,6 +649,7 @@
 }
 
 TEST_F(SingleBalancerTest, Fallback) {
+  SetNextResolutionAllBalancers();
   const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
   const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
   const size_t kNumBackendInResolution = backends_.size() / 2;
@@ -711,6 +716,7 @@
 }
 
 TEST_F(SingleBalancerTest, FallbackUpdate) {
+  SetNextResolutionAllBalancers();
   const int kFallbackTimeoutMs = 200 * grpc_test_slowdown_factor();
   const int kServerlistDelayMs = 500 * grpc_test_slowdown_factor();
   const size_t kNumBackendInResolution = backends_.size() / 3;
@@ -818,6 +824,7 @@
 }
 
 TEST_F(SingleBalancerTest, BackendsRestart) {
+  SetNextResolutionAllBalancers();
   const size_t kNumRpcsPerAddress = 100;
   ScheduleResponseForBalancer(
       0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
@@ -857,6 +864,7 @@
 };
 
 TEST_F(UpdatesTest, UpdateBalancers) {
+  SetNextResolutionAllBalancers();
   const std::vector<int> first_backend{GetBackendPorts()[0]};
   const std::vector<int> second_backend{GetBackendPorts()[1]};
   ScheduleResponseForBalancer(
@@ -919,6 +927,7 @@
 // verify that the LB channel inside grpclb keeps the initial connection (which
 // by definition is also present in the update).
 TEST_F(UpdatesTest, UpdateBalancersRepeated) {
+  SetNextResolutionAllBalancers();
   const std::vector<int> first_backend{GetBackendPorts()[0]};
   const std::vector<int> second_backend{GetBackendPorts()[0]};
 
@@ -989,6 +998,9 @@
 }
 
 TEST_F(UpdatesTest, UpdateBalancersDeadUpdate) {
+  std::vector<AddressData> addresses;
+  addresses.emplace_back(AddressData{balancer_servers_[0].port_, true, ""});
+  SetNextResolution(addresses);
   const std::vector<int> first_backend{GetBackendPorts()[0]};
   const std::vector<int> second_backend{GetBackendPorts()[1]};
 
@@ -1030,7 +1042,7 @@
   EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
   EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
 
-  std::vector<AddressData> addresses;
+  addresses.clear();
   addresses.emplace_back(AddressData{balancer_servers_[1].port_, true, ""});
   gpr_log(GPR_INFO, "========= ABOUT TO UPDATE 1 ==========");
   SetNextResolution(addresses);
@@ -1055,8 +1067,14 @@
   balancers_[2]->NotifyDoneWithServerlists();
   EXPECT_EQ(1U, balancer_servers_[0].service_->request_count());
   EXPECT_EQ(1U, balancer_servers_[0].service_->response_count());
-  EXPECT_EQ(1U, balancer_servers_[1].service_->request_count());
-  EXPECT_EQ(1U, balancer_servers_[1].service_->response_count());
+  // The second balancer, published as part of the first update, may end up
+  // getting one or two requests (that is, 1 <= #req <= 2), depending on
+  // whether the firing of the LB call retry timer races with the arrival of
+  // the update containing the second balancer.
+  EXPECT_GE(balancer_servers_[1].service_->request_count(), 1U);
+  EXPECT_GE(balancer_servers_[1].service_->response_count(), 1U);
+  EXPECT_LE(balancer_servers_[1].service_->request_count(), 2U);
+  EXPECT_LE(balancer_servers_[1].service_->response_count(), 2U);
   EXPECT_EQ(0U, balancer_servers_[2].service_->request_count());
   EXPECT_EQ(0U, balancer_servers_[2].service_->response_count());
   // Check LB policy name for the channel.
@@ -1064,6 +1082,7 @@
 }
 
 TEST_F(SingleBalancerTest, Drop) {
+  SetNextResolutionAllBalancers();
   const size_t kNumRpcsPerAddress = 100;
   const int num_of_drop_by_rate_limiting_addresses = 1;
   const int num_of_drop_by_load_balancing_addresses = 2;
@@ -1107,6 +1126,7 @@
 }
 
 TEST_F(SingleBalancerTest, DropAllFirst) {
+  SetNextResolutionAllBalancers();
   // All registered addresses are marked as "drop".
   const int num_of_drop_by_rate_limiting_addresses = 1;
   const int num_of_drop_by_load_balancing_addresses = 1;
@@ -1122,6 +1142,7 @@
 }
 
 TEST_F(SingleBalancerTest, DropAll) {
+  SetNextResolutionAllBalancers();
   ScheduleResponseForBalancer(
       0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
       0);
@@ -1152,6 +1173,7 @@
 };
 
 TEST_F(SingleBalancerWithClientLoadReportingTest, Vanilla) {
+  SetNextResolutionAllBalancers();
   const size_t kNumRpcsPerAddress = 100;
   ScheduleResponseForBalancer(
       0, BalancerServiceImpl::BuildResponseForBackends(GetBackendPorts(), {}),
@@ -1186,6 +1208,7 @@
 }
 
 TEST_F(SingleBalancerWithClientLoadReportingTest, Drop) {
+  SetNextResolutionAllBalancers();
   const size_t kNumRpcsPerAddress = 3;
   const int num_of_drop_by_rate_limiting_addresses = 2;
   const int num_of_drop_by_load_balancing_addresses = 1;
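Note on the pattern above: like most of the test changes in this section, the SetNextResolution() hunk drops the explicit grpc_exec_ctx* threading in favor of the RAII grpc_core::ExecCtx object. A minimal sketch of the converted pattern, built only from calls that already appear in this diff (the enclosing block and the trailing comment are illustrative, not part of the change):

    // Old style, removed above:
    //   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    //   grpc_uri* lb_uri = grpc_uri_parse(&exec_ctx, lb_uri_str, true);
    //   ...
    //   grpc_exec_ctx_finish(&exec_ctx);
    {
      // New style: the ExecCtx constructed on the stack becomes the thread's
      // current exec ctx, so core APIs pick it up implicitly.
      grpc_core::ExecCtx exec_ctx;
      grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str, true);
      GPR_ASSERT(lb_uri != nullptr);
      // ... use lb_uri exactly as SetNextResolution() does above ...
      grpc_core::ExecCtx::Get()->Flush();  // explicit flush is optional here;
    }  // pending work is also flushed when exec_ctx goes out of scope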
diff --git a/test/cpp/end2end/mock_test.cc b/test/cpp/end2end/mock_test.cc
index 61f4111..175ecea 100644
--- a/test/cpp/end2end/mock_test.cc
+++ b/test/cpp/end2end/mock_test.cc
@@ -43,7 +43,6 @@
 
 #include <iostream>
 
-using namespace std;
 using ::testing::AtLeast;
 using ::testing::DoAll;
 using ::testing::Invoke;
@@ -57,6 +56,7 @@
 using grpc::testing::EchoTestService;
 using grpc::testing::MockClientReaderWriter;
 using std::chrono::system_clock;
+using std::vector;
 
 namespace grpc {
 namespace testing {
diff --git a/test/cpp/grpclb/grpclb_api_test.cc b/test/cpp/grpclb/grpclb_api_test.cc
index 7b62080..1f2ef0c 100644
--- a/test/cpp/grpclb/grpclb_api_test.cc
+++ b/test/cpp/grpclb/grpclb_api_test.cc
@@ -17,6 +17,7 @@
  */
 
 #include <grpc++/impl/codegen/config.h>
+#include <grpc/grpc.h>
 #include <gtest/gtest.h>
 
 #include "src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.h"
@@ -98,9 +99,6 @@
   server->set_port(54321);
   server->set_load_balance_token("load_balancing");
   server->set_drop(true);
-  auto* expiration_interval = serverlist->mutable_expiration_interval();
-  expiration_interval->set_seconds(888);
-  expiration_interval->set_nanos(999);
 
   const grpc::string encoded_response = response.SerializeAsString();
   const grpc_slice encoded_slice = grpc_slice_from_copied_buffer(
@@ -121,11 +119,6 @@
   EXPECT_STREQ(c_serverlist->servers[1]->load_balance_token, "load_balancing");
   EXPECT_TRUE(c_serverlist->servers[1]->drop);
 
-  EXPECT_TRUE(c_serverlist->expiration_interval.has_seconds);
-  EXPECT_EQ(c_serverlist->expiration_interval.seconds, 888);
-  EXPECT_TRUE(c_serverlist->expiration_interval.has_nanos);
-  EXPECT_EQ(c_serverlist->expiration_interval.nanos, 999);
-
   grpc_slice_unref(encoded_slice);
   grpc_grpclb_destroy_serverlist(c_serverlist);
 }
@@ -135,5 +128,8 @@
 
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
+  grpc_init();
+  int ret = RUN_ALL_TESTS();
+  grpc_shutdown();
+  return ret;
 }
diff --git a/test/cpp/grpclb/grpclb_test.cc b/test/cpp/grpclb/grpclb_test.cc
index ca846c7..38b65fb 100644
--- a/test/cpp/grpclb/grpclb_test.cc
+++ b/test/cpp/grpclb/grpclb_test.cc
@@ -113,10 +113,9 @@
 
 static void* tag(intptr_t t) { return (void*)t; }
 
-static grpc_slice build_response_payload_slice(
-    const char* host, int* ports, size_t nports,
-    int64_t expiration_interval_secs, int32_t expiration_interval_nanos,
-    const char* token_prefix) {
+static grpc_slice build_response_payload_slice(const char* host, int* ports,
+                                               size_t nports,
+                                               const char* token_prefix) {
   // server_list {
   //   servers {
   //     ip_address: <in_addr/6 bytes of an IP>
@@ -128,15 +127,6 @@
   grpc::lb::v1::LoadBalanceResponse response;
   auto* serverlist = response.mutable_server_list();
 
-  if (expiration_interval_secs > 0 || expiration_interval_nanos > 0) {
-    auto* expiration_interval = serverlist->mutable_expiration_interval();
-    if (expiration_interval_secs > 0) {
-      expiration_interval->set_seconds(expiration_interval_secs);
-    }
-    if (expiration_interval_nanos > 0) {
-      expiration_interval->set_nanos(expiration_interval_nanos);
-    }
-  }
   for (size_t i = 0; i < nports; i++) {
     auto* server = serverlist->add_servers();
     // TODO(dgq): test ipv6
@@ -248,13 +238,13 @@
     if (i == 0) {
       // First half of the ports.
       response_payload_slice = build_response_payload_slice(
-          "127.0.0.1", ports, nports / 2, -1, -1, sf->lb_token_prefix);
+          "127.0.0.1", ports, nports / 2, sf->lb_token_prefix);
     } else {
       // Second half of the ports.
       sleep_ms(update_delay_ms);
       response_payload_slice = build_response_payload_slice(
-          "127.0.0.1", ports + (nports / 2), (nports + 1) / 2 /* ceil */, -1,
-          -1, "" /* this half doesn't get to receive an LB token */);
+          "127.0.0.1", ports + (nports / 2), (nports + 1) / 2 /* ceil */,
+          "" /* this half doesn't get to receive an LB token */);
     }
 
     response_payload = grpc_raw_byte_buffer_create(&response_payload_slice, 1);
@@ -562,7 +552,7 @@
 #define BALANCERS_NAME "lb.name"
 static void setup_client(const server_fixture* lb_server,
                          const server_fixture* backends, client_fixture* cf) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
 
   char* expected_target_names = nullptr;
   const char* backends_name = lb_server->servers_hostport;
@@ -574,7 +564,7 @@
   grpc_lb_addresses* addresses = grpc_lb_addresses_create(1, nullptr);
   char* lb_uri_str;
   gpr_asprintf(&lb_uri_str, "ipv4:%s", lb_server->servers_hostport);
-  grpc_uri* lb_uri = grpc_uri_parse(&exec_ctx, lb_uri_str, true);
+  grpc_uri* lb_uri = grpc_uri_parse(lb_uri_str, true);
   GPR_ASSERT(lb_uri != nullptr);
   grpc_lb_addresses_set_address_from_uri(addresses, 0, lb_uri, true,
                                          lb_server->balancer_name, nullptr);
@@ -586,7 +576,7 @@
       grpc_lb_addresses_create_channel_arg(addresses);
   grpc_channel_args* fake_result =
       grpc_channel_args_copy_and_add(nullptr, &fake_addresses, 1);
-  grpc_lb_addresses_destroy(&exec_ctx, addresses);
+  grpc_lb_addresses_destroy(addresses);
 
   const grpc_arg new_args[] = {
       grpc_fake_transport_expected_targets_arg(expected_target_names),
@@ -601,13 +591,12 @@
       grpc_fake_transport_security_credentials_create();
   cf->client =
       grpc_secure_channel_create(fake_creds, cf->server_uri, args, nullptr);
-  grpc_fake_resolver_response_generator_set_response(
-      &exec_ctx, response_generator, fake_result);
-  grpc_channel_args_destroy(&exec_ctx, fake_result);
-  grpc_channel_credentials_unref(&exec_ctx, fake_creds);
-  grpc_channel_args_destroy(&exec_ctx, args);
+  grpc_fake_resolver_response_generator_set_response(response_generator,
+                                                     fake_result);
+  grpc_channel_args_destroy(fake_result);
+  grpc_channel_credentials_unref(fake_creds);
+  grpc_channel_args_destroy(args);
   grpc_fake_resolver_response_generator_unref(response_generator);
-  grpc_exec_ctx_finish(&exec_ctx);
 }
 
 static void teardown_client(client_fixture* cf) {
@@ -703,14 +692,15 @@
       tf.lb_backends[i].lb_token_prefix = "";
     }
     setup_server("127.0.0.1", &tf.lb_backends[i]);
-    gpr_thd_new(&tf.lb_backends[i].tid, fork_backend_server, &tf.lb_backends[i],
-                &options);
+    gpr_thd_new(&tf.lb_backends[i].tid, "grpclb_backend", fork_backend_server,
+                &tf.lb_backends[i], &options);
   }
 
   tf.lb_server.lb_token_prefix = LB_TOKEN_PREFIX;
   tf.lb_server.balancer_name = BALANCERS_NAME;
   setup_server("127.0.0.1", &tf.lb_server);
-  gpr_thd_new(&tf.lb_server.tid, fork_lb_server, &tf.lb_server, &options);
+  gpr_thd_new(&tf.lb_server.tid, "grpclb_server", fork_lb_server, &tf.lb_server,
+              &options);
   setup_client(&tf.lb_server, tf.lb_backends, &tf.client);
   return tf;
 }
diff --git a/test/cpp/interop/interop_client.cc b/test/cpp/interop/interop_client.cc
index af97fe0..d2192f5 100644
--- a/test/cpp/interop/interop_client.cc
+++ b/test/cpp/interop/interop_client.cc
@@ -122,7 +122,8 @@
     : serviceStub_(channel, new_stub_every_test_case),
       do_not_abort_on_transient_failures_(do_not_abort_on_transient_failures) {}
 
-bool InteropClient::AssertStatusOk(const Status& s) {
+bool InteropClient::AssertStatusOk(const Status& s,
+                                   const grpc::string& optional_debug_string) {
   if (s.ok()) {
     return true;
   }
@@ -131,17 +132,21 @@
   // already checked for s.ok() above). So, the following will call abort()
   // (unless s.error_code() corresponds to a transient failure and
   // 'do_not_abort_on_transient_failures' is true)
-  return AssertStatusCode(s, StatusCode::OK);
+  return AssertStatusCode(s, StatusCode::OK, optional_debug_string);
 }
 
-bool InteropClient::AssertStatusCode(const Status& s,
-                                     StatusCode expected_code) {
+bool InteropClient::AssertStatusCode(
+    const Status& s, StatusCode expected_code,
+    const grpc::string& optional_debug_string) {
   if (s.error_code() == expected_code) {
     return true;
   }
 
-  gpr_log(GPR_ERROR, "Error status code: %d (expected: %d), message: %s",
-          s.error_code(), expected_code, s.error_message().c_str());
+  gpr_log(GPR_ERROR,
+          "Error status code: %d (expected: %d), message: %s,"
+          " debug string: %s",
+          s.error_code(), expected_code, s.error_message().c_str(),
+          optional_debug_string.c_str());
 
   // In case of transient/retryable failures (like a broken
   // connection) we may or may not abort (see TransientFailureOrAbort())
@@ -161,7 +166,7 @@
 
   Status s = serviceStub_.Get()->EmptyCall(&context, request, &response);
 
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -191,7 +196,7 @@
   }
 
   Status s = serviceStub_.Get()->UnaryCall(&context, *request, response);
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -241,7 +246,7 @@
 
   Status s = serviceStub_.Get()->UnaryCall(&context, request, &response);
 
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -269,7 +274,7 @@
 
   Status s = serviceStub_.Get()->UnaryCall(&context, request, &response);
 
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -412,7 +417,7 @@
   GPR_ASSERT(stream->WritesDone());
 
   Status s = stream->Finish();
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -451,7 +456,7 @@
   }
 
   Status s = stream->Finish();
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -516,7 +521,7 @@
   GPR_ASSERT(stream->WritesDone());
 
   s = stream->Finish();
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -578,7 +583,7 @@
   }
 
   Status s = stream->Finish();
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
   return true;
@@ -619,7 +624,7 @@
   }
 
   Status s = stream->Finish();
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -666,7 +671,7 @@
   }
 
   Status s = stream->Finish();
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -710,7 +715,7 @@
   GPR_ASSERT(!stream->Read(&response));
 
   Status s = stream->Finish();
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -732,7 +737,8 @@
   context.TryCancel();
   Status s = stream->Finish();
 
-  if (!AssertStatusCode(s, StatusCode::CANCELLED)) {
+  if (!AssertStatusCode(s, StatusCode::CANCELLED,
+                        context.debug_error_string())) {
     return false;
   }
 
@@ -790,7 +796,8 @@
   stream->Write(request);
 
   Status s = stream->Finish();
-  if (!AssertStatusCode(s, StatusCode::DEADLINE_EXCEEDED)) {
+  if (!AssertStatusCode(s, StatusCode::DEADLINE_EXCEEDED,
+                        context.debug_error_string())) {
     return false;
   }
 
@@ -810,7 +817,7 @@
   GPR_ASSERT(stream->Read(&response) == false);
 
   Status s = stream->Finish();
-  if (!AssertStatusOk(s)) {
+  if (!AssertStatusOk(s, context.debug_error_string())) {
     return false;
   }
 
@@ -833,7 +840,8 @@
   requested_status->set_code(test_code);
   requested_status->set_message(test_msg);
   Status s = serviceStub_.Get()->UnaryCall(&context, request, &response);
-  if (!AssertStatusCode(s, grpc::StatusCode::UNKNOWN)) {
+  if (!AssertStatusCode(s, grpc::StatusCode::UNKNOWN,
+                        context.debug_error_string())) {
     return false;
   }
   GPR_ASSERT(s.error_message() == test_msg);
@@ -853,7 +861,8 @@
   while (stream->Read(&streaming_response))
     ;
   s = stream->Finish();
-  if (!AssertStatusCode(s, grpc::StatusCode::UNKNOWN)) {
+  if (!AssertStatusCode(s, grpc::StatusCode::UNKNOWN,
+                        context.debug_error_string())) {
     return false;
   }
   GPR_ASSERT(s.error_message() == test_msg);
@@ -880,7 +889,7 @@
   context1.AddMetadata("x-user-ip", "1.2.3.4");
   Status s1 =
       serviceStub_.Get()->CacheableUnaryCall(&context1, request, &response1);
-  if (!AssertStatusOk(s1)) {
+  if (!AssertStatusOk(s1, context1.debug_error_string())) {
     return false;
   }
   gpr_log(GPR_DEBUG, "response 1 payload: %s",
@@ -893,7 +902,7 @@
   context2.AddMetadata("x-user-ip", "1.2.3.4");
   Status s2 =
       serviceStub_.Get()->CacheableUnaryCall(&context2, request, &response2);
-  if (!AssertStatusOk(s2)) {
+  if (!AssertStatusOk(s2, context2.debug_error_string())) {
     return false;
   }
   gpr_log(GPR_DEBUG, "response 2 payload: %s",
@@ -915,7 +924,7 @@
   context3.AddMetadata("x-user-ip", "1.2.3.4");
   Status s3 =
       serviceStub_.Get()->CacheableUnaryCall(&context3, request1, &response3);
-  if (!AssertStatusOk(s3)) {
+  if (!AssertStatusOk(s3, context3.debug_error_string())) {
     return false;
   }
   gpr_log(GPR_DEBUG, "response 3 payload: %s",
@@ -946,7 +955,7 @@
     request.mutable_payload()->set_body(payload.c_str(), kLargeRequestSize);
 
     Status s = serviceStub_.Get()->UnaryCall(&context, request, &response);
-    if (!AssertStatusOk(s)) {
+    if (!AssertStatusOk(s, context.debug_error_string())) {
       return false;
     }
 
@@ -997,7 +1006,7 @@
     GPR_ASSERT(!stream->Read(&response));
 
     Status s = stream->Finish();
-    if (!AssertStatusOk(s)) {
+    if (!AssertStatusOk(s, context.debug_error_string())) {
       return false;
     }
 
@@ -1028,7 +1037,8 @@
 
   Status s = stub->UnimplementedCall(&context, request, &response);
 
-  if (!AssertStatusCode(s, StatusCode::UNIMPLEMENTED)) {
+  if (!AssertStatusCode(s, StatusCode::UNIMPLEMENTED,
+                        context.debug_error_string())) {
     return false;
   }
 
@@ -1046,7 +1056,8 @@
   Status s =
       serviceStub_.Get()->UnimplementedCall(&context, request, &response);
 
-  if (!AssertStatusCode(s, StatusCode::UNIMPLEMENTED)) {
+  if (!AssertStatusCode(s, StatusCode::UNIMPLEMENTED,
+                        context.debug_error_string())) {
     return false;
   }
 
diff --git a/test/cpp/interop/interop_client.h b/test/cpp/interop/interop_client.h
index 57e8ba6..b8bb134 100644
--- a/test/cpp/interop/interop_client.h
+++ b/test/cpp/interop/interop_client.h
@@ -103,8 +103,10 @@
   /// Run \a custom_checks_fn as an additional check.
   bool PerformLargeUnary(SimpleRequest* request, SimpleResponse* response,
                          CheckerFn custom_checks_fn);
-  bool AssertStatusOk(const Status& s);
-  bool AssertStatusCode(const Status& s, StatusCode expected_code);
+  bool AssertStatusOk(const Status& s,
+                      const grpc::string& optional_debug_string);
+  bool AssertStatusCode(const Status& s, StatusCode expected_code,
+                        const grpc::string& optional_debug_string);
   bool TransientFailureOrAbort();
   ServiceStub serviceStub_;
 
diff --git a/test/cpp/interop/interop_server.cc b/test/cpp/interop/interop_server.cc
index a24cdc7..30bd8bf 100644
--- a/test/cpp/interop/interop_server.cc
+++ b/test/cpp/interop/interop_server.cc
@@ -317,9 +317,15 @@
 
 void grpc::testing::interop::RunServer(
     std::shared_ptr<ServerCredentials> creds) {
-  GPR_ASSERT(FLAGS_port != 0);
+  RunServer(creds, FLAGS_port, nullptr);
+}
+
+void grpc::testing::interop::RunServer(
+    std::shared_ptr<ServerCredentials> creds, const int port,
+    ServerStartedCondition* server_started_condition) {
+  GPR_ASSERT(port != 0);
   std::ostringstream server_address;
-  server_address << "0.0.0.0:" << FLAGS_port;
+  server_address << "0.0.0.0:" << port;
   TestServiceImpl service;
 
   SimpleRequest request;
@@ -333,6 +339,14 @@
   }
   std::unique_ptr<Server> server(builder.BuildAndStart());
   gpr_log(GPR_INFO, "Server listening on %s", server_address.str().c_str());
+
+  // Signal that the server has started.
+  if (server_started_condition) {
+    std::unique_lock<std::mutex> lock(server_started_condition->mutex);
+    server_started_condition->server_started = true;
+    server_started_condition->condition.notify_all();
+  }
+
   while (!gpr_atm_no_barrier_load(&g_got_sigint)) {
     gpr_sleep_until(gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                                  gpr_time_from_seconds(5, GPR_TIMESPAN)));
diff --git a/test/cpp/interop/server_helper.h b/test/cpp/interop/server_helper.h
index 6af003f..1bf7db1 100644
--- a/test/cpp/interop/server_helper.h
+++ b/test/cpp/interop/server_helper.h
@@ -19,6 +19,7 @@
 #ifndef GRPC_TEST_CPP_INTEROP_SERVER_HELPER_H
 #define GRPC_TEST_CPP_INTEROP_SERVER_HELPER_H
 
+#include <condition_variable>
 #include <memory>
 
 #include <grpc/compression.h>
@@ -50,8 +51,27 @@
 namespace interop {
 
 extern gpr_atm g_got_sigint;
+
+struct ServerStartedCondition {
+  std::mutex mutex;
+  std::condition_variable condition;
+  bool server_started = false;
+};
+
+/// Run gRPC interop server using port FLAGS_port.
+///
+/// \param creds The credentials associated with the server.
 void RunServer(std::shared_ptr<ServerCredentials> creds);
 
+/// Run gRPC interop server.
+///
+/// \param creds The credentials associated with the server.
+/// \param port Port to use for the server.
+/// \param server_started_condition (optional) Struct holding the mutex,
+///     condition variable, and flag used to signal that the server has started.
+void RunServer(std::shared_ptr<ServerCredentials> creds, int port,
+               ServerStartedCondition* server_started_condition);
+
 }  // namespace interop
 }  // namespace testing
 }  // namespace grpc
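The ServerStartedCondition introduced above is a plain mutex / condition_variable / bool trio that lets a caller find out when RunServer() is actually listening. A minimal usage sketch, assuming the caller runs the server on its own std::thread, already holds a std::shared_ptr<ServerCredentials> named creds, and includes <thread>, <mutex>, and <condition_variable>; the thread, port value, and variable names below are illustrative, not part of this change:

    grpc::testing::interop::ServerStartedCondition cond;
    std::thread server_thread([creds, &cond]() {
      // Blocks until SIGINT; signals cond once the server is listening.
      grpc::testing::interop::RunServer(creds, /*port=*/50051, &cond);
    });
    {
      std::unique_lock<std::mutex> lock(cond.mutex);
      cond.condition.wait(lock, [&cond] { return cond.server_started; });
    }
    // The server is now accepting connections; clients can safely be started.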
diff --git a/test/cpp/interop/stress_test.cc b/test/cpp/interop/stress_test.cc
index 028ff11..4b39dc3 100644
--- a/test/cpp/interop/stress_test.cc
+++ b/test/cpp/interop/stress_test.cc
@@ -36,9 +36,7 @@
 #include "test/cpp/util/metrics_server.h"
 #include "test/cpp/util/test_config.h"
 
-extern "C" {
 extern void gpr_default_log(gpr_log_func_args* args);
-}
 
 DEFINE_int32(metrics_port, 8081, "The metrics server port.");
 
diff --git a/test/cpp/microbenchmarks/bm_call_create.cc b/test/cpp/microbenchmarks/bm_call_create.cc
index a45c577..5c2c38c 100644
--- a/test/cpp/microbenchmarks/bm_call_create.cc
+++ b/test/cpp/microbenchmarks/bm_call_create.cc
@@ -311,12 +311,9 @@
 }
 BENCHMARK(BM_LameChannelCallCreateCoreSeparateBatch);
 
-static void FilterDestroy(grpc_exec_ctx* exec_ctx, void* arg,
-                          grpc_error* error) {
-  gpr_free(arg);
-}
+static void FilterDestroy(void* arg, grpc_error* error) { gpr_free(arg); }
 
-static void DoNothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+static void DoNothing(void* arg, grpc_error* error) {}
 
 class FakeClientChannelFactory : public grpc_client_channel_factory {
  public:
@@ -324,15 +321,12 @@
 
  private:
   static void NoRef(grpc_client_channel_factory* factory) {}
-  static void NoUnref(grpc_exec_ctx* exec_ctx,
-                      grpc_client_channel_factory* factory) {}
-  static grpc_subchannel* CreateSubchannel(grpc_exec_ctx* exec_ctx,
-                                           grpc_client_channel_factory* factory,
+  static void NoUnref(grpc_client_channel_factory* factory) {}
+  static grpc_subchannel* CreateSubchannel(grpc_client_channel_factory* factory,
                                            const grpc_subchannel_args* args) {
     return nullptr;
   }
-  static grpc_channel* CreateClientChannel(grpc_exec_ctx* exec_ctx,
-                                           grpc_client_channel_factory* factory,
+  static grpc_channel* CreateClientChannel(grpc_client_channel_factory* factory,
                                            const char* target,
                                            grpc_client_channel_type type,
                                            const grpc_channel_args* args) {
@@ -366,36 +360,32 @@
 
 namespace dummy_filter {
 
-static void StartTransportStreamOp(grpc_exec_ctx* exec_ctx,
-                                   grpc_call_element* elem,
+static void StartTransportStreamOp(grpc_call_element* elem,
                                    grpc_transport_stream_op_batch* op) {}
 
-static void StartTransportOp(grpc_exec_ctx* exec_ctx,
-                             grpc_channel_element* elem,
+static void StartTransportOp(grpc_channel_element* elem,
                              grpc_transport_op* op) {}
 
-static grpc_error* InitCallElem(grpc_exec_ctx* exec_ctx,
-                                grpc_call_element* elem,
+static grpc_error* InitCallElem(grpc_call_element* elem,
                                 const grpc_call_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-static void SetPollsetOrPollsetSet(grpc_exec_ctx* exec_ctx,
-                                   grpc_call_element* elem,
+static void SetPollsetOrPollsetSet(grpc_call_element* elem,
                                    grpc_polling_entity* pollent) {}
 
-static void DestroyCallElem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void DestroyCallElem(grpc_call_element* elem,
                             const grpc_call_final_info* final_info,
                             grpc_closure* then_sched_closure) {}
 
-grpc_error* InitChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+grpc_error* InitChannelElem(grpc_channel_element* elem,
                             grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-void DestroyChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem) {}
+void DestroyChannelElem(grpc_channel_element* elem) {}
 
-void GetChannelInfo(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+void GetChannelInfo(grpc_channel_element* elem,
                     const grpc_channel_info* channel_info) {}
 
 static const grpc_channel_filter dummy_filter = {StartTransportStreamOp,
@@ -422,41 +412,38 @@
 const char* name;
 
 /* implementation of grpc_transport_init_stream */
-int InitStream(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-               grpc_stream* stream, grpc_stream_refcount* refcount,
-               const void* server_data, gpr_arena* arena) {
+int InitStream(grpc_transport* self, grpc_stream* stream,
+               grpc_stream_refcount* refcount, const void* server_data,
+               gpr_arena* arena) {
   return 0;
 }
 
 /* implementation of grpc_transport_set_pollset */
-void SetPollset(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                grpc_stream* stream, grpc_pollset* pollset) {}
+void SetPollset(grpc_transport* self, grpc_stream* stream,
+                grpc_pollset* pollset) {}
 
 /* implementation of grpc_transport_set_pollset_set */
-void SetPollsetSet(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                   grpc_stream* stream, grpc_pollset_set* pollset_set) {}
+void SetPollsetSet(grpc_transport* self, grpc_stream* stream,
+                   grpc_pollset_set* pollset_set) {}
 
 /* implementation of grpc_transport_perform_stream_op */
-void PerformStreamOp(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                     grpc_stream* stream, grpc_transport_stream_op_batch* op) {
-  GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_NONE);
+void PerformStreamOp(grpc_transport* self, grpc_stream* stream,
+                     grpc_transport_stream_op_batch* op) {
+  GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_NONE);
 }
 
 /* implementation of grpc_transport_perform_op */
-void PerformOp(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-               grpc_transport_op* op) {}
+void PerformOp(grpc_transport* self, grpc_transport_op* op) {}
 
 /* implementation of grpc_transport_destroy_stream */
-void DestroyStream(grpc_exec_ctx* exec_ctx, grpc_transport* self,
-                   grpc_stream* stream, grpc_closure* then_sched_closure) {}
+void DestroyStream(grpc_transport* self, grpc_stream* stream,
+                   grpc_closure* then_sched_closure) {}
 
 /* implementation of grpc_transport_destroy */
-void Destroy(grpc_exec_ctx* exec_ctx, grpc_transport* self) {}
+void Destroy(grpc_transport* self) {}
 
 /* implementation of grpc_transport_get_endpoint */
-grpc_endpoint* GetEndpoint(grpc_exec_ctx* exec_ctx, grpc_transport* self) {
-  return nullptr;
-}
+grpc_endpoint* GetEndpoint(grpc_transport* self) { return nullptr; }
 
 static const grpc_transport_vtable dummy_transport_vtable = {
     0,          "dummy_http2", InitStream,
@@ -472,8 +459,8 @@
  public:
   class Op {
    public:
-    Op(grpc_exec_ctx* exec_ctx, NoOp* p, grpc_call_stack* s) {}
-    void Finish(grpc_exec_ctx* exec_ctx) {}
+    Op(NoOp* p, grpc_call_stack* s) {}
+    void Finish() {}
   };
 };
 
@@ -489,13 +476,11 @@
 
   class Op {
    public:
-    Op(grpc_exec_ctx* exec_ctx, SendEmptyMetadata* p, grpc_call_stack* s) {
+    Op(SendEmptyMetadata* p, grpc_call_stack* s) {
       grpc_metadata_batch_init(&batch_);
       p->op_payload_.send_initial_metadata.send_initial_metadata = &batch_;
     }
-    void Finish(grpc_exec_ctx* exec_ctx) {
-      grpc_metadata_batch_destroy(exec_ctx, &batch_);
-    }
+    void Finish() { grpc_metadata_batch_destroy(&batch_); }
 
    private:
     grpc_metadata_batch batch_;
@@ -536,20 +521,20 @@
     label << " #has_dummy_filter";
   }
 
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   size_t channel_size = grpc_channel_stack_size(
       filters.size() == 0 ? nullptr : &filters[0], filters.size());
   grpc_channel_stack* channel_stack =
       static_cast<grpc_channel_stack*>(gpr_zalloc(channel_size));
   GPR_ASSERT(GRPC_LOG_IF_ERROR(
       "channel_stack_init",
-      grpc_channel_stack_init(&exec_ctx, 1, FilterDestroy, channel_stack,
-                              &filters[0], filters.size(), &channel_args,
+      grpc_channel_stack_init(1, FilterDestroy, channel_stack, &filters[0],
+                              filters.size(), &channel_args,
                               fixture.flags & REQUIRES_TRANSPORT
                                   ? &dummy_transport::dummy_transport
                                   : nullptr,
                               "CHANNEL", channel_stack)));
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   grpc_call_stack* call_stack =
       static_cast<grpc_call_stack*>(gpr_zalloc(channel_stack->call_stack_size));
   grpc_millis deadline = GRPC_MILLIS_INF_FUTURE;
@@ -568,12 +553,12 @@
   call_args.arena = gpr_arena_create(kArenaSize);
   while (state.KeepRunning()) {
     GPR_TIMER_SCOPE("BenchmarkCycle", 0);
-    GRPC_ERROR_UNREF(grpc_call_stack_init(&exec_ctx, channel_stack, 1,
-                                          DoNothing, nullptr, &call_args));
-    typename TestOp::Op op(&exec_ctx, &test_op_data, call_stack);
-    grpc_call_stack_destroy(&exec_ctx, call_stack, &final_info, nullptr);
-    op.Finish(&exec_ctx);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_ERROR_UNREF(
+        grpc_call_stack_init(channel_stack, 1, DoNothing, nullptr, &call_args));
+    typename TestOp::Op op(&test_op_data, call_stack);
+    grpc_call_stack_destroy(call_stack, &final_info, nullptr);
+    op.Finish();
+    grpc_core::ExecCtx::Get()->Flush();
     // recreate arena every 64k iterations to avoid oom
     if (0 == (state.iterations() & 0xffff)) {
       gpr_arena_destroy(call_args.arena);
@@ -581,8 +566,8 @@
     }
   }
   gpr_arena_destroy(call_args.arena);
-  grpc_channel_stack_destroy(&exec_ctx, channel_stack);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_channel_stack_destroy(channel_stack);
+
   gpr_free(channel_stack);
   gpr_free(call_stack);
 
@@ -632,59 +617,55 @@
   grpc_call_combiner* call_combiner;
 } call_data;
 
-static void StartTransportStreamOp(grpc_exec_ctx* exec_ctx,
-                                   grpc_call_element* elem,
+static void StartTransportStreamOp(grpc_call_element* elem,
                                    grpc_transport_stream_op_batch* op) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   if (op->recv_initial_metadata) {
     GRPC_CALL_COMBINER_START(
-        exec_ctx, calld->call_combiner,
+        calld->call_combiner,
         op->payload->recv_initial_metadata.recv_initial_metadata_ready,
         GRPC_ERROR_NONE, "recv_initial_metadata");
   }
   if (op->recv_message) {
-    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
+    GRPC_CALL_COMBINER_START(calld->call_combiner,
                              op->payload->recv_message.recv_message_ready,
                              GRPC_ERROR_NONE, "recv_message");
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, op->on_complete, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(op->on_complete, GRPC_ERROR_NONE);
 }
 
-static void StartTransportOp(grpc_exec_ctx* exec_ctx,
-                             grpc_channel_element* elem,
+static void StartTransportOp(grpc_channel_element* elem,
                              grpc_transport_op* op) {
   if (op->disconnect_with_error != GRPC_ERROR_NONE) {
     GRPC_ERROR_UNREF(op->disconnect_with_error);
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
 }
 
-static grpc_error* InitCallElem(grpc_exec_ctx* exec_ctx,
-                                grpc_call_element* elem,
+static grpc_error* InitCallElem(grpc_call_element* elem,
                                 const grpc_call_element_args* args) {
   call_data* calld = static_cast<call_data*>(elem->call_data);
   calld->call_combiner = args->call_combiner;
   return GRPC_ERROR_NONE;
 }
 
-static void SetPollsetOrPollsetSet(grpc_exec_ctx* exec_ctx,
-                                   grpc_call_element* elem,
+static void SetPollsetOrPollsetSet(grpc_call_element* elem,
                                    grpc_polling_entity* pollent) {}
 
-static void DestroyCallElem(grpc_exec_ctx* exec_ctx, grpc_call_element* elem,
+static void DestroyCallElem(grpc_call_element* elem,
                             const grpc_call_final_info* final_info,
                             grpc_closure* then_sched_closure) {
-  GRPC_CLOSURE_SCHED(exec_ctx, then_sched_closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(then_sched_closure, GRPC_ERROR_NONE);
 }
 
-grpc_error* InitChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+grpc_error* InitChannelElem(grpc_channel_element* elem,
                             grpc_channel_element_args* args) {
   return GRPC_ERROR_NONE;
 }
 
-void DestroyChannelElem(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem) {}
+void DestroyChannelElem(grpc_channel_element* elem) {}
 
-void GetChannelInfo(grpc_exec_ctx* exec_ctx, grpc_channel_element* elem,
+void GetChannelInfo(grpc_channel_element* elem,
                     const grpc_channel_info* channel_info) {}
 
 static const grpc_channel_filter isolated_call_filter = {
@@ -711,10 +692,8 @@
         builder, &isolated_call_filter::isolated_call_filter, nullptr,
         nullptr));
     {
-      grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-      channel_ = grpc_channel_create_with_builder(&exec_ctx, builder,
-                                                  GRPC_CLIENT_CHANNEL);
-      grpc_exec_ctx_finish(&exec_ctx);
+      grpc_core::ExecCtx exec_ctx;
+      channel_ = grpc_channel_create_with_builder(builder, GRPC_CLIENT_CHANNEL);
     }
     cq_ = grpc_completion_queue_create_for_next(nullptr);
   }
diff --git a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc
index 3fff8b0..4b73103 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_hpack.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_hpack.cc
@@ -50,22 +50,22 @@
 
 static void BM_HpackEncoderInitDestroy(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_chttp2_hpack_compressor c;
   while (state.KeepRunning()) {
     grpc_chttp2_hpack_compressor_init(&c);
-    grpc_chttp2_hpack_compressor_destroy(&exec_ctx, &c);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_chttp2_hpack_compressor_destroy(&c);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_HpackEncoderInitDestroy);
 
 static void BM_HpackEncoderEncodeDeadline(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_millis saved_now = grpc_exec_ctx_now(&exec_ctx);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_millis saved_now = grpc_core::ExecCtx::Get()->Now();
 
   grpc_metadata_batch b;
   grpc_metadata_batch_init(&b);
@@ -85,14 +85,13 @@
         (size_t)1024,
         &stats,
     };
-    grpc_chttp2_encode_header(&exec_ctx, &c, nullptr, 0, &b, &hopt, &outbuf);
-    grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, &outbuf);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_chttp2_encode_header(&c, nullptr, 0, &b, &hopt, &outbuf);
+    grpc_slice_buffer_reset_and_unref_internal(&outbuf);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_metadata_batch_destroy(&exec_ctx, &b);
-  grpc_chttp2_hpack_compressor_destroy(&exec_ctx, &c);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &outbuf);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_metadata_batch_destroy(&b);
+  grpc_chttp2_hpack_compressor_destroy(&c);
+  grpc_slice_buffer_destroy_internal(&outbuf);
 
   std::ostringstream label;
   label << "framing_bytes/iter:"
@@ -109,17 +108,16 @@
 template <class Fixture>
 static void BM_HpackEncoderEncodeHeader(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   static bool logged_representative_output = false;
 
   grpc_metadata_batch b;
   grpc_metadata_batch_init(&b);
-  std::vector<grpc_mdelem> elems = Fixture::GetElems(&exec_ctx);
+  std::vector<grpc_mdelem> elems = Fixture::GetElems();
   std::vector<grpc_linked_mdelem> storage(elems.size());
   for (size_t i = 0; i < elems.size(); i++) {
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "addmd",
-        grpc_metadata_batch_add_tail(&exec_ctx, &b, &storage[i], elems[i])));
+        "addmd", grpc_metadata_batch_add_tail(&b, &storage[i], elems[i])));
   }
 
   grpc_chttp2_hpack_compressor c;
@@ -136,7 +134,7 @@
         (size_t)state.range(1),
         &stats,
     };
-    grpc_chttp2_encode_header(&exec_ctx, &c, nullptr, 0, &b, &hopt, &outbuf);
+    grpc_chttp2_encode_header(&c, nullptr, 0, &b, &hopt, &outbuf);
     if (!logged_representative_output && state.iterations() > 3) {
       logged_representative_output = true;
       for (size_t i = 0; i < outbuf.count; i++) {
@@ -145,13 +143,12 @@
         gpr_free(s);
       }
     }
-    grpc_slice_buffer_reset_and_unref_internal(&exec_ctx, &outbuf);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_slice_buffer_reset_and_unref_internal(&outbuf);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_metadata_batch_destroy(&exec_ctx, &b);
-  grpc_chttp2_hpack_compressor_destroy(&exec_ctx, &c);
-  grpc_slice_buffer_destroy_internal(&exec_ctx, &outbuf);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_metadata_batch_destroy(&b);
+  grpc_chttp2_hpack_compressor_destroy(&c);
+  grpc_slice_buffer_destroy_internal(&outbuf);
 
   std::ostringstream label;
   label << "framing_bytes/iter:"
@@ -169,15 +166,13 @@
 class EmptyBatch {
  public:
   static constexpr bool kEnableTrueBinary = false;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
-    return {};
-  }
+  static std::vector<grpc_mdelem> GetElems() { return {}; }
 };
 
 class SingleStaticElem {
  public:
   static constexpr bool kEnableTrueBinary = false;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
+  static std::vector<grpc_mdelem> GetElems() {
     return {GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE};
   }
 };
@@ -185,9 +180,9 @@
 class SingleInternedElem {
  public:
   static constexpr bool kEnableTrueBinary = false;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
+  static std::vector<grpc_mdelem> GetElems() {
     return {grpc_mdelem_from_slices(
-        exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc")),
+        grpc_slice_intern(grpc_slice_from_static_string("abc")),
         grpc_slice_intern(grpc_slice_from_static_string("def")))};
   }
 };
@@ -196,10 +191,10 @@
 class SingleInternedBinaryElem {
  public:
   static constexpr bool kEnableTrueBinary = kTrueBinary;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
+  static std::vector<grpc_mdelem> GetElems() {
     grpc_slice bytes = MakeBytes();
     std::vector<grpc_mdelem> out = {grpc_mdelem_from_slices(
-        exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc-bin")),
+        grpc_slice_intern(grpc_slice_from_static_string("abc-bin")),
         grpc_slice_intern(bytes))};
     grpc_slice_unref(bytes);
     return out;
@@ -218,9 +213,9 @@
 class SingleInternedKeyElem {
  public:
   static constexpr bool kEnableTrueBinary = false;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
+  static std::vector<grpc_mdelem> GetElems() {
     return {grpc_mdelem_from_slices(
-        exec_ctx, grpc_slice_intern(grpc_slice_from_static_string("abc")),
+        grpc_slice_intern(grpc_slice_from_static_string("abc")),
         grpc_slice_from_static_string("def"))};
   }
 };
@@ -228,9 +223,8 @@
 class SingleNonInternedElem {
  public:
   static constexpr bool kEnableTrueBinary = false;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
-    return {grpc_mdelem_from_slices(exec_ctx,
-                                    grpc_slice_from_static_string("abc"),
+  static std::vector<grpc_mdelem> GetElems() {
+    return {grpc_mdelem_from_slices(grpc_slice_from_static_string("abc"),
                                     grpc_slice_from_static_string("def"))};
   }
 };
@@ -239,9 +233,9 @@
 class SingleNonInternedBinaryElem {
  public:
   static constexpr bool kEnableTrueBinary = kTrueBinary;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
-    return {grpc_mdelem_from_slices(
-        exec_ctx, grpc_slice_from_static_string("abc-bin"), MakeBytes())};
+  static std::vector<grpc_mdelem> GetElems() {
+    return {grpc_mdelem_from_slices(grpc_slice_from_static_string("abc-bin"),
+                                    MakeBytes())};
   }
 
  private:
@@ -257,21 +251,21 @@
 class RepresentativeClientInitialMetadata {
  public:
   static constexpr bool kEnableTrueBinary = true;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
+  static std::vector<grpc_mdelem> GetElems() {
     return {
         GRPC_MDELEM_SCHEME_HTTP,
         GRPC_MDELEM_METHOD_POST,
         grpc_mdelem_from_slices(
-            exec_ctx, GRPC_MDSTR_PATH,
+            GRPC_MDSTR_PATH,
             grpc_slice_intern(grpc_slice_from_static_string("/foo/bar"))),
-        grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_AUTHORITY,
+        grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
                                 grpc_slice_intern(grpc_slice_from_static_string(
                                     "foo.test.google.fr:1234"))),
         GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP,
         GRPC_MDELEM_TE_TRAILERS,
         GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC,
         grpc_mdelem_from_slices(
-            exec_ctx, GRPC_MDSTR_USER_AGENT,
+            GRPC_MDSTR_USER_AGENT,
             grpc_slice_intern(grpc_slice_from_static_string(
                 "grpc-c/3.0.0-dev (linux; chttp2; green)")))};
   }
@@ -283,18 +277,18 @@
 class MoreRepresentativeClientInitialMetadata {
  public:
   static constexpr bool kEnableTrueBinary = true;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
+  static std::vector<grpc_mdelem> GetElems() {
     return {
         GRPC_MDELEM_SCHEME_HTTP,
         GRPC_MDELEM_METHOD_POST,
-        grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_PATH,
+        grpc_mdelem_from_slices(GRPC_MDSTR_PATH,
                                 grpc_slice_intern(grpc_slice_from_static_string(
                                     "/grpc.test.FooService/BarMethod"))),
-        grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_AUTHORITY,
+        grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
                                 grpc_slice_intern(grpc_slice_from_static_string(
                                     "foo.test.google.fr:1234"))),
         grpc_mdelem_from_slices(
-            exec_ctx, GRPC_MDSTR_GRPC_TRACE_BIN,
+            GRPC_MDSTR_GRPC_TRACE_BIN,
             grpc_slice_from_static_string("\x00\x01\x02\x03\x04\x05\x06\x07\x08"
                                           "\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                                           "\x10\x11\x12\x13\x14\x15\x16\x17\x18"
@@ -303,7 +297,7 @@
                                           "\x29\x2a\x2b\x2c\x2d\x2e\x2f"
                                           "\x30")),
         grpc_mdelem_from_slices(
-            exec_ctx, GRPC_MDSTR_GRPC_TAGS_BIN,
+            GRPC_MDSTR_GRPC_TAGS_BIN,
             grpc_slice_from_static_string("\x00\x01\x02\x03\x04\x05\x06\x07\x08"
                                           "\x09\x0a\x0b\x0c\x0d\x0e\x0f"
                                           "\x10\x11\x12\x13")),
@@ -311,7 +305,7 @@
         GRPC_MDELEM_TE_TRAILERS,
         GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC,
         grpc_mdelem_from_slices(
-            exec_ctx, GRPC_MDSTR_USER_AGENT,
+            GRPC_MDSTR_USER_AGENT,
             grpc_slice_intern(grpc_slice_from_static_string(
                 "grpc-c/3.0.0-dev (linux; chttp2; green)")))};
   }
@@ -320,7 +314,7 @@
 class RepresentativeServerInitialMetadata {
  public:
   static constexpr bool kEnableTrueBinary = true;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
+  static std::vector<grpc_mdelem> GetElems() {
     return {GRPC_MDELEM_STATUS_200,
             GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC,
             GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP};
@@ -330,7 +324,7 @@
 class RepresentativeServerTrailingMetadata {
  public:
   static constexpr bool kEnableTrueBinary = true;
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
+  static std::vector<grpc_mdelem> GetElems() {
     return {GRPC_MDELEM_GRPC_STATUS_0};
   }
 };
@@ -431,48 +425,45 @@
 
 static void BM_HpackParserInitDestroy(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_chttp2_hpack_parser p;
   while (state.KeepRunning()) {
-    grpc_chttp2_hpack_parser_init(&exec_ctx, &p);
-    grpc_chttp2_hpack_parser_destroy(&exec_ctx, &p);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_chttp2_hpack_parser_init(&p);
+    grpc_chttp2_hpack_parser_destroy(&p);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_HpackParserInitDestroy);
 
-static void UnrefHeader(grpc_exec_ctx* exec_ctx, void* user_data,
-                        grpc_mdelem md) {
-  GRPC_MDELEM_UNREF(exec_ctx, md);
+static void UnrefHeader(void* user_data, grpc_mdelem md) {
+  GRPC_MDELEM_UNREF(md);
 }
 
-template <class Fixture, void (*OnHeader)(grpc_exec_ctx*, void*, grpc_mdelem)>
+template <class Fixture, void (*OnHeader)(void*, grpc_mdelem)>
 static void BM_HpackParserParseHeader(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   std::vector<grpc_slice> init_slices = Fixture::GetInitSlices();
   std::vector<grpc_slice> benchmark_slices = Fixture::GetBenchmarkSlices();
   grpc_chttp2_hpack_parser p;
-  grpc_chttp2_hpack_parser_init(&exec_ctx, &p);
+  grpc_chttp2_hpack_parser_init(&p);
   p.on_header = OnHeader;
   p.on_header_user_data = nullptr;
   for (auto slice : init_slices) {
-    GPR_ASSERT(GRPC_ERROR_NONE ==
-               grpc_chttp2_hpack_parser_parse(&exec_ctx, &p, slice));
+    GPR_ASSERT(GRPC_ERROR_NONE == grpc_chttp2_hpack_parser_parse(&p, slice));
   }
   while (state.KeepRunning()) {
     for (auto slice : benchmark_slices) {
-      GPR_ASSERT(GRPC_ERROR_NONE ==
-                 grpc_chttp2_hpack_parser_parse(&exec_ctx, &p, slice));
+      GPR_ASSERT(GRPC_ERROR_NONE == grpc_chttp2_hpack_parser_parse(&p, slice));
     }
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
   }
   for (auto slice : init_slices) grpc_slice_unref(slice);
   for (auto slice : benchmark_slices) grpc_slice_unref(slice);
-  grpc_chttp2_hpack_parser_destroy(&exec_ctx, &p);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_chttp2_hpack_parser_destroy(&p);
+
   track_counters.Finish(state);
 }
 
@@ -769,8 +760,7 @@
 static void free_timeout(void* p) { gpr_free(p); }
 
 // New implementation.
-static void OnHeaderNew(grpc_exec_ctx* exec_ctx, void* user_data,
-                        grpc_mdelem md) {
+static void OnHeaderNew(void* user_data, grpc_mdelem md) {
   if (grpc_slice_eq(GRPC_MDKEY(md), GRPC_MDSTR_GRPC_TIMEOUT)) {
     grpc_millis* cached_timeout =
         static_cast<grpc_millis*>(grpc_mdelem_get_user_data(md, free_timeout));
@@ -793,7 +783,7 @@
       }
     }
     benchmark::DoNotOptimize(timeout);
-    GRPC_MDELEM_UNREF(exec_ctx, md);
+    GRPC_MDELEM_UNREF(md);
   } else {
     GPR_ASSERT(0);
   }
diff --git a/test/cpp/microbenchmarks/bm_chttp2_transport.cc b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
index be4da4d..fcb1677 100644
--- a/test/cpp/microbenchmarks/bm_chttp2_transport.cc
+++ b/test/cpp/microbenchmarks/bm_chttp2_transport.cc
@@ -58,7 +58,7 @@
     ru_ = grpc_resource_user_create(Library::get().rq(), "dummy_endpoint");
   }
 
-  void PushInput(grpc_exec_ctx* exec_ctx, grpc_slice slice) {
+  void PushInput(grpc_slice slice) {
     if (read_cb_ == nullptr) {
       GPR_ASSERT(!have_slice_);
       buffered_slice_ = slice;
@@ -66,7 +66,7 @@
       return;
     }
     grpc_slice_buffer_add(slices_, slice);
-    GRPC_CLOSURE_SCHED(exec_ctx, read_cb_, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(read_cb_, GRPC_ERROR_NONE);
     read_cb_ = nullptr;
   }
 
@@ -77,50 +77,45 @@
   bool have_slice_ = false;
   grpc_slice buffered_slice_;
 
-  void QueueRead(grpc_exec_ctx* exec_ctx, grpc_slice_buffer* slices,
-                 grpc_closure* cb) {
+  void QueueRead(grpc_slice_buffer* slices, grpc_closure* cb) {
     GPR_ASSERT(read_cb_ == nullptr);
     if (have_slice_) {
       have_slice_ = false;
       grpc_slice_buffer_add(slices, buffered_slice_);
-      GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
       return;
     }
     read_cb_ = cb;
     slices_ = slices;
   }
 
-  static void read(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                   grpc_slice_buffer* slices, grpc_closure* cb) {
-    static_cast<DummyEndpoint*>(ep)->QueueRead(exec_ctx, slices, cb);
+  static void read(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                   grpc_closure* cb) {
+    static_cast<DummyEndpoint*>(ep)->QueueRead(slices, cb);
   }
 
-  static void write(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                    grpc_slice_buffer* slices, grpc_closure* cb) {
-    GRPC_CLOSURE_SCHED(exec_ctx, cb, GRPC_ERROR_NONE);
+  static void write(grpc_endpoint* ep, grpc_slice_buffer* slices,
+                    grpc_closure* cb) {
+    GRPC_CLOSURE_SCHED(cb, GRPC_ERROR_NONE);
   }
 
   static grpc_workqueue* get_workqueue(grpc_endpoint* ep) { return nullptr; }
 
-  static void add_to_pollset(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                             grpc_pollset* pollset) {}
+  static void add_to_pollset(grpc_endpoint* ep, grpc_pollset* pollset) {}
 
-  static void add_to_pollset_set(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                                 grpc_pollset_set* pollset) {}
-
-  static void delete_from_pollset_set(grpc_exec_ctx* exec_ctx,
-                                      grpc_endpoint* ep,
-                                      grpc_pollset_set* pollset) {}
-
-  static void shutdown(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep,
-                       grpc_error* why) {
-    grpc_resource_user_shutdown(exec_ctx, static_cast<DummyEndpoint*>(ep)->ru_);
-    GRPC_CLOSURE_SCHED(exec_ctx, static_cast<DummyEndpoint*>(ep)->read_cb_,
-                       why);
+  static void add_to_pollset_set(grpc_endpoint* ep, grpc_pollset_set* pollset) {
   }
 
-  static void destroy(grpc_exec_ctx* exec_ctx, grpc_endpoint* ep) {
-    grpc_resource_user_unref(exec_ctx, static_cast<DummyEndpoint*>(ep)->ru_);
+  static void delete_from_pollset_set(grpc_endpoint* ep,
+                                      grpc_pollset_set* pollset) {}
+
+  static void shutdown(grpc_endpoint* ep, grpc_error* why) {
+    grpc_resource_user_shutdown(static_cast<DummyEndpoint*>(ep)->ru_);
+    GRPC_CLOSURE_SCHED(static_cast<DummyEndpoint*>(ep)->read_cb_, why);
+  }
+
+  static void destroy(grpc_endpoint* ep) {
+    grpc_resource_user_unref(static_cast<DummyEndpoint*>(ep)->ru_);
     delete static_cast<DummyEndpoint*>(ep);
   }
 
@@ -136,29 +131,24 @@
   Fixture(const grpc::ChannelArguments& args, bool client) {
     grpc_channel_args c_args = args.c_channel_args();
     ep_ = new DummyEndpoint;
-    t_ = grpc_create_chttp2_transport(exec_ctx(), &c_args, ep_, client);
-    grpc_chttp2_transport_start_reading(exec_ctx(), t_, nullptr, nullptr);
+    t_ = grpc_create_chttp2_transport(&c_args, ep_, client);
+    grpc_chttp2_transport_start_reading(t_, nullptr, nullptr);
     FlushExecCtx();
   }
 
-  void FlushExecCtx() { grpc_exec_ctx_flush(&exec_ctx_); }
+  void FlushExecCtx() { grpc_core::ExecCtx::Get()->Flush(); }
 
-  ~Fixture() {
-    grpc_transport_destroy(&exec_ctx_, t_);
-    grpc_exec_ctx_finish(&exec_ctx_);
-  }
+  ~Fixture() { grpc_transport_destroy(t_); }
 
   grpc_chttp2_transport* chttp2_transport() {
     return reinterpret_cast<grpc_chttp2_transport*>(t_);
   }
   grpc_transport* transport() { return t_; }
-  grpc_exec_ctx* exec_ctx() { return &exec_ctx_; }
 
-  void PushInput(grpc_slice slice) { ep_->PushInput(exec_ctx(), slice); }
+  void PushInput(grpc_slice slice) { ep_->PushInput(slice); }
 
  private:
   DummyEndpoint* ep_;
-  grpc_exec_ctx exec_ctx_ = GRPC_EXEC_CTX_INIT;
   grpc_transport* t_;
 };
 
@@ -175,8 +165,8 @@
       GRPC_CLOSURE_INIT(this, Execute, this, sched);
     }
     F f_;
-    static void Execute(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
-      static_cast<C*>(arg)->f_(exec_ctx, error);
+    static void Execute(void* arg, grpc_error* error) {
+      static_cast<C*>(arg)->f_(error);
     }
   };
   return std::unique_ptr<Closure>(new C(f, sched));
@@ -188,8 +178,8 @@
   struct C : public grpc_closure {
     C(const F& f) : f_(f) {}
     F f_;
-    static void Execute(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
-      static_cast<C*>(arg)->f_(exec_ctx, error);
+    static void Execute(void* arg, grpc_error* error) {
+      static_cast<C*>(arg)->f_(error);
       delete static_cast<C*>(arg);
     }
   };
@@ -220,22 +210,22 @@
       gpr_arena_destroy(arena_);
       arena_ = gpr_arena_create(4096);
     }
-    grpc_transport_init_stream(f_->exec_ctx(), f_->transport(),
+    grpc_transport_init_stream(f_->transport(),
                                static_cast<grpc_stream*>(stream_), &refcount_,
                                nullptr, arena_);
   }
 
-  void DestroyThen(grpc_exec_ctx* exec_ctx, grpc_closure* closure) {
+  void DestroyThen(grpc_closure* closure) {
     destroy_closure_ = closure;
 #ifndef NDEBUG
-    grpc_stream_unref(exec_ctx, &refcount_, "DestroyThen");
+    grpc_stream_unref(&refcount_, "DestroyThen");
 #else
-    grpc_stream_unref(exec_ctx, &refcount_);
+    grpc_stream_unref(&refcount_);
 #endif
   }
 
-  void Op(grpc_exec_ctx* exec_ctx, grpc_transport_stream_op_batch* op) {
-    grpc_transport_perform_stream_op(exec_ctx, f_->transport(),
+  void Op(grpc_transport_stream_op_batch* op) {
+    grpc_transport_perform_stream_op(f_->transport(),
                                      static_cast<grpc_stream*>(stream_), op);
   }
 
@@ -244,10 +234,9 @@
   }
 
  private:
-  static void FinishDestroy(grpc_exec_ctx* exec_ctx, void* arg,
-                            grpc_error* error) {
+  static void FinishDestroy(void* arg, grpc_error* error) {
     auto stream = static_cast<Stream*>(arg);
-    grpc_transport_destroy_stream(exec_ctx, stream->f_->transport(),
+    grpc_transport_destroy_stream(stream->f_->transport(),
                                   static_cast<grpc_stream*>(stream->stream_),
                                   stream->destroy_closure_);
     gpr_event_set(&stream->done_, (void*)1);
@@ -268,6 +257,7 @@
 
 static void BM_StreamCreateDestroy(benchmark::State& state) {
   TrackCounters track_counters;
+  grpc_core::ExecCtx exec_ctx;
   Fixture f(grpc::ChannelArguments(), true);
   Stream s(&f);
   grpc_transport_stream_op_batch op;
@@ -276,14 +266,13 @@
   op.cancel_stream = true;
   op.payload = &op_payload;
   op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
-  std::unique_ptr<Closure> next =
-      MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
-        if (!state.KeepRunning()) return;
-        s.Init(state);
-        s.Op(exec_ctx, &op);
-        s.DestroyThen(exec_ctx, next.get());
-      });
-  GRPC_CLOSURE_RUN(f.exec_ctx(), next.get(), GRPC_ERROR_NONE);
+  std::unique_ptr<Closure> next = MakeClosure([&](grpc_error* error) {
+    if (!state.KeepRunning()) return;
+    s.Init(state);
+    s.Op(&op);
+    s.DestroyThen(next.get());
+  });
+  GRPC_CLOSURE_RUN(next.get(), GRPC_ERROR_NONE);
   f.FlushExecCtx();
   track_counters.Finish(state);
 }
@@ -291,21 +280,21 @@
 
 class RepresentativeClientInitialMetadata {
  public:
-  static std::vector<grpc_mdelem> GetElems(grpc_exec_ctx* exec_ctx) {
+  static std::vector<grpc_mdelem> GetElems() {
     return {
         GRPC_MDELEM_SCHEME_HTTP,
         GRPC_MDELEM_METHOD_POST,
         grpc_mdelem_from_slices(
-            exec_ctx, GRPC_MDSTR_PATH,
+            GRPC_MDSTR_PATH,
             grpc_slice_intern(grpc_slice_from_static_string("/foo/bar"))),
-        grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_AUTHORITY,
+        grpc_mdelem_from_slices(GRPC_MDSTR_AUTHORITY,
                                 grpc_slice_intern(grpc_slice_from_static_string(
                                     "foo.test.google.fr:1234"))),
         GRPC_MDELEM_GRPC_ACCEPT_ENCODING_IDENTITY_COMMA_DEFLATE_COMMA_GZIP,
         GRPC_MDELEM_TE_TRAILERS,
         GRPC_MDELEM_CONTENT_TYPE_APPLICATION_SLASH_GRPC,
         grpc_mdelem_from_slices(
-            exec_ctx, GRPC_MDSTR_USER_AGENT,
+            GRPC_MDSTR_USER_AGENT,
             grpc_slice_intern(grpc_slice_from_static_string(
                 "grpc-c/3.0.0-dev (linux; chttp2; green)")))};
   }
@@ -314,6 +303,7 @@
 template <class Metadata>
 static void BM_StreamCreateSendInitialMetadataDestroy(benchmark::State& state) {
   TrackCounters track_counters;
+  grpc_core::ExecCtx exec_ctx;
   Fixture f(grpc::ChannelArguments(), true);
   Stream s(&f);
   grpc_transport_stream_op_batch op;
@@ -330,34 +320,33 @@
   grpc_metadata_batch b;
   grpc_metadata_batch_init(&b);
   b.deadline = GRPC_MILLIS_INF_FUTURE;
-  std::vector<grpc_mdelem> elems = Metadata::GetElems(f.exec_ctx());
+  std::vector<grpc_mdelem> elems = Metadata::GetElems();
   std::vector<grpc_linked_mdelem> storage(elems.size());
   for (size_t i = 0; i < elems.size(); i++) {
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "addmd",
-        grpc_metadata_batch_add_tail(f.exec_ctx(), &b, &storage[i], elems[i])));
+        "addmd", grpc_metadata_batch_add_tail(&b, &storage[i], elems[i])));
   }
 
   f.FlushExecCtx();
-  start = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
+  start = MakeClosure([&](grpc_error* error) {
     if (!state.KeepRunning()) return;
     s.Init(state);
     reset_op();
     op.on_complete = done.get();
     op.send_initial_metadata = true;
     op.payload->send_initial_metadata.send_initial_metadata = &b;
-    s.Op(exec_ctx, &op);
+    s.Op(&op);
   });
-  done = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
+  done = MakeClosure([&](grpc_error* error) {
     reset_op();
     op.cancel_stream = true;
     op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
-    s.Op(exec_ctx, &op);
-    s.DestroyThen(exec_ctx, start.get());
+    s.Op(&op);
+    s.DestroyThen(start.get());
   });
-  GRPC_CLOSURE_SCHED(f.exec_ctx(), start.get(), GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(start.get(), GRPC_ERROR_NONE);
   f.FlushExecCtx();
-  grpc_metadata_batch_destroy(f.exec_ctx(), &b);
+  grpc_metadata_batch_destroy(&b);
   track_counters.Finish(state);
 }
 BENCHMARK_TEMPLATE(BM_StreamCreateSendInitialMetadataDestroy,
@@ -365,6 +354,7 @@
 
 static void BM_TransportEmptyOp(benchmark::State& state) {
   TrackCounters track_counters;
+  grpc_core::ExecCtx exec_ctx;
   Fixture f(grpc::ChannelArguments(), true);
   Stream s(&f);
   s.Init(state);
@@ -375,21 +365,19 @@
     memset(&op, 0, sizeof(op));
     op.payload = &op_payload;
   };
-  std::unique_ptr<Closure> c =
-      MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
-        if (!state.KeepRunning()) return;
-        reset_op();
-        op.on_complete = c.get();
-        s.Op(exec_ctx, &op);
-      });
-  GRPC_CLOSURE_SCHED(f.exec_ctx(), c.get(), GRPC_ERROR_NONE);
+  std::unique_ptr<Closure> c = MakeClosure([&](grpc_error* error) {
+    if (!state.KeepRunning()) return;
+    reset_op();
+    op.on_complete = c.get();
+    s.Op(&op);
+  });
+  GRPC_CLOSURE_SCHED(c.get(), GRPC_ERROR_NONE);
   f.FlushExecCtx();
   reset_op();
   op.cancel_stream = true;
   op_payload.cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
-  s.Op(f.exec_ctx(), &op);
-  s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx* exec_ctx,
-                                                 grpc_error* error) {}));
+  s.Op(&op);
+  s.DestroyThen(MakeOnceClosure([](grpc_error* error) {}));
   f.FlushExecCtx();
   track_counters.Finish(state);
 }
@@ -399,6 +387,7 @@
 
 static void BM_TransportStreamSend(benchmark::State& state) {
   TrackCounters track_counters;
+  grpc_core::ExecCtx exec_ctx;
   Fixture f(grpc::ChannelArguments(), true);
   auto s = std::unique_ptr<Stream>(new Stream(&f));
   s->Init(state);
@@ -420,39 +409,37 @@
   grpc_metadata_batch_init(&b);
   b.deadline = GRPC_MILLIS_INF_FUTURE;
   std::vector<grpc_mdelem> elems =
-      RepresentativeClientInitialMetadata::GetElems(f.exec_ctx());
+      RepresentativeClientInitialMetadata::GetElems();
   std::vector<grpc_linked_mdelem> storage(elems.size());
   for (size_t i = 0; i < elems.size(); i++) {
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "addmd",
-        grpc_metadata_batch_add_tail(f.exec_ctx(), &b, &storage[i], elems[i])));
+        "addmd", grpc_metadata_batch_add_tail(&b, &storage[i], elems[i])));
   }
 
   gpr_event* bm_done = new gpr_event;
   gpr_event_init(bm_done);
 
-  std::unique_ptr<Closure> c =
-      MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
-        if (!state.KeepRunning()) {
-          gpr_event_set(bm_done, (void*)1);
-          return;
-        }
-        // force outgoing window to be yuge
-        s->chttp2_stream()->flow_control->TestOnlyForceHugeWindow();
-        f.chttp2_transport()->flow_control->TestOnlyForceHugeWindow();
-        grpc_slice_buffer_stream_init(&send_stream, &send_buffer, 0);
-        reset_op();
-        op.on_complete = c.get();
-        op.send_message = true;
-        op.payload->send_message.send_message = &send_stream.base;
-        s->Op(exec_ctx, &op);
-      });
+  std::unique_ptr<Closure> c = MakeClosure([&](grpc_error* error) {
+    if (!state.KeepRunning()) {
+      gpr_event_set(bm_done, (void*)1);
+      return;
+    }
+    // force outgoing window to be yuge
+    s->chttp2_stream()->flow_control->TestOnlyForceHugeWindow();
+    f.chttp2_transport()->flow_control->TestOnlyForceHugeWindow();
+    grpc_slice_buffer_stream_init(&send_stream, &send_buffer, 0);
+    reset_op();
+    op.on_complete = c.get();
+    op.send_message = true;
+    op.payload->send_message.send_message = &send_stream.base;
+    s->Op(&op);
+  });
 
   reset_op();
   op.send_initial_metadata = true;
   op.payload->send_initial_metadata.send_initial_metadata = &b;
   op.on_complete = c.get();
-  s->Op(f.exec_ctx(), &op);
+  s->Op(&op);
 
   f.FlushExecCtx();
   gpr_event_wait(bm_done, gpr_inf_future(GPR_CLOCK_REALTIME));
@@ -461,13 +448,12 @@
   reset_op();
   op.cancel_stream = true;
   op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
-  s->Op(f.exec_ctx(), &op);
-  s->DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx* exec_ctx,
-                                                  grpc_error* error) {}));
+  s->Op(&op);
+  s->DestroyThen(MakeOnceClosure([](grpc_error* error) {}));
   f.FlushExecCtx();
   s.reset();
   track_counters.Finish(state);
-  grpc_metadata_batch_destroy(f.exec_ctx(), &b);
+  grpc_metadata_batch_destroy(&b);
   grpc_slice_buffer_destroy(&send_buffer);
 }
 BENCHMARK(BM_TransportStreamSend)->Range(0, 128 * 1024 * 1024);
@@ -531,6 +517,7 @@
 
 static void BM_TransportStreamRecv(benchmark::State& state) {
   TrackCounters track_counters;
+  grpc_core::ExecCtx exec_ctx;
   Fixture f(grpc::ChannelArguments(), true);
   Stream s(&f);
   s.Init(state);
@@ -551,16 +538,14 @@
   grpc_metadata_batch_init(&b_recv);
   b.deadline = GRPC_MILLIS_INF_FUTURE;
   std::vector<grpc_mdelem> elems =
-      RepresentativeClientInitialMetadata::GetElems(f.exec_ctx());
+      RepresentativeClientInitialMetadata::GetElems();
   std::vector<grpc_linked_mdelem> storage(elems.size());
   for (size_t i = 0; i < elems.size(); i++) {
     GPR_ASSERT(GRPC_LOG_IF_ERROR(
-        "addmd",
-        grpc_metadata_batch_add_tail(f.exec_ctx(), &b, &storage[i], elems[i])));
+        "addmd", grpc_metadata_batch_add_tail(&b, &storage[i], elems[i])));
   }
 
-  std::unique_ptr<Closure> do_nothing =
-      MakeClosure([](grpc_exec_ctx* exec_ctx, grpc_error* error) {});
+  std::unique_ptr<Closure> do_nothing = MakeClosure([](grpc_error* error) {});
 
   uint32_t received;
 
@@ -569,51 +554,49 @@
   std::unique_ptr<Closure> drain_continue;
   grpc_slice recv_slice;
 
-  std::unique_ptr<Closure> c =
-      MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
-        if (!state.KeepRunning()) return;
-        // force outgoing window to be yuge
-        s.chttp2_stream()->flow_control->TestOnlyForceHugeWindow();
-        f.chttp2_transport()->flow_control->TestOnlyForceHugeWindow();
-        received = 0;
-        reset_op();
-        op.on_complete = do_nothing.get();
-        op.recv_message = true;
-        op.payload->recv_message.recv_message = &recv_stream;
-        op.payload->recv_message.recv_message_ready = drain_start.get();
-        s.Op(exec_ctx, &op);
-        f.PushInput(grpc_slice_ref(incoming_data));
-      });
+  std::unique_ptr<Closure> c = MakeClosure([&](grpc_error* error) {
+    if (!state.KeepRunning()) return;
+    // force outgoing window to be yuge
+    s.chttp2_stream()->flow_control->TestOnlyForceHugeWindow();
+    f.chttp2_transport()->flow_control->TestOnlyForceHugeWindow();
+    received = 0;
+    reset_op();
+    op.on_complete = do_nothing.get();
+    op.recv_message = true;
+    op.payload->recv_message.recv_message = &recv_stream;
+    op.payload->recv_message.recv_message_ready = drain_start.get();
+    s.Op(&op);
+    f.PushInput(grpc_slice_ref(incoming_data));
+  });
 
-  drain_start = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
+  drain_start = MakeClosure([&](grpc_error* error) {
     if (recv_stream == nullptr) {
       GPR_ASSERT(!state.KeepRunning());
       return;
     }
-    GRPC_CLOSURE_RUN(exec_ctx, drain.get(), GRPC_ERROR_NONE);
+    GRPC_CLOSURE_RUN(drain.get(), GRPC_ERROR_NONE);
   });
 
-  drain = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
+  drain = MakeClosure([&](grpc_error* error) {
     do {
       if (received == recv_stream->length) {
-        grpc_byte_stream_destroy(exec_ctx, recv_stream);
-        GRPC_CLOSURE_SCHED(exec_ctx, c.get(), GRPC_ERROR_NONE);
+        grpc_byte_stream_destroy(recv_stream);
+        GRPC_CLOSURE_SCHED(c.get(), GRPC_ERROR_NONE);
         return;
       }
-    } while (grpc_byte_stream_next(exec_ctx, recv_stream,
-                                   recv_stream->length - received,
+    } while (grpc_byte_stream_next(recv_stream, recv_stream->length - received,
                                    drain_continue.get()) &&
              GRPC_ERROR_NONE ==
-                 grpc_byte_stream_pull(exec_ctx, recv_stream, &recv_slice) &&
+                 grpc_byte_stream_pull(recv_stream, &recv_slice) &&
              (received += GRPC_SLICE_LENGTH(recv_slice),
-              grpc_slice_unref_internal(exec_ctx, recv_slice), true));
+              grpc_slice_unref_internal(recv_slice), true));
   });
 
-  drain_continue = MakeClosure([&](grpc_exec_ctx* exec_ctx, grpc_error* error) {
-    grpc_byte_stream_pull(exec_ctx, recv_stream, &recv_slice);
+  drain_continue = MakeClosure([&](grpc_error* error) {
+    grpc_byte_stream_pull(recv_stream, &recv_slice);
     received += GRPC_SLICE_LENGTH(recv_slice);
-    grpc_slice_unref_internal(exec_ctx, recv_slice);
-    GRPC_CLOSURE_RUN(exec_ctx, drain.get(), GRPC_ERROR_NONE);
+    grpc_slice_unref_internal(recv_slice);
+    GRPC_CLOSURE_RUN(drain.get(), GRPC_ERROR_NONE);
   });
 
   reset_op();
@@ -624,7 +607,7 @@
   op.payload->recv_initial_metadata.recv_initial_metadata_ready =
       do_nothing.get();
   op.on_complete = c.get();
-  s.Op(f.exec_ctx(), &op);
+  s.Op(&op);
   f.PushInput(SLICE_FROM_BUFFER(
       "\x00\x00\x00\x04\x00\x00\x00\x00\x00"
       // Generated using:
@@ -642,13 +625,12 @@
   reset_op();
   op.cancel_stream = true;
   op.payload->cancel_stream.cancel_error = GRPC_ERROR_CANCELLED;
-  s.Op(f.exec_ctx(), &op);
-  s.DestroyThen(f.exec_ctx(), MakeOnceClosure([](grpc_exec_ctx* exec_ctx,
-                                                 grpc_error* error) {}));
+  s.Op(&op);
+  s.DestroyThen(MakeOnceClosure([](grpc_error* error) {}));
   f.FlushExecCtx();
   track_counters.Finish(state);
-  grpc_metadata_batch_destroy(f.exec_ctx(), &b);
-  grpc_metadata_batch_destroy(f.exec_ctx(), &b_recv);
+  grpc_metadata_batch_destroy(&b);
+  grpc_metadata_batch_destroy(&b_recv);
   grpc_slice_unref(incoming_data);
 }
 BENCHMARK(BM_TransportStreamRecv)->Range(0, 128 * 1024 * 1024);
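The pattern applied throughout bm_chttp2_transport.cc above: transport and metadata APIs drop their leading grpc_exec_ctx* parameter, and a caller only needs a live grpc_core::ExecCtx somewhere up the stack. A minimal sketch of the new calling shape, using only signatures that appear in the hunks above (the flush-on-destruction behaviour of grpc_core::ExecCtx is assumed, not shown in this diff):

// Sketch only, not part of the patch: the execution context is ambient, not threaded through.
void TransportSetupSketch(const grpc_channel_args* c_args, grpc_endpoint* ep) {
  grpc_core::ExecCtx exec_ctx;  // installs the thread-local context for this scope
  grpc_transport* t = grpc_create_chttp2_transport(c_args, ep, /*is_client=*/true);
  grpc_chttp2_transport_start_reading(t, nullptr, nullptr);
  grpc_core::ExecCtx::Get()->Flush();  // replaces grpc_exec_ctx_flush(&exec_ctx)
  grpc_transport_destroy(t);
}  // remaining work is assumed to drain when exec_ctx is destroyed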
diff --git a/test/cpp/microbenchmarks/bm_closure.cc b/test/cpp/microbenchmarks/bm_closure.cc
index 2434d4e..4d5a82c 100644
--- a/test/cpp/microbenchmarks/bm_closure.cc
+++ b/test/cpp/microbenchmarks/bm_closure.cc
@@ -34,8 +34,7 @@
 static void BM_NoOpExecCtx(benchmark::State& state) {
   TrackCounters track_counters;
   while (state.KeepRunning()) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_core::ExecCtx exec_ctx;
   }
   track_counters.Finish(state);
 }
@@ -43,16 +42,16 @@
 
 static void BM_WellFlushed(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_WellFlushed);
 
-static void DoNothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+static void DoNothing(void* arg, grpc_error* error) {}
 
 static void BM_ClosureInitAgainstExecCtx(benchmark::State& state) {
   TrackCounters track_counters;
@@ -69,13 +68,13 @@
   TrackCounters track_counters;
   grpc_combiner* combiner = grpc_combiner_create();
   grpc_closure c;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
     benchmark::DoNotOptimize(GRPC_CLOSURE_INIT(
         &c, DoNothing, nullptr, grpc_combiner_scheduler(combiner)));
   }
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_COMBINER_UNREF(combiner, "finished");
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureInitAgainstCombiner);
@@ -84,41 +83,39 @@
   TrackCounters track_counters;
   grpc_closure c;
   GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_CLOSURE_RUN(&exec_ctx, &c, GRPC_ERROR_NONE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_CLOSURE_RUN(&c, GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureRunOnExecCtx);
 
 static void BM_ClosureCreateAndRun(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
     GRPC_CLOSURE_RUN(
-        &exec_ctx,
         GRPC_CLOSURE_CREATE(DoNothing, nullptr, grpc_schedule_on_exec_ctx),
         GRPC_ERROR_NONE);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureCreateAndRun);
 
 static void BM_ClosureInitAndRun(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_closure c;
   while (state.KeepRunning()) {
     GRPC_CLOSURE_RUN(
-        &exec_ctx,
         GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx),
         GRPC_ERROR_NONE);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureInitAndRun);
@@ -127,12 +124,12 @@
   TrackCounters track_counters;
   grpc_closure c;
   GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c, GRPC_ERROR_NONE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_CLOSURE_SCHED(&c, GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureSchedOnExecCtx);
@@ -143,13 +140,13 @@
   grpc_closure c2;
   GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c2, GRPC_ERROR_NONE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureSched2OnExecCtx);
@@ -162,14 +159,14 @@
   GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
   GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c2, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c3, GRPC_ERROR_NONE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c3, GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureSched3OnExecCtx);
@@ -179,13 +176,13 @@
   // for comparison with the combiner stuff below
   gpr_mu mu;
   gpr_mu_init(&mu);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
     gpr_mu_lock(&mu);
-    DoNothing(&exec_ctx, nullptr, GRPC_ERROR_NONE);
+    DoNothing(nullptr, GRPC_ERROR_NONE);
     gpr_mu_unlock(&mu);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_AcquireMutex);
@@ -195,16 +192,16 @@
   // for comparison with the combiner stuff below
   gpr_mu mu;
   gpr_mu_init(&mu);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
     if (gpr_mu_trylock(&mu)) {
-      DoNothing(&exec_ctx, nullptr, GRPC_ERROR_NONE);
+      DoNothing(nullptr, GRPC_ERROR_NONE);
       gpr_mu_unlock(&mu);
     } else {
       abort();
     }
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_TryAcquireMutex);
@@ -213,13 +210,13 @@
   TrackCounters track_counters;
   // for comparison with the combiner stuff below
   gpr_spinlock mu = GPR_SPINLOCK_INITIALIZER;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
     gpr_spinlock_lock(&mu);
-    DoNothing(&exec_ctx, nullptr, GRPC_ERROR_NONE);
+    DoNothing(nullptr, GRPC_ERROR_NONE);
     gpr_spinlock_unlock(&mu);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_AcquireSpinlock);
@@ -228,16 +225,16 @@
   TrackCounters track_counters;
   // for comparison with the combiner stuff below
   gpr_spinlock mu = GPR_SPINLOCK_INITIALIZER;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
     if (gpr_spinlock_trylock(&mu)) {
-      DoNothing(&exec_ctx, nullptr, GRPC_ERROR_NONE);
+      DoNothing(nullptr, GRPC_ERROR_NONE);
       gpr_spinlock_unlock(&mu);
     } else {
       abort();
     }
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_TryAcquireSpinlock);
@@ -247,13 +244,13 @@
   grpc_combiner* combiner = grpc_combiner_create();
   grpc_closure c;
   GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c, GRPC_ERROR_NONE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_CLOSURE_SCHED(&c, GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_COMBINER_UNREF(combiner, "finished");
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureSchedOnCombiner);
@@ -265,14 +262,14 @@
   grpc_closure c2;
   GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
   GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c2, GRPC_ERROR_NONE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_COMBINER_UNREF(combiner, "finished");
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureSched2OnCombiner);
@@ -286,15 +283,15 @@
   GRPC_CLOSURE_INIT(&c1, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
   GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
   GRPC_CLOSURE_INIT(&c3, DoNothing, nullptr, grpc_combiner_scheduler(combiner));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c2, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c3, GRPC_ERROR_NONE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c3, GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_COMBINER_UNREF(combiner, "finished");
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureSched3OnCombiner);
@@ -309,15 +306,15 @@
                     grpc_combiner_scheduler(combiner1));
   GRPC_CLOSURE_INIT(&c2, DoNothing, nullptr,
                     grpc_combiner_scheduler(combiner2));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c2, GRPC_ERROR_NONE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner1, "finished");
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner2, "finished");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_COMBINER_UNREF(combiner1, "finished");
+  GRPC_COMBINER_UNREF(combiner2, "finished");
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureSched2OnTwoCombiners);
@@ -338,17 +335,17 @@
                     grpc_combiner_scheduler(combiner1));
   GRPC_CLOSURE_INIT(&c4, DoNothing, nullptr,
                     grpc_combiner_scheduler(combiner2));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c1, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c2, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c3, GRPC_ERROR_NONE);
-    GRPC_CLOSURE_SCHED(&exec_ctx, &c4, GRPC_ERROR_NONE);
-    grpc_exec_ctx_flush(&exec_ctx);
+    GRPC_CLOSURE_SCHED(&c1, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c2, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c3, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(&c4, GRPC_ERROR_NONE);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner1, "finished");
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner2, "finished");
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_COMBINER_UNREF(combiner1, "finished");
+  GRPC_COMBINER_UNREF(combiner2, "finished");
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureSched4OnTwoCombiners);
@@ -362,13 +359,11 @@
     GRPC_CLOSURE_INIT(&closure_, Step, this, scheduler);
   }
 
-  void ScheduleFirst(grpc_exec_ctx* exec_ctx) {
-    GRPC_CLOSURE_SCHED(exec_ctx, &closure_, GRPC_ERROR_NONE);
-  }
+  void ScheduleFirst() { GRPC_CLOSURE_SCHED(&closure_, GRPC_ERROR_NONE); }
 
   void ScheduleFirstAgainstDifferentScheduler(
-      grpc_exec_ctx* exec_ctx, grpc_closure_scheduler* scheduler) {
-    GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(Step, this, scheduler),
+      grpc_closure_scheduler* scheduler) {
+    GRPC_CLOSURE_SCHED(GRPC_CLOSURE_CREATE(Step, this, scheduler),
                        GRPC_ERROR_NONE);
   }
 
@@ -376,47 +371,46 @@
   benchmark::State& state_;
   grpc_closure closure_;
 
-  static void Step(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+  static void Step(void* arg, grpc_error* error) {
     Rescheduler* self = static_cast<Rescheduler*>(arg);
     if (self->state_.KeepRunning()) {
-      GRPC_CLOSURE_SCHED(exec_ctx, &self->closure_, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(&self->closure_, GRPC_ERROR_NONE);
     }
   }
 };
 
 static void BM_ClosureReschedOnExecCtx(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   Rescheduler r(state, grpc_schedule_on_exec_ctx);
-  r.ScheduleFirst(&exec_ctx);
-  grpc_exec_ctx_finish(&exec_ctx);
+  r.ScheduleFirst();
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureReschedOnExecCtx);
 
 static void BM_ClosureReschedOnCombiner(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_combiner* combiner = grpc_combiner_create();
   Rescheduler r(state, grpc_combiner_scheduler(combiner));
-  r.ScheduleFirst(&exec_ctx);
-  grpc_exec_ctx_flush(&exec_ctx);
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
-  grpc_exec_ctx_finish(&exec_ctx);
+  r.ScheduleFirst();
+  grpc_core::ExecCtx::Get()->Flush();
+  GRPC_COMBINER_UNREF(combiner, "finished");
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureReschedOnCombiner);
 
 static void BM_ClosureReschedOnCombinerFinally(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_combiner* combiner = grpc_combiner_create();
   Rescheduler r(state, grpc_combiner_finally_scheduler(combiner));
-  r.ScheduleFirstAgainstDifferentScheduler(&exec_ctx,
-                                           grpc_combiner_scheduler(combiner));
-  grpc_exec_ctx_flush(&exec_ctx);
-  GRPC_COMBINER_UNREF(&exec_ctx, combiner, "finished");
-  grpc_exec_ctx_finish(&exec_ctx);
+  r.ScheduleFirstAgainstDifferentScheduler(grpc_combiner_scheduler(combiner));
+  grpc_core::ExecCtx::Get()->Flush();
+  GRPC_COMBINER_UNREF(combiner, "finished");
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_ClosureReschedOnCombinerFinally);
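The mechanical substitution in bm_closure.cc, shown once in isolation and built only from names visible in the hunks above: GRPC_EXEC_CTX_INIT / grpc_exec_ctx_flush / grpc_exec_ctx_finish map onto the grpc_core::ExecCtx constructor, grpc_core::ExecCtx::Get()->Flush(), and the destructor respectively (a sketch under that assumption):

static void DoNothing(void* arg, grpc_error* error) {}

static void SchedFlushSketch() {
  grpc_closure c;
  GRPC_CLOSURE_INIT(&c, DoNothing, nullptr, grpc_schedule_on_exec_ctx);
  grpc_core::ExecCtx exec_ctx;              // replaces GRPC_EXEC_CTX_INIT
  GRPC_CLOSURE_SCHED(&c, GRPC_ERROR_NONE);  // no exec_ctx argument any more
  grpc_core::ExecCtx::Get()->Flush();       // replaces grpc_exec_ctx_flush(&exec_ctx)
}                                           // destructor stands in for grpc_exec_ctx_finish(&exec_ctx)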
diff --git a/test/cpp/microbenchmarks/bm_cq.cc b/test/cpp/microbenchmarks/bm_cq.cc
index f0dede7..9724259 100644
--- a/test/cpp/microbenchmarks/bm_cq.cc
+++ b/test/cpp/microbenchmarks/bm_cq.cc
@@ -66,7 +66,7 @@
 }
 BENCHMARK(BM_CreateDestroyCore);
 
-static void DoneWithCompletionOnStack(grpc_exec_ctx* exec_ctx, void* arg,
+static void DoneWithCompletionOnStack(void* arg,
                                       grpc_cq_completion* completion) {}
 
 class DummyTag final : public internal::CompletionQueueTag {
@@ -81,11 +81,11 @@
   while (state.KeepRunning()) {
     grpc_cq_completion completion;
     DummyTag dummy_tag;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     GPR_ASSERT(grpc_cq_begin_op(c_cq, &dummy_tag));
-    grpc_cq_end_op(&exec_ctx, c_cq, &dummy_tag, GRPC_ERROR_NONE,
-                   DoneWithCompletionOnStack, nullptr, &completion);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_cq_end_op(c_cq, &dummy_tag, GRPC_ERROR_NONE, DoneWithCompletionOnStack,
+                   nullptr, &completion);
+
     void* tag;
     bool ok;
     cq.Next(&tag, &ok);
@@ -101,11 +101,11 @@
   gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
   while (state.KeepRunning()) {
     grpc_cq_completion completion;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     GPR_ASSERT(grpc_cq_begin_op(cq, nullptr));
-    grpc_cq_end_op(&exec_ctx, cq, nullptr, GRPC_ERROR_NONE,
-                   DoneWithCompletionOnStack, nullptr, &completion);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_cq_end_op(cq, nullptr, GRPC_ERROR_NONE, DoneWithCompletionOnStack,
+                   nullptr, &completion);
+
     grpc_completion_queue_next(cq, deadline, nullptr);
   }
   grpc_completion_queue_destroy(cq);
@@ -120,11 +120,11 @@
   gpr_timespec deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
   while (state.KeepRunning()) {
     grpc_cq_completion completion;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     GPR_ASSERT(grpc_cq_begin_op(cq, nullptr));
-    grpc_cq_end_op(&exec_ctx, cq, nullptr, GRPC_ERROR_NONE,
-                   DoneWithCompletionOnStack, nullptr, &completion);
-    grpc_exec_ctx_finish(&exec_ctx);
+    grpc_cq_end_op(cq, nullptr, GRPC_ERROR_NONE, DoneWithCompletionOnStack,
+                   nullptr, &completion);
+
     grpc_completion_queue_pluck(cq, nullptr, deadline, nullptr);
   }
   grpc_completion_queue_destroy(cq);
diff --git a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
index 7ccebb5..874c834 100644
--- a/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
+++ b/test/cpp/microbenchmarks/bm_cq_multiple_threads.cc
@@ -43,9 +43,8 @@
 static grpc_event_engine_vtable g_vtable;
 static const grpc_event_engine_vtable* g_old_vtable;
 
-static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
-                             grpc_closure* closure) {
-  GRPC_CLOSURE_SCHED(exec_ctx, closure, GRPC_ERROR_NONE);
+static void pollset_shutdown(grpc_pollset* ps, grpc_closure* closure) {
+  GRPC_CLOSURE_SCHED(closure, GRPC_ERROR_NONE);
 }
 
 static void pollset_init(grpc_pollset* ps, gpr_mu** mu) {
@@ -53,25 +52,20 @@
   *mu = &ps->mu;
 }
 
-static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* ps) {
-  gpr_mu_destroy(&ps->mu);
-}
+static void pollset_destroy(grpc_pollset* ps) { gpr_mu_destroy(&ps->mu); }
 
-static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* p,
-                                grpc_pollset_worker* worker) {
+static grpc_error* pollset_kick(grpc_pollset* p, grpc_pollset_worker* worker) {
   return GRPC_ERROR_NONE;
 }
 
 /* Callback when the tag is dequeued from the completion queue. Does nothing */
-static void cq_done_cb(grpc_exec_ctx* exec_ctx, void* done_arg,
-                       grpc_cq_completion* cq_completion) {
+static void cq_done_cb(void* done_arg, grpc_cq_completion* cq_completion) {
   gpr_free(cq_completion);
 }
 
 /* Queues a completion tag if deadline is > 0.
  * Does nothing if deadline is 0 (i.e gpr_time_0(GPR_CLOCK_MONOTONIC)) */
-static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
-                                grpc_pollset_worker** worker,
+static grpc_error* pollset_work(grpc_pollset* ps, grpc_pollset_worker** worker,
                                 grpc_millis deadline) {
   if (deadline == 0) {
     gpr_log(GPR_DEBUG, "no-op");
@@ -80,9 +74,9 @@
 
   gpr_mu_unlock(&ps->mu);
   GPR_ASSERT(grpc_cq_begin_op(g_cq, g_tag));
-  grpc_cq_end_op(exec_ctx, g_cq, g_tag, GRPC_ERROR_NONE, cq_done_cb, nullptr,
+  grpc_cq_end_op(g_cq, g_tag, GRPC_ERROR_NONE, cq_done_cb, nullptr,
                  (grpc_cq_completion*)gpr_malloc(sizeof(grpc_cq_completion)));
-  grpc_exec_ctx_flush(exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_mu_lock(&ps->mu);
   return GRPC_ERROR_NONE;
 }
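Because bm_cq_multiple_threads.cc installs these functions through a vtable (g_vtable), every stub has to be updated in lock-step with the new exec_ctx-free prototypes; any implementation that still needs the context reads the thread-local one instead of a parameter. A hedged sketch of a conforming stub, reusing the prototype from the hunk above:

// Sketch only: conforms to the new signature and flushes via the ambient context.
static grpc_error* pollset_work_sketch(grpc_pollset* ps, grpc_pollset_worker** worker,
                                       grpc_millis deadline) {
  grpc_core::ExecCtx::Get()->Flush();  // was grpc_exec_ctx_flush(exec_ctx)
  return GRPC_ERROR_NONE;
}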
diff --git a/test/cpp/microbenchmarks/bm_error.cc b/test/cpp/microbenchmarks/bm_error.cc
index bbd8b3c..d12f475 100644
--- a/test/cpp/microbenchmarks/bm_error.cc
+++ b/test/cpp/microbenchmarks/bm_error.cc
@@ -246,14 +246,14 @@
 static void BM_ErrorGetStatus(benchmark::State& state) {
   TrackCounters track_counters;
   Fixture fixture;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
     grpc_status_code status;
     grpc_slice slice;
-    grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(),
-                          &status, &slice, nullptr, nullptr);
+    grpc_error_get_status(fixture.error(), fixture.deadline(), &status, &slice,
+                          nullptr, nullptr);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 
@@ -261,13 +261,13 @@
 static void BM_ErrorGetStatusCode(benchmark::State& state) {
   TrackCounters track_counters;
   Fixture fixture;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
     grpc_status_code status;
-    grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(),
-                          &status, nullptr, nullptr, nullptr);
+    grpc_error_get_status(fixture.error(), fixture.deadline(), &status, nullptr,
+                          nullptr, nullptr);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 
@@ -275,13 +275,13 @@
 static void BM_ErrorHttpError(benchmark::State& state) {
   TrackCounters track_counters;
   Fixture fixture;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
     grpc_http2_error_code error;
-    grpc_error_get_status(&exec_ctx, fixture.error(), fixture.deadline(),
-                          nullptr, nullptr, &error, nullptr);
+    grpc_error_get_status(fixture.error(), fixture.deadline(), nullptr, nullptr,
+                          &error, nullptr);
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 
diff --git a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
index bb974fa..d6d7d41 100644
--- a/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
+++ b/test/cpp/microbenchmarks/bm_fullstack_trickle.cc
@@ -177,13 +177,13 @@
   }
 
   void Step(bool update_stats) {
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     inc_time();
     size_t client_backlog =
-        grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.client);
+        grpc_trickle_endpoint_trickle(endpoint_pair_.client);
     size_t server_backlog =
-        grpc_trickle_endpoint_trickle(&exec_ctx, endpoint_pair_.server);
-    grpc_exec_ctx_finish(&exec_ctx);
+        grpc_trickle_endpoint_trickle(endpoint_pair_.server);
+
     if (update_stats) {
       UpdateStats((grpc_chttp2_transport*)client_transport_, &client_stats_,
                   client_backlog);
@@ -445,7 +445,7 @@
 }  // namespace testing
 }  // namespace grpc
 
-extern "C" gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
+extern gpr_timespec (*gpr_now_impl)(gpr_clock_type clock_type);
 
 int main(int argc, char** argv) {
   ::benchmark::Initialize(&argc, argv);
diff --git a/test/cpp/microbenchmarks/bm_metadata.cc b/test/cpp/microbenchmarks/bm_metadata.cc
index 73bce08..f1e7890 100644
--- a/test/cpp/microbenchmarks/bm_metadata.cc
+++ b/test/cpp/microbenchmarks/bm_metadata.cc
@@ -90,11 +90,11 @@
   TrackCounters track_counters;
   gpr_slice k = grpc_slice_from_static_string("key");
   gpr_slice v = grpc_slice_from_static_string("value");
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
+    GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_MetadataFromNonInternedSlices);
@@ -103,11 +103,11 @@
   TrackCounters track_counters;
   gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
   gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
+    GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_slice_unref(k);
   grpc_slice_unref(v);
   track_counters.Finish(state);
@@ -119,13 +119,13 @@
   TrackCounters track_counters;
   gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
   gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_mdelem seed = grpc_mdelem_create(&exec_ctx, k, v, nullptr);
+  grpc_core::ExecCtx exec_ctx;
+  grpc_mdelem seed = grpc_mdelem_create(k, v, nullptr);
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
+    GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
   }
-  GRPC_MDELEM_UNREF(&exec_ctx, seed);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(seed);
+
   grpc_slice_unref(k);
   grpc_slice_unref(v);
   track_counters.Finish(state);
@@ -136,11 +136,11 @@
   TrackCounters track_counters;
   gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
   gpr_slice v = grpc_slice_from_static_string("value");
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
+    GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_slice_unref(k);
   track_counters.Finish(state);
 }
@@ -152,14 +152,12 @@
   gpr_slice k = grpc_slice_from_static_string("key");
   gpr_slice v = grpc_slice_from_static_string("value");
   char backing_store[sizeof(grpc_mdelem_data)];
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(
-        &exec_ctx,
-        grpc_mdelem_create(&exec_ctx, k, v,
-                           reinterpret_cast<grpc_mdelem_data*>(backing_store)));
+    GRPC_MDELEM_UNREF(grpc_mdelem_create(
+        k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store)));
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_MetadataFromNonInternedSlicesWithBackingStore);
@@ -170,14 +168,12 @@
   gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
   gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
   char backing_store[sizeof(grpc_mdelem_data)];
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(
-        &exec_ctx,
-        grpc_mdelem_create(&exec_ctx, k, v,
-                           reinterpret_cast<grpc_mdelem_data*>(backing_store)));
+    GRPC_MDELEM_UNREF(grpc_mdelem_create(
+        k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store)));
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_slice_unref(k);
   grpc_slice_unref(v);
   track_counters.Finish(state);
@@ -190,14 +186,12 @@
   gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
   gpr_slice v = grpc_slice_from_static_string("value");
   char backing_store[sizeof(grpc_mdelem_data)];
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(
-        &exec_ctx,
-        grpc_mdelem_create(&exec_ctx, k, v,
-                           reinterpret_cast<grpc_mdelem_data*>(backing_store)));
+    GRPC_MDELEM_UNREF(grpc_mdelem_create(
+        k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store)));
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_slice_unref(k);
   track_counters.Finish(state);
 }
@@ -207,11 +201,11 @@
   TrackCounters track_counters;
   gpr_slice k = GRPC_MDSTR_STATUS;
   gpr_slice v = GRPC_MDSTR_200;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
+    GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_slice_unref(k);
   track_counters.Finish(state);
 }
@@ -222,11 +216,11 @@
   TrackCounters track_counters;
   gpr_slice k = GRPC_MDSTR_STATUS;
   gpr_slice v = GRPC_MDSTR_GZIP;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, grpc_mdelem_create(&exec_ctx, k, v, nullptr));
+    GRPC_MDELEM_UNREF(grpc_mdelem_create(k, v, nullptr));
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+
   grpc_slice_unref(k);
   track_counters.Finish(state);
 }
@@ -235,16 +229,15 @@
 static void BM_MetadataRefUnrefExternal(benchmark::State& state) {
   TrackCounters track_counters;
   char backing_store[sizeof(grpc_mdelem_data)];
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_mdelem el =
-      grpc_mdelem_create(&exec_ctx, grpc_slice_from_static_string("a"),
-                         grpc_slice_from_static_string("b"),
-                         reinterpret_cast<grpc_mdelem_data*>(backing_store));
+  grpc_core::ExecCtx exec_ctx;
+  grpc_mdelem el = grpc_mdelem_create(
+      grpc_slice_from_static_string("a"), grpc_slice_from_static_string("b"),
+      reinterpret_cast<grpc_mdelem_data*>(backing_store));
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, GRPC_MDELEM_REF(el));
+    GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
   }
-  GRPC_MDELEM_UNREF(&exec_ctx, el);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(el);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_MetadataRefUnrefExternal);
@@ -252,47 +245,47 @@
 static void BM_MetadataRefUnrefInterned(benchmark::State& state) {
   TrackCounters track_counters;
   char backing_store[sizeof(grpc_mdelem_data)];
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_slice k = grpc_slice_intern(grpc_slice_from_static_string("key"));
   gpr_slice v = grpc_slice_intern(grpc_slice_from_static_string("value"));
   grpc_mdelem el = grpc_mdelem_create(
-      &exec_ctx, k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store));
+      k, v, reinterpret_cast<grpc_mdelem_data*>(backing_store));
   grpc_slice_unref(k);
   grpc_slice_unref(v);
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, GRPC_MDELEM_REF(el));
+    GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
   }
-  GRPC_MDELEM_UNREF(&exec_ctx, el);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(el);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_MetadataRefUnrefInterned);
 
 static void BM_MetadataRefUnrefAllocated(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem el =
-      grpc_mdelem_create(&exec_ctx, grpc_slice_from_static_string("a"),
+      grpc_mdelem_create(grpc_slice_from_static_string("a"),
                          grpc_slice_from_static_string("b"), nullptr);
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, GRPC_MDELEM_REF(el));
+    GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
   }
-  GRPC_MDELEM_UNREF(&exec_ctx, el);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(el);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_MetadataRefUnrefAllocated);
 
 static void BM_MetadataRefUnrefStatic(benchmark::State& state) {
   TrackCounters track_counters;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_mdelem el =
-      grpc_mdelem_create(&exec_ctx, GRPC_MDSTR_STATUS, GRPC_MDSTR_200, nullptr);
+      grpc_mdelem_create(GRPC_MDSTR_STATUS, GRPC_MDSTR_200, nullptr);
   while (state.KeepRunning()) {
-    GRPC_MDELEM_UNREF(&exec_ctx, GRPC_MDELEM_REF(el));
+    GRPC_MDELEM_UNREF(GRPC_MDELEM_REF(el));
   }
-  GRPC_MDELEM_UNREF(&exec_ctx, el);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_MDELEM_UNREF(el);
+
   track_counters.Finish(state);
 }
 BENCHMARK(BM_MetadataRefUnrefStatic);
diff --git a/test/cpp/microbenchmarks/bm_pollset.cc b/test/cpp/microbenchmarks/bm_pollset.cc
index 4da7969..d9d5164 100644
--- a/test/cpp/microbenchmarks/bm_pollset.cc
+++ b/test/cpp/microbenchmarks/bm_pollset.cc
@@ -41,8 +41,8 @@
 
 auto& force_library_initialization = Library::get();
 
-static void shutdown_ps(grpc_exec_ctx* exec_ctx, void* ps, grpc_error* error) {
-  grpc_pollset_destroy(exec_ctx, static_cast<grpc_pollset*>(ps));
+static void shutdown_ps(void* ps, grpc_error* error) {
+  grpc_pollset_destroy(static_cast<grpc_pollset*>(ps));
 }
 
 static void BM_CreateDestroyPollset(benchmark::State& state) {
@@ -50,7 +50,7 @@
   size_t ps_sz = grpc_pollset_size();
   grpc_pollset* ps = static_cast<grpc_pollset*>(gpr_malloc(ps_sz));
   gpr_mu* mu;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_closure shutdown_ps_closure;
   GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
                     grpc_schedule_on_exec_ctx);
@@ -58,11 +58,11 @@
     memset(ps, 0, ps_sz);
     grpc_pollset_init(ps, &mu);
     gpr_mu_lock(mu);
-    grpc_pollset_shutdown(&exec_ctx, ps, &shutdown_ps_closure);
+    grpc_pollset_shutdown(ps, &shutdown_ps_closure);
     gpr_mu_unlock(mu);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_free(ps);
   track_counters.Finish(state);
 }
@@ -114,17 +114,17 @@
   grpc_pollset* ps = static_cast<grpc_pollset*>(gpr_zalloc(ps_sz));
   gpr_mu* mu;
   grpc_pollset_init(ps, &mu);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   gpr_mu_lock(mu);
   while (state.KeepRunning()) {
-    GRPC_ERROR_UNREF(grpc_pollset_work(&exec_ctx, ps, nullptr, 0));
+    GRPC_ERROR_UNREF(grpc_pollset_work(ps, nullptr, 0));
   }
   grpc_closure shutdown_ps_closure;
   GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
                     grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, ps, &shutdown_ps_closure);
+  grpc_pollset_shutdown(ps, &shutdown_ps_closure);
   gpr_mu_unlock(mu);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_free(ps);
   track_counters.Finish(state);
 }
@@ -136,24 +136,23 @@
   grpc_pollset* ps = static_cast<grpc_pollset*>(gpr_zalloc(ps_sz));
   gpr_mu* mu;
   grpc_pollset_init(ps, &mu);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_wakeup_fd wakeup_fd;
   GPR_ASSERT(
       GRPC_LOG_IF_ERROR("wakeup_fd_init", grpc_wakeup_fd_init(&wakeup_fd)));
   grpc_fd* fd = grpc_fd_create(wakeup_fd.read_fd, "xxx");
   while (state.KeepRunning()) {
-    grpc_pollset_add_fd(&exec_ctx, ps, fd);
-    grpc_exec_ctx_flush(&exec_ctx);
+    grpc_pollset_add_fd(ps, fd);
+    grpc_core::ExecCtx::Get()->Flush();
   }
-  grpc_fd_orphan(&exec_ctx, fd, nullptr, nullptr, false /* already_closed */,
-                 "xxx");
+  grpc_fd_orphan(fd, nullptr, nullptr, false /* already_closed */, "xxx");
   grpc_closure shutdown_ps_closure;
   GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
                     grpc_schedule_on_exec_ctx);
   gpr_mu_lock(mu);
-  grpc_pollset_shutdown(&exec_ctx, ps, &shutdown_ps_closure);
+  grpc_pollset_shutdown(ps, &shutdown_ps_closure);
   gpr_mu_unlock(mu);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   gpr_free(ps);
   track_counters.Finish(state);
 }
@@ -170,7 +169,7 @@
     C(F f, grpc_closure_scheduler* scheduler) : f_(f) {
       GRPC_CLOSURE_INIT(this, C::cbfn, this, scheduler);
     }
-    static void cbfn(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {
+    static void cbfn(void* arg, grpc_error* error) {
       C* p = static_cast<C*>(arg);
       p->f_();
     }
@@ -219,11 +218,11 @@
   grpc_pollset* ps = static_cast<grpc_pollset*>(gpr_zalloc(ps_sz));
   gpr_mu* mu;
   grpc_pollset_init(ps, &mu);
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   grpc_wakeup_fd wakeup_fd;
   GRPC_ERROR_UNREF(grpc_wakeup_fd_init(&wakeup_fd));
   grpc_fd* wakeup = grpc_fd_create(wakeup_fd.read_fd, "wakeup_read");
-  grpc_pollset_add_fd(&exec_ctx, ps, wakeup);
+  grpc_pollset_add_fd(ps, wakeup);
   bool done = false;
   Closure* continue_closure = MakeClosure(
       [&]() {
@@ -233,25 +232,23 @@
           return;
         }
         GRPC_ERROR_UNREF(grpc_wakeup_fd_wakeup(&wakeup_fd));
-        grpc_fd_notify_on_read(&exec_ctx, wakeup, continue_closure);
+        grpc_fd_notify_on_read(wakeup, continue_closure);
       },
       grpc_schedule_on_exec_ctx);
   GRPC_ERROR_UNREF(grpc_wakeup_fd_wakeup(&wakeup_fd));
-  grpc_fd_notify_on_read(&exec_ctx, wakeup, continue_closure);
+  grpc_fd_notify_on_read(wakeup, continue_closure);
   gpr_mu_lock(mu);
   while (!done) {
-    GRPC_ERROR_UNREF(
-        grpc_pollset_work(&exec_ctx, ps, nullptr, GRPC_MILLIS_INF_FUTURE));
+    GRPC_ERROR_UNREF(grpc_pollset_work(ps, nullptr, GRPC_MILLIS_INF_FUTURE));
   }
-  grpc_fd_orphan(&exec_ctx, wakeup, nullptr, nullptr,
-                 false /* already_closed */, "done");
+  grpc_fd_orphan(wakeup, nullptr, nullptr, false /* already_closed */, "done");
   wakeup_fd.read_fd = 0;
   grpc_closure shutdown_ps_closure;
   GRPC_CLOSURE_INIT(&shutdown_ps_closure, shutdown_ps, ps,
                     grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(&exec_ctx, ps, &shutdown_ps_closure);
+  grpc_pollset_shutdown(ps, &shutdown_ps_closure);
   gpr_mu_unlock(mu);
-  grpc_exec_ctx_finish(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   grpc_wakeup_fd_destroy(&wakeup_fd);
   gpr_free(ps);
   track_counters.Finish(state);
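One detail worth noting in bm_pollset.cc: where the removed code called grpc_exec_ctx_finish(&exec_ctx) before gpr_free(ps), the replacement keeps an explicit grpc_core::ExecCtx::Get()->Flush() at that point. A plausible reading (an inference, not stated in the diff) is that the shutdown closure must run while the pollset memory is still valid, and the ExecCtx destructor would only flush later, at end of scope. Sketch using names from the hunks above:

// Sketch only: explicit flush before freeing memory that pending closures reference.
static void PollsetTeardownSketch(grpc_pollset* ps, grpc_closure* shutdown_closure, gpr_mu* mu) {
  gpr_mu_lock(mu);
  grpc_pollset_shutdown(ps, shutdown_closure);  // schedules the shutdown work
  gpr_mu_unlock(mu);
  grpc_core::ExecCtx::Get()->Flush();  // run it now, while ps is still valid
  gpr_free(ps);                        // safe only after the flush above
}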
diff --git a/test/cpp/microbenchmarks/fullstack_fixtures.h b/test/cpp/microbenchmarks/fullstack_fixtures.h
index 7e20843..d1ede75 100644
--- a/test/cpp/microbenchmarks/fullstack_fixtures.h
+++ b/test/cpp/microbenchmarks/fullstack_fixtures.h
@@ -166,7 +166,7 @@
     fixture_configuration.ApplyCommonServerBuilderConfig(&b);
     server_ = b.BuildAndStart();
 
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
 
     /* add server endpoint to server_
      * */
@@ -174,20 +174,19 @@
       const grpc_channel_args* server_args =
           grpc_server_get_channel_args(server_->c_server());
       server_transport_ = grpc_create_chttp2_transport(
-          &exec_ctx, server_args, endpoints.server, false /* is_client */);
+          server_args, endpoints.server, false /* is_client */);
 
       grpc_pollset** pollsets;
       size_t num_pollsets = 0;
       grpc_server_get_pollsets(server_->c_server(), &pollsets, &num_pollsets);
 
       for (size_t i = 0; i < num_pollsets; i++) {
-        grpc_endpoint_add_to_pollset(&exec_ctx, endpoints.server, pollsets[i]);
+        grpc_endpoint_add_to_pollset(endpoints.server, pollsets[i]);
       }
 
-      grpc_server_setup_transport(&exec_ctx, server_->c_server(),
-                                  server_transport_, nullptr, server_args);
-      grpc_chttp2_transport_start_reading(&exec_ctx, server_transport_, nullptr,
-                                          nullptr);
+      grpc_server_setup_transport(server_->c_server(), server_transport_,
+                                  nullptr, server_args);
+      grpc_chttp2_transport_start_reading(server_transport_, nullptr, nullptr);
     }
 
     /* create channel */
@@ -197,19 +196,15 @@
       fixture_configuration.ApplyCommonChannelArguments(&args);
 
       grpc_channel_args c_args = args.c_channel_args();
-      client_transport_ = grpc_create_chttp2_transport(&exec_ctx, &c_args,
-                                                       endpoints.client, true);
+      client_transport_ =
+          grpc_create_chttp2_transport(&c_args, endpoints.client, true);
       GPR_ASSERT(client_transport_);
-      grpc_channel* channel =
-          grpc_channel_create(&exec_ctx, "target", &c_args,
-                              GRPC_CLIENT_DIRECT_CHANNEL, client_transport_);
-      grpc_chttp2_transport_start_reading(&exec_ctx, client_transport_, nullptr,
-                                          nullptr);
+      grpc_channel* channel = grpc_channel_create(
+          "target", &c_args, GRPC_CLIENT_DIRECT_CHANNEL, client_transport_);
+      grpc_chttp2_transport_start_reading(client_transport_, nullptr, nullptr);
 
       channel_ = CreateChannelInternal("", channel);
     }
-
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 
   virtual ~EndpointPairFixture() {
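In this fixture the trailing grpc_exec_ctx_finish call is simply dropped rather than replaced: the grpc_core::ExecCtx declared near the top of the constructor is expected to flush whatever the transport and channel setup scheduled when it is destroyed at the end of the block. A sketch of that shape, assuming the destructor-flush behaviour:

// Sketch only: setup relies on the ExecCtx destructor instead of an explicit finish.
void SetupSketch() {
  grpc_core::ExecCtx exec_ctx;  // one context for the whole setup block
  // ... grpc_create_chttp2_transport(...), grpc_chttp2_transport_start_reading(...) ...
}  // implicit flush (assumed); no explicit finish call needed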
diff --git a/test/cpp/microbenchmarks/helpers.h b/test/cpp/microbenchmarks/helpers.h
index 07be589..afa3e0f 100644
--- a/test/cpp/microbenchmarks/helpers.h
+++ b/test/cpp/microbenchmarks/helpers.h
@@ -54,10 +54,10 @@
 };
 
 #ifdef GPR_LOW_LEVEL_COUNTERS
-extern "C" gpr_atm gpr_mu_locks;
-extern "C" gpr_atm gpr_counter_atm_cas;
-extern "C" gpr_atm gpr_counter_atm_add;
-extern "C" gpr_atm gpr_now_call_count;
+extern gpr_atm gpr_mu_locks;
+extern gpr_atm gpr_counter_atm_cas;
+extern gpr_atm gpr_counter_atm_add;
+extern gpr_atm gpr_now_call_count;
 #endif
 
 class TrackCounters {
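A separate, smaller theme in helpers.h (and in bm_fullstack_trickle.cc above): extern "C" is dropped from declarations of core symbols such as gpr_mu_locks and gpr_now_impl. The likely reason, an inference rather than something stated in the diff, is that the core library is now compiled as C++, so these symbols carry C++ linkage and a C-linkage declaration would no longer match at link time. Sketch:

// Before: declared with C linkage.
//   extern "C" gpr_atm gpr_mu_locks;
// After: plain extern, matching a definition assumed to be compiled as C++ in gpr.
extern gpr_atm gpr_mu_locks;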
diff --git a/test/cpp/naming/resolver_component_test.cc b/test/cpp/naming/resolver_component_test.cc
index 6f1f0c4..3481d9d 100644
--- a/test/cpp/naming/resolver_component_test.cc
+++ b/test/cpp/naming/resolver_component_test.cc
@@ -149,33 +149,33 @@
   std::string expected_lb_policy;
 };
 
-void ArgsInit(grpc_exec_ctx* exec_ctx, ArgsStruct* args) {
+void ArgsInit(ArgsStruct* args) {
   gpr_event_init(&args->ev);
   args->pollset = (grpc_pollset*)gpr_zalloc(grpc_pollset_size());
   grpc_pollset_init(args->pollset, &args->mu);
   args->pollset_set = grpc_pollset_set_create();
-  grpc_pollset_set_add_pollset(exec_ctx, args->pollset_set, args->pollset);
+  grpc_pollset_set_add_pollset(args->pollset_set, args->pollset);
   args->lock = grpc_combiner_create();
   gpr_atm_rel_store(&args->done_atm, 0);
   args->channel_args = nullptr;
 }
 
-void DoNothing(grpc_exec_ctx* exec_ctx, void* arg, grpc_error* error) {}
+void DoNothing(void* arg, grpc_error* error) {}
 
-void ArgsFinish(grpc_exec_ctx* exec_ctx, ArgsStruct* args) {
+void ArgsFinish(ArgsStruct* args) {
   GPR_ASSERT(gpr_event_wait(&args->ev, TestDeadline()));
-  grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset);
-  grpc_pollset_set_destroy(exec_ctx, args->pollset_set);
+  grpc_pollset_set_del_pollset(args->pollset_set, args->pollset);
+  grpc_pollset_set_destroy(args->pollset_set);
   grpc_closure DoNothing_cb;
   GRPC_CLOSURE_INIT(&DoNothing_cb, DoNothing, nullptr,
                     grpc_schedule_on_exec_ctx);
-  grpc_pollset_shutdown(exec_ctx, args->pollset, &DoNothing_cb);
+  grpc_pollset_shutdown(args->pollset, &DoNothing_cb);
   // exec_ctx needs to be flushed before calling grpc_pollset_destroy()
-  grpc_channel_args_destroy(exec_ctx, args->channel_args);
-  grpc_exec_ctx_flush(exec_ctx);
-  grpc_pollset_destroy(exec_ctx, args->pollset);
+  grpc_channel_args_destroy(args->channel_args);
+  grpc_core::ExecCtx::Get()->Flush();
+  grpc_pollset_destroy(args->pollset);
   gpr_free(args->pollset);
-  GRPC_COMBINER_UNREF(exec_ctx, args->lock, NULL);
+  GRPC_COMBINER_UNREF(args->lock, nullptr);
 }
 
 gpr_timespec NSecondDeadline(int seconds) {
@@ -196,14 +196,13 @@
             time_left.tv_sec, time_left.tv_nsec);
     GPR_ASSERT(gpr_time_cmp(time_left, gpr_time_0(GPR_TIMESPAN)) >= 0);
     grpc_pollset_worker* worker = nullptr;
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
     gpr_mu_lock(args->mu);
     GRPC_LOG_IF_ERROR("pollset_work",
-                      grpc_pollset_work(&exec_ctx, args->pollset, &worker,
+                      grpc_pollset_work(args->pollset, &worker,
                                         grpc_timespec_to_millis_round_up(
                                             NSecondDeadline(1))));
     gpr_mu_unlock(args->mu);
-    grpc_exec_ctx_finish(&exec_ctx);
   }
   gpr_event_set(&args->ev, (void*)1);
 }
@@ -235,8 +234,7 @@
   }
 }
 
-void CheckResolverResultLocked(grpc_exec_ctx* exec_ctx, void* argsp,
-                               grpc_error* err) {
+void CheckResolverResultLocked(void* argsp, grpc_error* err) {
   ArgsStruct* args = (ArgsStruct*)argsp;
   grpc_channel_args* channel_args = args->channel_args;
   const grpc_arg* channel_arg =
@@ -272,15 +270,14 @@
   }
   gpr_atm_rel_store(&args->done_atm, 1);
   gpr_mu_lock(args->mu);
-  GRPC_LOG_IF_ERROR("pollset_kick",
-                    grpc_pollset_kick(exec_ctx, args->pollset, nullptr));
+  GRPC_LOG_IF_ERROR("pollset_kick", grpc_pollset_kick(args->pollset, nullptr));
   gpr_mu_unlock(args->mu);
 }
 
 TEST(ResolverComponentTest, TestResolvesRelevantRecords) {
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+  grpc_core::ExecCtx exec_ctx;
   ArgsStruct args;
-  ArgsInit(&exec_ctx, &args);
+  ArgsInit(&args);
   args.expected_addrs = ParseExpectedAddrs(FLAGS_expected_addrs);
   args.expected_service_config_string = FLAGS_expected_chosen_service_config;
   args.expected_lb_policy = FLAGS_expected_lb_policy;
@@ -290,19 +287,18 @@
                       FLAGS_local_dns_server_address.c_str(),
                       FLAGS_target_name.c_str()));
   // create resolver and resolve
-  grpc_resolver* resolver = grpc_resolver_create(&exec_ctx, whole_uri, nullptr,
-                                                 args.pollset_set, args.lock);
+  grpc_resolver* resolver =
+      grpc_resolver_create(whole_uri, nullptr, args.pollset_set, args.lock);
   gpr_free(whole_uri);
   grpc_closure on_resolver_result_changed;
   GRPC_CLOSURE_INIT(&on_resolver_result_changed, CheckResolverResultLocked,
                     (void*)&args, grpc_combiner_scheduler(args.lock));
-  grpc_resolver_next_locked(&exec_ctx, resolver, &args.channel_args,
+  grpc_resolver_next_locked(resolver, &args.channel_args,
                             &on_resolver_result_changed);
-  grpc_exec_ctx_flush(&exec_ctx);
+  grpc_core::ExecCtx::Get()->Flush();
   PollPollsetUntilRequestDone(&args);
-  GRPC_RESOLVER_UNREF(&exec_ctx, resolver, NULL);
-  ArgsFinish(&exec_ctx, &args);
-  grpc_exec_ctx_finish(&exec_ctx);
+  GRPC_RESOLVER_UNREF(resolver, nullptr);
+  ArgsFinish(&args);
 }
 
 }  // namespace
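
The hunks above capture the mechanical shape of this merge's exec_ctx conversion: the explicit grpc_exec_ctx value created with GRPC_EXEC_CTX_INIT and released with grpc_exec_ctx_finish(), plus the exec_ctx argument threaded through every call, are replaced by a stack-allocated grpc_core::ExecCtx, and grpc_exec_ctx_flush() becomes grpc_core::ExecCtx::Get()->Flush(). A minimal standalone analogue of that RAII-plus-thread-local idea, written for illustration only (it is not the real grpc_core::ExecCtx and ignores nesting), is:

#include <cstdio>
#include <functional>
#include <utility>
#include <vector>

// Illustrative stand-in for grpc_core::ExecCtx: a thread-local "current
// context" that callees reach via Get() instead of taking it as a parameter,
// and that flushes any deferred work when it goes out of scope.
class ScopedExecCtx {
 public:
  ScopedExecCtx() { current_ = this; }  // nesting not handled in this sketch
  ~ScopedExecCtx() {
    Flush();
    current_ = nullptr;
  }
  static ScopedExecCtx* Get() { return current_; }

  void Defer(std::function<void()> fn) { pending_.push_back(std::move(fn)); }
  void Flush() {
    // Run deferred closures; a closure may enqueue more work.
    while (!pending_.empty()) {
      std::vector<std::function<void()>> batch;
      batch.swap(pending_);
      for (auto& fn : batch) fn();
    }
  }

 private:
  static thread_local ScopedExecCtx* current_;
  std::vector<std::function<void()>> pending_;
};

thread_local ScopedExecCtx* ScopedExecCtx::current_ = nullptr;

int main() {
  ScopedExecCtx exec_ctx;  // analogous to `grpc_core::ExecCtx exec_ctx;`
  ScopedExecCtx::Get()->Defer([] { std::puts("deferred work ran"); });
  ScopedExecCtx::Get()->Flush();  // analogous to ExecCtx::Get()->Flush()
  return 0;  // destructor flushes anything still pending
}

Because any callee can reach the current context through Get(), the context no longer needs to appear in function signatures, which is why most of the C++ changes in these test files simply drop the first argument.
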
diff --git a/test/cpp/performance/writes_per_rpc_test.cc b/test/cpp/performance/writes_per_rpc_test.cc
index 1c6f44d..0b9dc83 100644
--- a/test/cpp/performance/writes_per_rpc_test.cc
+++ b/test/cpp/performance/writes_per_rpc_test.cc
@@ -82,27 +82,26 @@
     ApplyCommonServerBuilderConfig(&b);
     server_ = b.BuildAndStart();
 
-    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
+    grpc_core::ExecCtx exec_ctx;
 
     /* add server endpoint to server_ */
     {
       const grpc_channel_args* server_args =
           grpc_server_get_channel_args(server_->c_server());
       grpc_transport* transport = grpc_create_chttp2_transport(
-          &exec_ctx, server_args, endpoints.server, false /* is_client */);
+          server_args, endpoints.server, false /* is_client */);
 
       grpc_pollset** pollsets;
       size_t num_pollsets = 0;
       grpc_server_get_pollsets(server_->c_server(), &pollsets, &num_pollsets);
 
       for (size_t i = 0; i < num_pollsets; i++) {
-        grpc_endpoint_add_to_pollset(&exec_ctx, endpoints.server, pollsets[i]);
+        grpc_endpoint_add_to_pollset(endpoints.server, pollsets[i]);
       }
 
-      grpc_server_setup_transport(&exec_ctx, server_->c_server(), transport,
-                                  nullptr, server_args);
-      grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr,
-                                          nullptr);
+      grpc_server_setup_transport(server_->c_server(), transport, nullptr,
+                                  server_args);
+      grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
     }
 
     /* create channel */
@@ -112,18 +111,15 @@
       ApplyCommonChannelArguments(&args);
 
       grpc_channel_args c_args = args.c_channel_args();
-      grpc_transport* transport = grpc_create_chttp2_transport(
-          &exec_ctx, &c_args, endpoints.client, true);
+      grpc_transport* transport =
+          grpc_create_chttp2_transport(&c_args, endpoints.client, true);
       GPR_ASSERT(transport);
       grpc_channel* channel = grpc_channel_create(
-          &exec_ctx, "target", &c_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
-      grpc_chttp2_transport_start_reading(&exec_ctx, transport, nullptr,
-                                          nullptr);
+          "target", &c_args, GRPC_CLIENT_DIRECT_CHANNEL, transport);
+      grpc_chttp2_transport_start_reading(transport, nullptr, nullptr);
 
       channel_ = CreateChannelInternal("", channel);
     }
-
-    grpc_exec_ctx_finish(&exec_ctx);
   }
 
   virtual ~EndpointPairFixture() {
diff --git a/test/cpp/qps/BUILD b/test/cpp/qps/BUILD
index 0d91d52..f1abb19 100644
--- a/test/cpp/qps/BUILD
+++ b/test/cpp/qps/BUILD
@@ -106,7 +106,7 @@
         "histogram.h",
         "stats.h",
     ],
-    deps = ["//:gpr"],
+    deps = ["//test/core/util:grpc_test_util"],
 )
 
 grpc_cc_test(
diff --git a/test/cpp/qps/client_async.cc b/test/cpp/qps/client_async.cc
index 0788821..7cf9d3e 100644
--- a/test/cpp/qps/client_async.cc
+++ b/test/cpp/qps/client_async.cc
@@ -280,6 +280,7 @@
         },
         &got_tag, &ok, gpr_inf_future(GPR_CLOCK_REALTIME))) {
       t->UpdateHistogram(entry_ptr);
+      entry = HistogramEntry();
       shutdown_mu->lock();
       ctx = ProcessTag(thread_idx, got_tag);
       if (ctx == nullptr) {
diff --git a/test/cpp/qps/client_sync.cc b/test/cpp/qps/client_sync.cc
index 9f20b14..82a3f00 100644
--- a/test/cpp/qps/client_sync.cc
+++ b/test/cpp/qps/client_sync.cc
@@ -60,21 +60,20 @@
     SetupLoadTest(config, num_threads_);
   }
 
-  virtual ~SynchronousClient(){};
+  virtual ~SynchronousClient() {}
 
-  virtual void InitThreadFuncImpl(size_t thread_idx) = 0;
+  virtual bool InitThreadFuncImpl(size_t thread_idx) = 0;
   virtual bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) = 0;
 
   void ThreadFunc(size_t thread_idx, Thread* t) override {
-    InitThreadFuncImpl(thread_idx);
+    if (!InitThreadFuncImpl(thread_idx)) {
+      return;
+    }
     for (;;) {
       // run the loop body
       HistogramEntry entry;
       const bool thread_still_ok = ThreadFuncImpl(&entry, thread_idx);
       t->UpdateHistogram(&entry);
-      if (!thread_still_ok) {
-        gpr_log(GPR_ERROR, "Finishing client thread due to RPC error");
-      }
       if (!thread_still_ok || ThreadCompleted()) {
         return;
       }
@@ -109,9 +108,6 @@
 
   size_t num_threads_;
   std::vector<SimpleResponse> responses_;
-
- private:
-  void DestroyMultithreading() override final { EndThreads(); }
 };
 
 class SynchronousUnaryClient final : public SynchronousClient {
@@ -122,7 +118,7 @@
   }
   ~SynchronousUnaryClient() {}
 
-  void InitThreadFuncImpl(size_t thread_idx) override {}
+  bool InitThreadFuncImpl(size_t thread_idx) override { return true; }
 
   bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
     if (!WaitToIssue(thread_idx)) {
@@ -140,6 +136,9 @@
     entry->set_status(s.error_code());
     return true;
   }
+
+ private:
+  void DestroyMultithreading() override final { EndThreads(); }
 };
 
 template <class StreamType>
@@ -149,31 +148,30 @@
       : SynchronousClient(config),
         context_(num_threads_),
         stream_(num_threads_),
+        stream_mu_(num_threads_),
+        shutdown_(num_threads_),
         messages_per_stream_(config.messages_per_stream()),
         messages_issued_(num_threads_) {
     StartThreads(num_threads_);
   }
   virtual ~SynchronousStreamingClient() {
-    std::vector<std::thread> cleanup_threads;
-    for (size_t i = 0; i < num_threads_; i++) {
-      cleanup_threads.emplace_back([this, i]() {
-        auto stream = &stream_[i];
-        if (*stream) {
-          // forcibly cancel the streams, then finish
-          context_[i].TryCancel();
-          (*stream)->Finish().IgnoreError();
-          // don't log any error message on !ok since this was canceled
-        }
-      });
-    }
-    for (auto& th : cleanup_threads) {
-      th.join();
-    }
+    CleanupAllStreams([this](size_t thread_idx) {
+      // Don't log any kind of error since we may have canceled this stream
+      stream_[thread_idx]->Finish().IgnoreError();
+    });
   }
 
  protected:
   std::vector<grpc::ClientContext> context_;
   std::vector<std::unique_ptr<StreamType>> stream_;
+  // stream_mu_ is only needed when changing an element of stream_ or context_
+  std::vector<std::mutex> stream_mu_;
+  // use struct Bool rather than bool because vector<bool> is a packed
+  // specialization, so its elements cannot be accessed concurrently
+  struct Bool {
+    bool val;
+    Bool() : val(false) {}
+  };
+  std::vector<Bool> shutdown_;
   const int messages_per_stream_;
   std::vector<int> messages_issued_;
 
@@ -182,12 +180,40 @@
     // don't set the value since the stream is failed and shouldn't be timed
     entry->set_status(s.error_code());
     if (!s.ok()) {
-      gpr_log(GPR_ERROR, "Stream %" PRIuPTR " received an error %s", thread_idx,
-              s.error_message().c_str());
+      std::lock_guard<std::mutex> l(stream_mu_[thread_idx]);
+      if (!shutdown_[thread_idx].val) {
+        gpr_log(GPR_ERROR, "Stream %" PRIuPTR " received an error %s",
+                thread_idx, s.error_message().c_str());
+      }
     }
+    // Lock the stream_mu_ now because the client context could change
+    std::lock_guard<std::mutex> l(stream_mu_[thread_idx]);
     context_[thread_idx].~ClientContext();
     new (&context_[thread_idx]) ClientContext();
   }
+
+  void CleanupAllStreams(std::function<void(size_t)> cleaner) {
+    std::vector<std::thread> cleanup_threads;
+    for (size_t i = 0; i < num_threads_; i++) {
+      cleanup_threads.emplace_back([this, i, cleaner] {
+        std::lock_guard<std::mutex> l(stream_mu_[i]);
+        shutdown_[i].val = true;
+        if (stream_[i]) {
+          cleaner(i);
+        }
+      });
+    }
+    for (auto& th : cleanup_threads) {
+      th.join();
+    }
+  }
+
+ private:
+  void DestroyMultithreading() override final {
+    CleanupAllStreams(
+        [this](size_t thread_idx) { context_[thread_idx].TryCancel(); });
+    EndThreads();
+  }
 };
 
 class SynchronousStreamingPingPongClient final
@@ -197,24 +223,21 @@
   SynchronousStreamingPingPongClient(const ClientConfig& config)
       : SynchronousStreamingClient(config) {}
   ~SynchronousStreamingPingPongClient() {
-    std::vector<std::thread> cleanup_threads;
-    for (size_t i = 0; i < num_threads_; i++) {
-      cleanup_threads.emplace_back([this, i]() {
-        auto stream = &stream_[i];
-        if (*stream) {
-          (*stream)->WritesDone();
-        }
-      });
-    }
-    for (auto& th : cleanup_threads) {
-      th.join();
-    }
+    CleanupAllStreams(
+        [this](size_t thread_idx) { stream_[thread_idx]->WritesDone(); });
   }
 
-  void InitThreadFuncImpl(size_t thread_idx) override {
+ private:
+  bool InitThreadFuncImpl(size_t thread_idx) override {
     auto* stub = channels_[thread_idx % channels_.size()].get_stub();
-    stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
+    std::lock_guard<std::mutex> l(stream_mu_[thread_idx]);
+    if (!shutdown_[thread_idx].val) {
+      stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
+    } else {
+      return false;
+    }
     messages_issued_[thread_idx] = 0;
+    return true;
   }
 
   bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
@@ -239,7 +262,13 @@
     stream_[thread_idx]->WritesDone();
     FinishStream(entry, thread_idx);
     auto* stub = channels_[thread_idx % channels_.size()].get_stub();
-    stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
+    std::lock_guard<std::mutex> l(stream_mu_[thread_idx]);
+    if (!shutdown_[thread_idx].val) {
+      stream_[thread_idx] = stub->StreamingCall(&context_[thread_idx]);
+    } else {
+      stream_[thread_idx].reset();
+      return false;
+    }
     messages_issued_[thread_idx] = 0;
     return true;
   }
@@ -251,25 +280,24 @@
   SynchronousStreamingFromClientClient(const ClientConfig& config)
       : SynchronousStreamingClient(config), last_issue_(num_threads_) {}
   ~SynchronousStreamingFromClientClient() {
-    std::vector<std::thread> cleanup_threads;
-    for (size_t i = 0; i < num_threads_; i++) {
-      cleanup_threads.emplace_back([this, i]() {
-        auto stream = &stream_[i];
-        if (*stream) {
-          (*stream)->WritesDone();
-        }
-      });
-    }
-    for (auto& th : cleanup_threads) {
-      th.join();
-    }
+    CleanupAllStreams(
+        [this](size_t thread_idx) { stream_[thread_idx]->WritesDone(); });
   }
 
-  void InitThreadFuncImpl(size_t thread_idx) override {
+ private:
+  std::vector<double> last_issue_;
+
+  bool InitThreadFuncImpl(size_t thread_idx) override {
     auto* stub = channels_[thread_idx % channels_.size()].get_stub();
-    stream_[thread_idx] = stub->StreamingFromClient(&context_[thread_idx],
-                                                    &responses_[thread_idx]);
+    std::lock_guard<std::mutex> l(stream_mu_[thread_idx]);
+    if (!shutdown_[thread_idx].val) {
+      stream_[thread_idx] = stub->StreamingFromClient(&context_[thread_idx],
+                                                      &responses_[thread_idx]);
+    } else {
+      return false;
+    }
     last_issue_[thread_idx] = UsageTimer::Now();
+    return true;
   }
 
   bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
@@ -287,13 +315,16 @@
     stream_[thread_idx]->WritesDone();
     FinishStream(entry, thread_idx);
     auto* stub = channels_[thread_idx % channels_.size()].get_stub();
-    stream_[thread_idx] = stub->StreamingFromClient(&context_[thread_idx],
-                                                    &responses_[thread_idx]);
+    std::lock_guard<std::mutex> l(stream_mu_[thread_idx]);
+    if (!shutdown_[thread_idx].val) {
+      stream_[thread_idx] = stub->StreamingFromClient(&context_[thread_idx],
+                                                      &responses_[thread_idx]);
+    } else {
+      stream_[thread_idx].reset();
+      return false;
+    }
     return true;
   }
-
- private:
-  std::vector<double> last_issue_;
 };
 
 class SynchronousStreamingFromServerClient final
@@ -301,12 +332,24 @@
  public:
   SynchronousStreamingFromServerClient(const ClientConfig& config)
       : SynchronousStreamingClient(config), last_recv_(num_threads_) {}
-  void InitThreadFuncImpl(size_t thread_idx) override {
+  ~SynchronousStreamingFromServerClient() {}
+
+ private:
+  std::vector<double> last_recv_;
+
+  bool InitThreadFuncImpl(size_t thread_idx) override {
     auto* stub = channels_[thread_idx % channels_.size()].get_stub();
-    stream_[thread_idx] =
-        stub->StreamingFromServer(&context_[thread_idx], request_);
+    std::lock_guard<std::mutex> l(stream_mu_[thread_idx]);
+    if (!shutdown_[thread_idx].val) {
+      stream_[thread_idx] =
+          stub->StreamingFromServer(&context_[thread_idx], request_);
+    } else {
+      return false;
+    }
     last_recv_[thread_idx] = UsageTimer::Now();
+    return true;
   }
+
   bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
     GPR_TIMER_SCOPE("SynchronousStreamingFromServerClient::ThreadFunc", 0);
     if (stream_[thread_idx]->Read(&responses_[thread_idx])) {
@@ -317,13 +360,16 @@
     }
     FinishStream(entry, thread_idx);
     auto* stub = channels_[thread_idx % channels_.size()].get_stub();
-    stream_[thread_idx] =
-        stub->StreamingFromServer(&context_[thread_idx], request_);
+    std::lock_guard<std::mutex> l(stream_mu_[thread_idx]);
+    if (!shutdown_[thread_idx].val) {
+      stream_[thread_idx] =
+          stub->StreamingFromServer(&context_[thread_idx], request_);
+    } else {
+      stream_[thread_idx].reset();
+      return false;
+    }
     return true;
   }
-
- private:
-  std::vector<double> last_recv_;
 };
 
 class SynchronousStreamingBothWaysClient final
@@ -333,24 +379,22 @@
   SynchronousStreamingBothWaysClient(const ClientConfig& config)
       : SynchronousStreamingClient(config) {}
   ~SynchronousStreamingBothWaysClient() {
-    std::vector<std::thread> cleanup_threads;
-    for (size_t i = 0; i < num_threads_; i++) {
-      cleanup_threads.emplace_back([this, i]() {
-        auto stream = &stream_[i];
-        if (*stream) {
-          (*stream)->WritesDone();
-        }
-      });
-    }
-    for (auto& th : cleanup_threads) {
-      th.join();
-    }
+    CleanupAllStreams(
+        [this](size_t thread_idx) { stream_[thread_idx]->WritesDone(); });
   }
 
-  void InitThreadFuncImpl(size_t thread_idx) override {
+ private:
+  bool InitThreadFuncImpl(size_t thread_idx) override {
     auto* stub = channels_[thread_idx % channels_.size()].get_stub();
-    stream_[thread_idx] = stub->StreamingBothWays(&context_[thread_idx]);
+    std::lock_guard<std::mutex> l(stream_mu_[thread_idx]);
+    if (!shutdown_[thread_idx].val) {
+      stream_[thread_idx] = stub->StreamingBothWays(&context_[thread_idx]);
+    } else {
+      return false;
+    }
+    return true;
   }
+
   bool ThreadFuncImpl(HistogramEntry* entry, size_t thread_idx) override {
     // TODO (vjpai): Do this
     return true;
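
Every streaming client above gains the same guard: a per-thread stream_mu_ plus a shutdown_ flag (wrapped in struct Bool because std::vector<bool> packs its elements), so DestroyMultithreading() can cancel a context without racing a worker thread that is about to re-create its stream. A standalone sketch of that pattern under hypothetical names (Workers, RecreateStream, CleanupAll are illustrative, not part of the patch):

#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// vector<bool> is a packed specialization, so neighbouring elements may share
// a word; wrap the flag in a struct to get distinct per-thread storage
// (mirrors the `struct Bool` in the diff).
struct Flag {
  bool val = false;
};

class Workers {
 public:
  explicit Workers(size_t n) : mu_(n), shutdown_(n), stream_(n) {}

  // Called on a worker thread each time it wants a fresh "stream".
  // Returns false once shutdown has been requested, so the caller can exit.
  bool RecreateStream(size_t i) {
    std::lock_guard<std::mutex> l(mu_[i]);
    if (shutdown_[i].val) {
      stream_[i] = 0;  // drop the old stream instead of making a new one
      return false;
    }
    ++stream_[i];  // stand-in for stub->StreamingCall(&context_[i])
    return true;
  }

  // Called from the teardown path: mark every worker as shut down under its
  // own lock, then run per-worker cleanup.
  void CleanupAll() {
    std::vector<std::thread> cleaners;
    for (size_t i = 0; i < mu_.size(); ++i) {
      cleaners.emplace_back([this, i] {
        std::lock_guard<std::mutex> l(mu_[i]);
        shutdown_[i].val = true;
        if (stream_[i] != 0) std::printf("cleaned worker %zu\n", i);
      });
    }
    for (auto& t : cleaners) t.join();
  }

 private:
  std::vector<std::mutex> mu_;
  std::vector<Flag> shutdown_;
  std::vector<int> stream_;  // placeholder for the real stream objects
};

int main() {
  Workers w(4);
  for (size_t i = 0; i < 4; ++i) w.RecreateStream(i);
  w.CleanupAll();
  for (size_t i = 0; i < 4; ++i) {
    std::printf("recreate after shutdown: %d\n", w.RecreateStream(i) ? 1 : 0);
  }
  return 0;
}

The key point is that both sides take the same per-thread mutex and the worker re-checks the shutdown flag after acquiring it, so a stream is never re-created after cleanup has started.
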
diff --git a/test/cpp/qps/gen_build_yaml.py b/test/cpp/qps/gen_build_yaml.py
index 1ef8f65..bd40d0a 100755
--- a/test/cpp/qps/gen_build_yaml.py
+++ b/test/cpp/qps/gen_build_yaml.py
@@ -101,24 +101,26 @@
     }
     for scenario_json in scenario_config.CXXLanguage().scenarios()
     if 'inproc' in scenario_json.get('CATEGORIES', [])
-  ] + [
-    {
-      'name': 'json_run_localhost',
-      'shortname': 'json_run_localhost:%s_low_thread_count' % scenario_json['name'],
-      'args': ['--scenarios_json', _scenario_json_string(scenario_json, True)],
-      'ci_platforms': ['linux'],
-      'platforms': ['linux'],
-      'flaky': False,
-      'language': 'c++',
-      'boringssl': True,
-      'defaults': 'boringssl',
-      'cpu_cost': guess_cpu(scenario_json, True),
-      'exclude_configs': sorted(c for c in configs_from_yaml if c not in ('tsan', 'asan')),
-      'timeout_seconds': 10*60,
-      'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
-      'auto_timeout_scaling': False
-   }
-    for scenario_json in scenario_config.CXXLanguage().scenarios()
-    if 'scalable' in scenario_json.get('CATEGORIES', [])
   ]
+  # Disabled until https://github.com/grpc/grpc/issues/13122 is resolved.
+  # + [
+  #   {
+  #     'name': 'json_run_localhost',
+  #     'shortname': 'json_run_localhost:%s_low_thread_count' % scenario_json['name'],
+  #     'args': ['--scenarios_json', _scenario_json_string(scenario_json, True)],
+  #     'ci_platforms': ['linux'],
+  #     'platforms': ['linux'],
+  #     'flaky': False,
+  #     'language': 'c++',
+  #     'boringssl': True,
+  #     'defaults': 'boringssl',
+  #     'cpu_cost': guess_cpu(scenario_json, True),
+  #     'exclude_configs': sorted(c for c in configs_from_yaml if c not in ('tsan', 'asan')),
+  #     'timeout_seconds': 10*60,
+  #     'excluded_poll_engines': scenario_json.get('EXCLUDED_POLL_ENGINES', []),
+  #     'auto_timeout_scaling': False
+  #  }
+  #   for scenario_json in scenario_config.CXXLanguage().scenarios()
+  #   if 'scalable' in scenario_json.get('CATEGORIES', [])
+  # ]
 })
diff --git a/test/cpp/qps/histogram.h b/test/cpp/qps/histogram.h
index e31d5d7..ba72b5b 100644
--- a/test/cpp/qps/histogram.h
+++ b/test/cpp/qps/histogram.h
@@ -19,8 +19,8 @@
 #ifndef TEST_QPS_HISTOGRAM_H
 #define TEST_QPS_HISTOGRAM_H
 
-#include <grpc/support/histogram.h>
 #include "src/proto/grpc/testing/stats.pb.h"
+#include "test/core/util/histogram.h"
 
 namespace grpc {
 namespace testing {
@@ -29,36 +29,36 @@
  public:
   // TODO: look into making histogram params not hardcoded for C++
   Histogram()
-      : impl_(gpr_histogram_create(default_resolution(),
-                                   default_max_possible())) {}
+      : impl_(grpc_histogram_create(default_resolution(),
+                                    default_max_possible())) {}
   ~Histogram() {
-    if (impl_) gpr_histogram_destroy(impl_);
+    if (impl_) grpc_histogram_destroy(impl_);
   }
   Histogram(Histogram&& other) : impl_(other.impl_) { other.impl_ = nullptr; }
 
-  void Merge(const Histogram& h) { gpr_histogram_merge(impl_, h.impl_); }
-  void Add(double value) { gpr_histogram_add(impl_, value); }
+  void Merge(const Histogram& h) { grpc_histogram_merge(impl_, h.impl_); }
+  void Add(double value) { grpc_histogram_add(impl_, value); }
   double Percentile(double pctile) const {
-    return gpr_histogram_percentile(impl_, pctile);
+    return grpc_histogram_percentile(impl_, pctile);
   }
-  double Count() const { return gpr_histogram_count(impl_); }
+  double Count() const { return grpc_histogram_count(impl_); }
   void Swap(Histogram* other) { std::swap(impl_, other->impl_); }
   void FillProto(HistogramData* p) {
     size_t n;
-    const auto* data = gpr_histogram_get_contents(impl_, &n);
+    const auto* data = grpc_histogram_get_contents(impl_, &n);
     for (size_t i = 0; i < n; i++) {
       p->add_bucket(data[i]);
     }
-    p->set_min_seen(gpr_histogram_minimum(impl_));
-    p->set_max_seen(gpr_histogram_maximum(impl_));
-    p->set_sum(gpr_histogram_sum(impl_));
-    p->set_sum_of_squares(gpr_histogram_sum_of_squares(impl_));
-    p->set_count(gpr_histogram_count(impl_));
+    p->set_min_seen(grpc_histogram_minimum(impl_));
+    p->set_max_seen(grpc_histogram_maximum(impl_));
+    p->set_sum(grpc_histogram_sum(impl_));
+    p->set_sum_of_squares(grpc_histogram_sum_of_squares(impl_));
+    p->set_count(grpc_histogram_count(impl_));
   }
   void MergeProto(const HistogramData& p) {
-    gpr_histogram_merge_contents(impl_, &*p.bucket().begin(), p.bucket_size(),
-                                 p.min_seen(), p.max_seen(), p.sum(),
-                                 p.sum_of_squares(), p.count());
+    grpc_histogram_merge_contents(impl_, &*p.bucket().begin(), p.bucket_size(),
+                                  p.min_seen(), p.max_seen(), p.sum(),
+                                  p.sum_of_squares(), p.count());
   }
 
   static double default_resolution() { return 0.01; }
@@ -68,7 +68,7 @@
   Histogram(const Histogram&);
   Histogram& operator=(const Histogram&);
 
-  gpr_histogram* impl_;
+  grpc_histogram* impl_;
 };
 }  // namespace testing
 }  // namespace grpc
diff --git a/test/cpp/qps/qps_interarrival_test.cc b/test/cpp/qps/qps_interarrival_test.cc
index 461bf62..625b7db 100644
--- a/test/cpp/qps/qps_interarrival_test.cc
+++ b/test/cpp/qps/qps_interarrival_test.cc
@@ -20,7 +20,7 @@
 #include <iostream>
 
 // Use the C histogram rather than C++ to avoid depending on proto
-#include <grpc/support/histogram.h>
+#include "test/core/util/histogram.h"
 
 #include "test/cpp/qps/interarrival.h"
 #include "test/cpp/util/test_config.h"
@@ -31,21 +31,21 @@
 static void RunTest(RandomDistInterface&& r, int threads, std::string title) {
   InterarrivalTimer timer;
   timer.init(r, threads);
-  gpr_histogram* h(gpr_histogram_create(0.01, 60e9));
+  grpc_histogram* h(grpc_histogram_create(0.01, 60e9));
 
   for (int i = 0; i < 10000000; i++) {
     for (int j = 0; j < threads; j++) {
-      gpr_histogram_add(h, timer.next(j));
+      grpc_histogram_add(h, timer.next(j));
     }
   }
 
   std::cout << title << " Distribution" << std::endl;
   std::cout << "Value, Percentile" << std::endl;
   for (double pct = 0.0; pct < 100.0; pct += 1.0) {
-    std::cout << gpr_histogram_percentile(h, pct) << "," << pct << std::endl;
+    std::cout << grpc_histogram_percentile(h, pct) << "," << pct << std::endl;
   }
 
-  gpr_histogram_destroy(h);
+  grpc_histogram_destroy(h);
 }
 
 using grpc::testing::ExpDist;
diff --git a/test/cpp/qps/qps_worker.cc b/test/cpp/qps/qps_worker.cc
index c288b03..4c9ab0e 100644
--- a/test/cpp/qps/qps_worker.cc
+++ b/test/cpp/qps/qps_worker.cc
@@ -32,12 +32,12 @@
 #include <grpc/grpc.h>
 #include <grpc/support/alloc.h>
 #include <grpc/support/cpu.h>
-#include <grpc/support/histogram.h>
 #include <grpc/support/host_port.h>
 #include <grpc/support/log.h>
 
 #include "src/proto/grpc/testing/services.pb.h"
 #include "test/core/util/grpc_profiler.h"
+#include "test/core/util/histogram.h"
 #include "test/cpp/qps/client.h"
 #include "test/cpp/qps/server.h"
 #include "test/cpp/util/create_test_channel.h"
diff --git a/test/cpp/server/server_builder_test.cc b/test/cpp/server/server_builder_test.cc
index d18459c..694ce54 100644
--- a/test/cpp/server/server_builder_test.cc
+++ b/test/cpp/server/server_builder_test.cc
@@ -22,6 +22,8 @@
 #include <grpc++/server.h>
 #include <grpc++/server_builder.h>
 
+#include <grpc/grpc.h>
+
 #include "src/proto/grpc/testing/echo.grpc.pb.h"
 #include "test/core/util/port.h"
 
@@ -77,5 +79,8 @@
 
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
+  grpc_init();
+  int ret = RUN_ALL_TESTS();
+  grpc_shutdown();
+  return ret;
 }
diff --git a/test/cpp/util/byte_buffer_test.cc b/test/cpp/util/byte_buffer_test.cc
index 8fb51bc..d603b28 100644
--- a/test/cpp/util/byte_buffer_test.cc
+++ b/test/cpp/util/byte_buffer_test.cc
@@ -22,6 +22,7 @@
 #include <vector>
 
 #include <grpc++/support/slice.h>
+#include <grpc/grpc.h>
 #include <grpc/slice.h>
 #include <gtest/gtest.h>
 
@@ -109,5 +110,8 @@
 
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
+  grpc_init();
+  int ret = RUN_ALL_TESTS();
+  grpc_shutdown();
+  return ret;
 }
diff --git a/test/cpp/util/cli_call.cc b/test/cpp/util/cli_call.cc
index c3220ef..4f1a20c 100644
--- a/test/cpp/util/cli_call.cc
+++ b/test/cpp/util/cli_call.cc
@@ -126,7 +126,7 @@
   call_->Write(send_buffer, tag(2));
   write_done_ = false;
   while (!write_done_) {
-    gpr_cv_wait(&write_cv_, &write_mu_, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&write_cv_, &write_mu_, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(&write_mu_);
 }
@@ -136,7 +136,7 @@
   call_->WritesDone(tag(4));
   write_done_ = false;
   while (!write_done_) {
-    gpr_cv_wait(&write_cv_, &write_mu_, gpr_inf_future(GPR_CLOCK_REALTIME));
+    gpr_cv_wait(&write_cv_, &write_mu_, gpr_inf_future(GPR_CLOCK_MONOTONIC));
   }
   gpr_mu_unlock(&write_mu_);
 }
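
The cli_call.cc hunk switches the gpr_cv_wait deadline from GPR_CLOCK_REALTIME to GPR_CLOCK_MONOTONIC. The general motivation for monotonic deadlines is that they are unaffected by wall-clock adjustments (NTP steps, manual changes), so a wait cannot be stretched or cut short when the system clock moves. The same idea in portable C++ terms, as a small standalone analogue using std::condition_variable and steady_clock rather than the gpr API:

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

int main() {
  std::mutex mu;
  std::condition_variable cv;
  bool done = false;

  std::thread producer([&] {
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    std::lock_guard<std::mutex> l(mu);
    done = true;
    cv.notify_one();
  });

  // Deadline on the monotonic (steady) clock: unaffected if the system's
  // wall clock is adjusted while we wait, unlike a system_clock deadline.
  auto deadline = std::chrono::steady_clock::now() + std::chrono::seconds(5);
  std::unique_lock<std::mutex> l(mu);
  while (!done) {
    if (cv.wait_until(l, deadline) == std::cv_status::timeout) {
      std::puts("timed out");
      break;
    }
  }
  l.unlock();
  producer.join();
  std::puts(done ? "signalled" : "not signalled");
  return 0;
}
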
diff --git a/test/cpp/util/cli_credentials.cc b/test/cpp/util/cli_credentials.cc
index f1f43f8..aa4eafb 100644
--- a/test/cpp/util/cli_credentials.cc
+++ b/test/cpp/util/cli_credentials.cc
@@ -22,27 +22,43 @@
 
 DEFINE_bool(enable_ssl, false, "Whether to use ssl/tls.");
 DEFINE_bool(use_auth, false, "Whether to create default google credentials.");
+DEFINE_string(
+    access_token, "",
+    "The access token that will be sent to the server to authenticate RPCs.");
 
 namespace grpc {
 namespace testing {
 
 std::shared_ptr<grpc::ChannelCredentials> CliCredentials::GetCredentials()
     const {
-  if (!FLAGS_enable_ssl) {
-    return grpc::InsecureChannelCredentials();
-  } else {
+  if (!FLAGS_access_token.empty()) {
     if (FLAGS_use_auth) {
-      return grpc::GoogleDefaultCredentials();
-    } else {
-      return grpc::SslCredentials(grpc::SslCredentialsOptions());
+      fprintf(stderr,
+              "warning: use_auth is ignored when access_token is provided.");
     }
+
+    return grpc::CompositeChannelCredentials(
+        grpc::SslCredentials(grpc::SslCredentialsOptions()),
+        grpc::AccessTokenCredentials(FLAGS_access_token));
   }
+
+  if (FLAGS_use_auth) {
+    return grpc::GoogleDefaultCredentials();
+  }
+
+  if (FLAGS_enable_ssl) {
+    return grpc::SslCredentials(grpc::SslCredentialsOptions());
+  }
+
+  return grpc::InsecureChannelCredentials();
 }
 
 const grpc::string CliCredentials::GetCredentialUsage() const {
   return "    --enable_ssl             ; Set whether to use tls\n"
          "    --use_auth               ; Set whether to create default google"
-         " credentials\n";
+         " credentials\n"
+         "    --access_token           ; Set the access token in metadata,"
+         " overrides --use_auth\n";
 }
 }  // namespace testing
 }  // namespace grpc
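
With this change grpc_cli resolves credentials in a fixed precedence: an explicit --access_token wins and rides as call credentials on a TLS channel, then --use_auth, then --enable_ssl, then insecure. A compressed sketch of that order against the grpc++ credentials API used elsewhere in this diff, taking the flag values as plain parameters instead of gflags (ChooseCredentials is a hypothetical helper, not part of the patch):

#include <memory>
#include <string>

#include <grpc++/security/credentials.h>

// Mirrors the precedence in CliCredentials::GetCredentials() above:
// access_token > use_auth > enable_ssl > insecure.
std::shared_ptr<grpc::ChannelCredentials> ChooseCredentials(
    const std::string& access_token, bool use_auth, bool enable_ssl) {
  if (!access_token.empty()) {
    // Access tokens are call credentials; attach them to a TLS channel.
    return grpc::CompositeChannelCredentials(
        grpc::SslCredentials(grpc::SslCredentialsOptions()),
        grpc::AccessTokenCredentials(access_token));
  }
  if (use_auth) {
    return grpc::GoogleDefaultCredentials();
  }
  if (enable_ssl) {
    return grpc::SslCredentials(grpc::SslCredentialsOptions());
  }
  return grpc::InsecureChannelCredentials();
}
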
diff --git a/test/cpp/util/grpc_tool.cc b/test/cpp/util/grpc_tool.cc
index a6d08cd..30c43b2 100644
--- a/test/cpp/util/grpc_tool.cc
+++ b/test/cpp/util/grpc_tool.cc
@@ -124,13 +124,32 @@
     return;
   }
   std::vector<grpc::string> fields;
-  const char* delim = ":";
-  size_t cur, next = -1;
-  do {
-    cur = next + 1;
-    next = FLAGS_metadata.find_first_of(delim, cur);
-    fields.push_back(FLAGS_metadata.substr(cur, next - cur));
-  } while (next != grpc::string::npos);
+  const char delim = ':';
+  const char escape = '\\';
+  size_t cur = -1;
+  std::stringstream ss;
+  while (++cur < FLAGS_metadata.length()) {
+    switch (FLAGS_metadata.at(cur)) {
+      case escape:
+        if (cur < FLAGS_metadata.length() - 1) {
+          char c = FLAGS_metadata.at(++cur);
+          if (c == delim || c == escape) {
+            ss << c;
+            continue;
+          }
+        }
+        fprintf(stderr, "Failed to parse metadata flag.\n");
+        exit(1);
+      case delim:
+        fields.push_back(ss.str());
+        ss.str("");
+        ss.clear();
+        break;
+      default:
+        ss << FLAGS_metadata.at(cur);
+    }
+  }
+  fields.push_back(ss.str());
   if (fields.size() % 2) {
     fprintf(stderr, "Failed to parse metadata flag.\n");
     exit(1);
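
The new loop lets a metadata value contain ':' or '\' by escaping it with a backslash, so key:val\:with\:colons parses as the pair (key, val:with:colons), while a dangling escape or an odd number of fields is an error. A standalone sketch of the same splitting rules, with a hypothetical ParseMetadata helper that throws instead of calling exit(1):

#include <cstdio>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

// Split "k1:v1:k2:v2" into fields, honouring "\:" and "\\" escapes inside a
// field; mirrors the loop added to grpc_tool.cc above.
std::vector<std::string> ParseMetadata(const std::string& flag) {
  std::vector<std::string> fields;
  std::stringstream ss;
  for (size_t cur = 0; cur < flag.length(); ++cur) {
    char c = flag.at(cur);
    if (c == '\\') {
      if (cur + 1 < flag.length() &&
          (flag.at(cur + 1) == ':' || flag.at(cur + 1) == '\\')) {
        ss << flag.at(++cur);  // keep the escaped character literally
        continue;
      }
      throw std::runtime_error("Failed to parse metadata flag.");
    } else if (c == ':') {
      fields.push_back(ss.str());  // unescaped ':' ends the current field
      ss.str("");
      ss.clear();
    } else {
      ss << c;
    }
  }
  fields.push_back(ss.str());
  if (fields.size() % 2) {
    throw std::runtime_error("Failed to parse metadata flag.");
  }
  return fields;
}

int main() {
  for (const auto& f : ParseMetadata("key:val\\:with\\:colons")) {
    std::printf("[%s]\n", f.c_str());
  }
  return 0;
}

The CallCommandWithMetadata and CallCommandWithBadMetadata tests added below exercise exactly these inputs.
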
diff --git a/test/cpp/util/grpc_tool_test.cc b/test/cpp/util/grpc_tool_test.cc
index 1c07b2a..0b599f4 100644
--- a/test/cpp/util/grpc_tool_test.cc
+++ b/test/cpp/util/grpc_tool_test.cc
@@ -85,6 +85,8 @@
 DECLARE_bool(binary_output);
 DECLARE_bool(l);
 DECLARE_bool(batch);
+DECLARE_string(metadata);
+DECLARE_string(protofiles);
 
 namespace {
 
@@ -618,6 +620,8 @@
   // Expected output: ECHO_RESPONSE_MESSAGE
   EXPECT_TRUE(0 == strcmp(output_stream.str().c_str(), ECHO_RESPONSE_MESSAGE));
 
+  FLAGS_binary_input = false;
+  FLAGS_binary_output = false;
   ShutdownServer();
 }
 
@@ -652,6 +656,84 @@
   EXPECT_TRUE(0 == output_stream.tellp());
 }
 
+TEST_F(GrpcToolTest, CallCommandWithMetadata) {
+  // Test input "grpc_cli call localhost:<port> Echo "message: 'Hello'"
+  const grpc::string server_address = SetUpServer();
+  const char* argv[] = {"grpc_cli", "call", server_address.c_str(), "Echo",
+                        "message: 'Hello'"};
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key0:val0:key1:valq:key2:val2";
+    EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv,
+                                     TestCliCredentials(),
+                                     std::bind(PrintStream, &output_stream,
+                                               std::placeholders::_1)));
+    // Expected output: "message: \"Hello\""
+    EXPECT_TRUE(nullptr !=
+                strstr(output_stream.str().c_str(), "message: \"Hello\""));
+  }
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key:val\\:val";
+    EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv,
+                                     TestCliCredentials(),
+                                     std::bind(PrintStream, &output_stream,
+                                               std::placeholders::_1)));
+    // Expected output: "message: \"Hello\""
+    EXPECT_TRUE(nullptr !=
+                strstr(output_stream.str().c_str(), "message: \"Hello\""));
+  }
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key:val\\\\val";
+    EXPECT_TRUE(0 == GrpcToolMainLib(ArraySize(argv), argv,
+                                     TestCliCredentials(),
+                                     std::bind(PrintStream, &output_stream,
+                                               std::placeholders::_1)));
+    // Expected output: "message: \"Hello\""
+    EXPECT_TRUE(nullptr !=
+                strstr(output_stream.str().c_str(), "message: \"Hello\""));
+  }
+
+  FLAGS_metadata = "";
+  ShutdownServer();
+}
+
+TEST_F(GrpcToolTest, CallCommandWithBadMetadata) {
+  // Test input "grpc_cli call localhost:10000 Echo "message: 'Hello'"
+  const char* argv[] = {"grpc_cli", "call", "localhost:10000", "Echo",
+                        "message: 'Hello'"};
+  FLAGS_protofiles = "src/proto/grpc/testing/echo.proto";
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key0:val0:key1";
+    // Exit with 1
+    EXPECT_EXIT(
+        GrpcToolMainLib(
+            ArraySize(argv), argv, TestCliCredentials(),
+            std::bind(PrintStream, &output_stream, std::placeholders::_1)),
+        ::testing::ExitedWithCode(1), ".*Failed to parse metadata flag.*");
+  }
+
+  {
+    std::stringstream output_stream;
+    FLAGS_metadata = "key:val\\val";
+    // Exit with 1
+    EXPECT_EXIT(
+        GrpcToolMainLib(
+            ArraySize(argv), argv, TestCliCredentials(),
+            std::bind(PrintStream, &output_stream, std::placeholders::_1)),
+        ::testing::ExitedWithCode(1), ".*Failed to parse metadata flag.*");
+  }
+
+  FLAGS_metadata = "";
+  FLAGS_protofiles = "";
+}
+
 }  // namespace testing
 }  // namespace grpc
 
diff --git a/test/cpp/util/slice_test.cc b/test/cpp/util/slice_test.cc
index 8a8962d..c2e55f3 100644
--- a/test/cpp/util/slice_test.cc
+++ b/test/cpp/util/slice_test.cc
@@ -18,6 +18,7 @@
 
 #include <grpc++/support/slice.h>
 
+#include <grpc/grpc.h>
 #include <grpc/slice.h>
 #include <gtest/gtest.h>
 
@@ -127,5 +128,8 @@
 
 int main(int argc, char** argv) {
   ::testing::InitGoogleTest(&argc, argv);
-  return RUN_ALL_TESTS();
+  grpc_init();
+  int ret = RUN_ALL_TESTS();
+  grpc_shutdown();
+  return ret;
 }
diff --git a/test/distrib/cpp/run_distrib_test_cmake.bat b/test/distrib/cpp/run_distrib_test_cmake.bat
new file mode 100644
index 0000000..047846b
--- /dev/null
+++ b/test/distrib/cpp/run_distrib_test_cmake.bat
@@ -0,0 +1,75 @@
+@rem Copyright 2016 gRPC authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem     http://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+
+@rem change to the repo root directory
+cd /d %~dp0\..\..\..
+
+@rem TODO(jtattermusch): Kokoro has pre-installed protoc.exe in C:\Program Files\ProtoC and that directory
+@rem is on PATH. To avoid picking up the older version of protoc.exe, we change the path to something non-existent.
+set PATH=%PATH:ProtoC=DontPickupProtoC%
+
+@rem Install into ./testinstall, but use an absolute path and forward slashes
+set INSTALL_DIR=%cd:\=/%/testinstall
+
+@rem Download OpenSSL-Win32 originally installed from https://slproweb.com/products/Win32OpenSSL.html
+powershell -Command "(New-Object Net.WebClient).DownloadFile('https://storage.googleapis.com/grpc-testing.appspot.com/OpenSSL-Win32-1_1_0g.zip', 'OpenSSL-Win32.zip')"
+powershell -Command "Add-Type -Assembly 'System.IO.Compression.FileSystem'; [System.IO.Compression.ZipFile]::ExtractToDirectory('OpenSSL-Win32.zip', '.');"
+
+@rem set absolute path to OpenSSL with forward slashes
+set OPENSSL_DIR=%cd:\=/%/OpenSSL-Win32
+
+cd third_party/zlib
+mkdir cmake
+cd cmake
+cmake -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% ..
+cmake --build . --config Release --target install || goto :error
+cd ../../..
+
+cd third_party/protobuf/cmake
+mkdir build
+cd build
+cmake -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% -Dprotobuf_MSVC_STATIC_RUNTIME=OFF -Dprotobuf_BUILD_TESTS=OFF ..
+cmake --build . --config Release --target install || goto :error
+cd ../../../..
+
+cd third_party/cares/cares
+mkdir cmake
+cd cmake
+cmake -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% ..
+cmake --build . --config Release --target install || goto :error
+cd ../../../..
+
+@rem OpenSSL-Win32 and OpenSSL-Win64 can be downloaded from https://slproweb.com/products/Win32OpenSSL.html
+cd cmake
+mkdir build
+cd build
+cmake -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% -DOPENSSL_ROOT_DIR=%OPENSSL_DIR% -DOPENSSL_INCLUDE_DIR=%OPENSSL_DIR%/include -DZLIB_LIBRARY=%INSTALL_DIR%/lib/zlibstatic.lib -DZLIB_INCLUDE_DIR=%INSTALL_DIR%/include -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF -DgRPC_PROTOBUF_PROVIDER=package -DgRPC_ZLIB_PROVIDER=package -DgRPC_CARES_PROVIDER=package -DgRPC_SSL_PROVIDER=package -DCMAKE_BUILD_TYPE=Release ../.. || goto :error
+cmake --build . --config Release --target install || goto :error
+cd ../..
+
+@rem Build helloworld example using cmake
+cd examples/cpp/helloworld
+mkdir cmake
+cd cmake
+mkdir build
+cd build
+cmake -DCMAKE_INSTALL_PREFIX=%INSTALL_DIR% ../.. || goto :error
+cmake --build . --config Release || goto :error
+cd ../../../../..
+
+goto :EOF
+
+:error
+echo Failed!
+exit /b %errorlevel%
diff --git a/test/distrib/cpp/run_distrib_test_cmake.sh b/test/distrib/cpp/run_distrib_test_cmake.sh
index ead8cc1..a9c081c 100755
--- a/test/distrib/cpp/run_distrib_test_cmake.sh
+++ b/test/distrib/cpp/run_distrib_test_cmake.sh
@@ -19,7 +19,6 @@
 
 echo "deb http://ftp.debian.org/debian jessie-backports main" | tee /etc/apt/sources.list.d/jessie-backports.list
 apt-get update
-#apt-get install -t jessie-backports -y libc-ares-dev  # we need specifically version 1.12
 apt-get install -t jessie-backports -y libssl-dev
 
 # Install c-ares
diff --git a/third_party/zlib.BUILD b/third_party/zlib.BUILD
index 7879a81..a71c85f 100644
--- a/third_party/zlib.BUILD
+++ b/third_party/zlib.BUILD
@@ -27,7 +27,7 @@
         "zutil.h",
     ],
     includes = [
-        "include",
+        ".",
     ],
     linkstatic = 1,
     visibility = [
diff --git a/tools/buildgen/build-cleaner.py b/tools/buildgen/build-cleaner.py
index 7b42844..a6b86fb 100755
--- a/tools/buildgen/build-cleaner.py
+++ b/tools/buildgen/build-cleaner.py
@@ -22,65 +22,65 @@
 
 TEST = (os.environ.get('TEST', 'false') == 'true')
 
-_TOP_LEVEL_KEYS = ['settings', 'proto_deps', 'filegroups', 'libs', 'targets', 'vspackages']
+_TOP_LEVEL_KEYS = [
+    'settings', 'proto_deps', 'filegroups', 'libs', 'targets', 'vspackages'
+]
 _ELEM_KEYS = [
-    'name',
-    'gtest',
-    'cpu_cost',
-    'flaky',
-    'build',
-    'run',
-    'language',
-    'public_headers',
-    'headers',
-    'src',
-    'deps']
+    'name', 'gtest', 'cpu_cost', 'flaky', 'build', 'run', 'language',
+    'public_headers', 'headers', 'src', 'deps'
+]
+
 
 def repr_ordered_dict(dumper, odict):
-  return dumper.represent_mapping(u'tag:yaml.org,2002:map', odict.items())
+    return dumper.represent_mapping(u'tag:yaml.org,2002:map', odict.items())
+
 
 yaml.add_representer(collections.OrderedDict, repr_ordered_dict)
 
+
 def rebuild_as_ordered_dict(indict, special_keys):
-  outdict = collections.OrderedDict()
-  for key in sorted(indict.keys()):
-    if '#' in key:
-      outdict[key] = indict[key]
-  for key in special_keys:
-    if key in indict:
-      outdict[key] = indict[key]
-  for key in sorted(indict.keys()):
-    if key in special_keys: continue
-    if '#' in key: continue
-    outdict[key] = indict[key]
-  return outdict
+    outdict = collections.OrderedDict()
+    for key in sorted(indict.keys()):
+        if '#' in key:
+            outdict[key] = indict[key]
+    for key in special_keys:
+        if key in indict:
+            outdict[key] = indict[key]
+    for key in sorted(indict.keys()):
+        if key in special_keys: continue
+        if '#' in key: continue
+        outdict[key] = indict[key]
+    return outdict
+
 
 def clean_elem(indict):
-  for name in ['public_headers', 'headers', 'src']:
-    if name not in indict: continue
-    inlist = indict[name]
-    protos = list(x for x in inlist if os.path.splitext(x)[1] == '.proto')
-    others = set(x for x in inlist if x not in protos)
-    indict[name] = protos + sorted(others)
-  return rebuild_as_ordered_dict(indict, _ELEM_KEYS)
+    for name in ['public_headers', 'headers', 'src']:
+        if name not in indict: continue
+        inlist = indict[name]
+        protos = list(x for x in inlist if os.path.splitext(x)[1] == '.proto')
+        others = set(x for x in inlist if x not in protos)
+        indict[name] = protos + sorted(others)
+    return rebuild_as_ordered_dict(indict, _ELEM_KEYS)
+
 
 for filename in sys.argv[1:]:
-  with open(filename) as f:
-    js = yaml.load(f)
-  js = rebuild_as_ordered_dict(js, _TOP_LEVEL_KEYS)
-  for grp in ['filegroups', 'libs', 'targets']:
-    if grp not in js: continue
-    js[grp] = sorted([clean_elem(x) for x in js[grp]],
-                     key=lambda x: (x.get('language', '_'), x['name']))
-  output = yaml.dump(js, indent=2, width=80, default_flow_style=False)
-  # massage out trailing whitespace
-  lines = []
-  for line in output.splitlines():
-    lines.append(line.rstrip() + '\n')
-  output = ''.join(lines)
-  if TEST:
     with open(filename) as f:
-      assert f.read() == output
-  else:
-    with open(filename, 'w') as f:
-      f.write(output)
+        js = yaml.load(f)
+    js = rebuild_as_ordered_dict(js, _TOP_LEVEL_KEYS)
+    for grp in ['filegroups', 'libs', 'targets']:
+        if grp not in js: continue
+        js[grp] = sorted(
+            [clean_elem(x) for x in js[grp]],
+            key=lambda x: (x.get('language', '_'), x['name']))
+    output = yaml.dump(js, indent=2, width=80, default_flow_style=False)
+    # massage out trailing whitespace
+    lines = []
+    for line in output.splitlines():
+        lines.append(line.rstrip() + '\n')
+    output = ''.join(lines)
+    if TEST:
+        with open(filename) as f:
+            assert f.read() == output
+    else:
+        with open(filename, 'w') as f:
+            f.write(output)
diff --git a/tools/buildgen/bunch.py b/tools/buildgen/bunch.py
index 813051a..f3bfc81 100755
--- a/tools/buildgen/bunch.py
+++ b/tools/buildgen/bunch.py
@@ -11,43 +11,43 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Allows dot-accessible dictionaries."""
 
 
 class Bunch(dict):
 
-  def __init__(self, d):
-    dict.__init__(self, d)
-    self.__dict__.update(d)
+    def __init__(self, d):
+        dict.__init__(self, d)
+        self.__dict__.update(d)
 
 
 # Converts any kind of variable to a Bunch
 def to_bunch(var):
-  if isinstance(var, list):
-    return [to_bunch(i) for i in var]
-  if isinstance(var, dict):
-    ret = {}
-    for k, v in var.items():
-      if isinstance(v, (list, dict)):
-        v = to_bunch(v)
-      ret[k] = v
-    return Bunch(ret)
-  else:
-    return var
+    if isinstance(var, list):
+        return [to_bunch(i) for i in var]
+    if isinstance(var, dict):
+        ret = {}
+        for k, v in var.items():
+            if isinstance(v, (list, dict)):
+                v = to_bunch(v)
+            ret[k] = v
+        return Bunch(ret)
+    else:
+        return var
 
 
 # Merges JSON 'add' into JSON 'dst'
 def merge_json(dst, add):
-  if isinstance(dst, dict) and isinstance(add, dict):
-    for k, v in add.items():
-      if k in dst:
-        if k == '#': continue
-        merge_json(dst[k], v)
-      else:
-        dst[k] = v
-  elif isinstance(dst, list) and isinstance(add, list):
-    dst.extend(add)
-  else:
-    raise Exception('Tried to merge incompatible objects %s %s\n\n%r\n\n%r' % (type(dst).__name__, type(add).__name__, dst, add))
-
+    if isinstance(dst, dict) and isinstance(add, dict):
+        for k, v in add.items():
+            if k in dst:
+                if k == '#': continue
+                merge_json(dst[k], v)
+            else:
+                dst[k] = v
+    elif isinstance(dst, list) and isinstance(add, list):
+        dst.extend(add)
+    else:
+        raise Exception(
+            'Tried to merge incompatible objects %s %s\n\n%r\n\n%r' %
+            (type(dst).__name__, type(add).__name__, dst, add))
diff --git a/tools/buildgen/generate_projects.py b/tools/buildgen/generate_projects.py
index d29cd02..bb5de9c 100755
--- a/tools/buildgen/generate_projects.py
+++ b/tools/buildgen/generate_projects.py
@@ -21,7 +21,9 @@
 import sys
 import tempfile
 import multiprocessing
-sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils'))
+sys.path.append(
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils'))
 
 assert sys.argv[1:], 'run generate_projects.sh instead of this directly'
 
@@ -45,57 +47,58 @@
 
 templates = args.templates
 if not templates:
-  for root, dirs, files in os.walk('templates'):
-    for f in files:
-      templates.append(os.path.join(root, f))
+    for root, dirs, files in os.walk('templates'):
+        for f in files:
+            templates.append(os.path.join(root, f))
 
 pre_jobs = []
 base_cmd = ['python2.7', 'tools/buildgen/mako_renderer.py']
 cmd = base_cmd[:]
 for plugin in plugins:
-  cmd.append('-p')
-  cmd.append(plugin)
+    cmd.append('-p')
+    cmd.append(plugin)
 for js in json:
-  cmd.append('-d')
-  cmd.append(js)
+    cmd.append('-d')
+    cmd.append(js)
 cmd.append('-w')
 preprocessed_build = '.preprocessed_build'
 cmd.append(preprocessed_build)
 if args.output_merged is not None:
-  cmd.append('-M')
-  cmd.append(args.output_merged)
-pre_jobs.append(jobset.JobSpec(cmd, shortname='preprocess', timeout_seconds=None))
+    cmd.append('-M')
+    cmd.append(args.output_merged)
+pre_jobs.append(
+    jobset.JobSpec(cmd, shortname='preprocess', timeout_seconds=None))
 
 jobs = []
 for template in reversed(sorted(templates)):
-  root, f = os.path.split(template)
-  if os.path.splitext(f)[1] == '.template':
-    out_dir = args.base + root[len('templates'):]
-    out = out_dir + '/' + os.path.splitext(f)[0]
-    if not os.path.exists(out_dir):
-      os.makedirs(out_dir)
-    cmd = base_cmd[:]
-    cmd.append('-P')
-    cmd.append(preprocessed_build)
-    cmd.append('-o')
-    if test is None:
-      cmd.append(out)
-    else:
-      tf = tempfile.mkstemp()
-      test[out] = tf[1]
-      os.close(tf[0])
-      cmd.append(test[out])
-    cmd.append(args.base + '/' + root + '/' + f)
-    jobs.append(jobset.JobSpec(cmd, shortname=out, timeout_seconds=None))
+    root, f = os.path.split(template)
+    if os.path.splitext(f)[1] == '.template':
+        out_dir = args.base + root[len('templates'):]
+        out = out_dir + '/' + os.path.splitext(f)[0]
+        if not os.path.exists(out_dir):
+            os.makedirs(out_dir)
+        cmd = base_cmd[:]
+        cmd.append('-P')
+        cmd.append(preprocessed_build)
+        cmd.append('-o')
+        if test is None:
+            cmd.append(out)
+        else:
+            tf = tempfile.mkstemp()
+            test[out] = tf[1]
+            os.close(tf[0])
+            cmd.append(test[out])
+        cmd.append(args.base + '/' + root + '/' + f)
+        jobs.append(jobset.JobSpec(cmd, shortname=out, timeout_seconds=None))
 
 jobset.run(pre_jobs, maxjobs=args.jobs)
 jobset.run(jobs, maxjobs=args.jobs)
 
 if test is not None:
-  for s, g in test.iteritems():
-    if os.path.isfile(g):
-      assert 0 == os.system('diff %s %s' % (s, g)), s
-      os.unlink(g)
-    else:
-      assert 0 == os.system('diff -r %s %s' % (s, g)), s
-      shutil.rmtree(g, ignore_errors=True)
+    for s, g in test.iteritems():
+        if os.path.isfile(g):
+            assert 0 == os.system('diff %s %s' % (s, g)), s
+            os.unlink(g)
+        else:
+            assert 0 == os.system('diff -r %s %s' % (s, g)), s
+            shutil.rmtree(g, ignore_errors=True)
diff --git a/tools/buildgen/mako_renderer.py b/tools/buildgen/mako_renderer.py
index 7738053..acd72bd 100755
--- a/tools/buildgen/mako_renderer.py
+++ b/tools/buildgen/mako_renderer.py
@@ -12,8 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-
 """Simple Mako renderer.
 
 Just a wrapper around the mako rendering library.
@@ -27,7 +25,6 @@
 import shutil
 import sys
 
-
 from mako.lookup import TemplateLookup
 from mako.runtime import Context
 from mako.template import Template
@@ -37,151 +34,158 @@
 
 # Imports a plugin
 def import_plugin(name):
-  _, base_ex = os.path.split(name)
-  base, _ = os.path.splitext(base_ex)
+    _, base_ex = os.path.split(name)
+    base, _ = os.path.splitext(base_ex)
 
-  with open(name, 'r') as plugin_file:
-    plugin_code = plugin_file.read()
-  plugin_module = imp.new_module(base)
-  exec plugin_code in plugin_module.__dict__
-  return plugin_module
+    with open(name, 'r') as plugin_file:
+        plugin_code = plugin_file.read()
+    plugin_module = imp.new_module(base)
+    exec plugin_code in plugin_module.__dict__
+    return plugin_module
 
 
 def out(msg):
-  print >> sys.stderr, msg
+    print >> sys.stderr, msg
 
 
 def showhelp():
-  out('mako-renderer.py [-o out] [-m cache] [-P preprocessed_input] [-d dict] [-d dict...]'
-      ' [-t template] [-w preprocessed_output]')
+    out('mako-renderer.py [-o out] [-m cache] [-P preprocessed_input] [-d dict] [-d dict...]'
+        ' [-t template] [-w preprocessed_output]')
 
 
 def main(argv):
-  got_input = False
-  module_directory = None
-  preprocessed_output = None
-  dictionary = {}
-  json_dict = {}
-  got_output = False
-  plugins = []
-  output_name = None
-  got_preprocessed_input = False
-  output_merged = None
+    got_input = False
+    module_directory = None
+    preprocessed_output = None
+    dictionary = {}
+    json_dict = {}
+    got_output = False
+    plugins = []
+    output_name = None
+    got_preprocessed_input = False
+    output_merged = None
 
-  try:
-    opts, args = getopt.getopt(argv, 'hM:m:d:o:p:t:P:w:')
-  except getopt.GetoptError:
-    out('Unknown option')
-    showhelp()
-    sys.exit(2)
-
-  for opt, arg in opts:
-    if opt == '-h':
-      out('Displaying showhelp')
-      showhelp()
-      sys.exit()
-    elif opt == '-o':
-      if got_output:
-        out('Got more than one output')
+    try:
+        opts, args = getopt.getopt(argv, 'hM:m:d:o:p:t:P:w:')
+    except getopt.GetoptError:
+        out('Unknown option')
         showhelp()
-        sys.exit(3)
-      got_output = True
-      output_name = arg
-    elif opt == '-m':
-      if module_directory is not None:
-        out('Got more than one cache directory')
+        sys.exit(2)
+
+    for opt, arg in opts:
+        if opt == '-h':
+            out('Displaying showhelp')
+            showhelp()
+            sys.exit()
+        elif opt == '-o':
+            if got_output:
+                out('Got more than one output')
+                showhelp()
+                sys.exit(3)
+            got_output = True
+            output_name = arg
+        elif opt == '-m':
+            if module_directory is not None:
+                out('Got more than one cache directory')
+                showhelp()
+                sys.exit(4)
+            module_directory = arg
+        elif opt == '-M':
+            if output_merged is not None:
+                out('Got more than one output merged path')
+                showhelp()
+                sys.exit(5)
+            output_merged = arg
+        elif opt == '-P':
+            assert not got_preprocessed_input
+            assert json_dict == {}
+            sys.path.insert(0,
+                            os.path.abspath(
+                                os.path.join(
+                                    os.path.dirname(sys.argv[0]), 'plugins')))
+            with open(arg, 'r') as dict_file:
+                dictionary = pickle.load(dict_file)
+            got_preprocessed_input = True
+        elif opt == '-d':
+            assert not got_preprocessed_input
+            with open(arg, 'r') as dict_file:
+                bunch.merge_json(json_dict, yaml.load(dict_file.read()))
+        elif opt == '-p':
+            plugins.append(import_plugin(arg))
+        elif opt == '-w':
+            preprocessed_output = arg
+
+    if not got_preprocessed_input:
+        for plugin in plugins:
+            plugin.mako_plugin(json_dict)
+        if output_merged:
+            with open(output_merged, 'w') as yaml_file:
+                yaml_file.write(yaml.dump(json_dict))
+        for k, v in json_dict.items():
+            dictionary[k] = bunch.to_bunch(v)
+
+    if preprocessed_output:
+        with open(preprocessed_output, 'w') as dict_file:
+            pickle.dump(dictionary, dict_file)
+
+    cleared_dir = False
+    for arg in args:
+        got_input = True
+        with open(arg) as f:
+            srcs = list(yaml.load_all(f.read()))
+        for src in srcs:
+            if isinstance(src, basestring):
+                assert len(srcs) == 1
+                template = Template(
+                    src,
+                    filename=arg,
+                    module_directory=module_directory,
+                    lookup=TemplateLookup(directories=['.']))
+                with open(output_name, 'w') as output_file:
+                    template.render_context(Context(output_file, **dictionary))
+            else:
+                # we have optional control data: this template represents
+                # a directory
+                if not cleared_dir:
+                    if not os.path.exists(output_name):
+                        pass
+                    elif os.path.isfile(output_name):
+                        os.unlink(output_name)
+                    else:
+                        shutil.rmtree(output_name, ignore_errors=True)
+                    cleared_dir = True
+                items = []
+                if 'foreach' in src:
+                    for el in dictionary[src['foreach']]:
+                        if 'cond' in src:
+                            args = dict(dictionary)
+                            args['selected'] = el
+                            if not eval(src['cond'], {}, args):
+                                continue
+                        items.append(el)
+                    assert items
+                else:
+                    items = [None]
+                for item in items:
+                    args = dict(dictionary)
+                    args['selected'] = item
+                    item_output_name = os.path.join(
+                        output_name,
+                        Template(src['output_name']).render(**args))
+                    if not os.path.exists(os.path.dirname(item_output_name)):
+                        os.makedirs(os.path.dirname(item_output_name))
+                    template = Template(
+                        src['template'],
+                        filename=arg,
+                        module_directory=module_directory,
+                        lookup=TemplateLookup(directories=['.']))
+                    with open(item_output_name, 'w') as output_file:
+                        template.render_context(Context(output_file, **args))
+
+    if not got_input and not preprocessed_output:
+        out('Got nothing to do')
         showhelp()
-        sys.exit(4)
-      module_directory = arg
-    elif opt == '-M':
-      if output_merged is not None:
-        out('Got more than one output merged path')
-        showhelp()
-        sys.exit(5)
-      output_merged = arg
-    elif opt == '-P':
-      assert not got_preprocessed_input
-      assert json_dict == {}
-      sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'plugins')))
-      with open(arg, 'r') as dict_file:
-        dictionary = pickle.load(dict_file)
-      got_preprocessed_input = True
-    elif opt == '-d':
-      assert not got_preprocessed_input
-      with open(arg, 'r') as dict_file:
-        bunch.merge_json(json_dict, yaml.load(dict_file.read()))
-    elif opt == '-p':
-      plugins.append(import_plugin(arg))
-    elif opt == '-w':
-      preprocessed_output = arg
 
-  if not got_preprocessed_input:
-    for plugin in plugins:
-      plugin.mako_plugin(json_dict)
-    if output_merged:
-      with open(output_merged, 'w') as yaml_file:
-        yaml_file.write(yaml.dump(json_dict))
-    for k, v in json_dict.items():
-      dictionary[k] = bunch.to_bunch(v)
-
-  if preprocessed_output:
-    with open(preprocessed_output, 'w') as dict_file:
-      pickle.dump(dictionary, dict_file)
-
-  cleared_dir = False
-  for arg in args:
-    got_input = True
-    with open(arg) as f:
-      srcs = list(yaml.load_all(f.read()))
-    for src in srcs:
-      if isinstance(src, basestring):
-        assert len(srcs) == 1
-        template = Template(src,
-                            filename=arg,
-                            module_directory=module_directory,
-                            lookup=TemplateLookup(directories=['.']))
-        with open(output_name, 'w') as output_file:
-          template.render_context(Context(output_file, **dictionary))
-      else:
-        # we have optional control data: this template represents
-        # a directory
-        if not cleared_dir:
-          if not os.path.exists(output_name):
-            pass
-          elif os.path.isfile(output_name):
-            os.unlink(output_name)
-          else:
-            shutil.rmtree(output_name, ignore_errors=True)
-          cleared_dir = True
-        items = []
-        if 'foreach' in src:
-          for el in dictionary[src['foreach']]:
-            if 'cond' in src:
-              args = dict(dictionary)
-              args['selected'] = el
-              if not eval(src['cond'], {}, args):
-                continue
-            items.append(el)
-          assert items
-        else:
-          items = [None]
-        for item in items:
-          args = dict(dictionary)
-          args['selected'] = item
-          item_output_name = os.path.join(
-              output_name, Template(src['output_name']).render(**args))
-          if not os.path.exists(os.path.dirname(item_output_name)):
-            os.makedirs(os.path.dirname(item_output_name))
-          template = Template(src['template'],
-                              filename=arg,
-                              module_directory=module_directory,
-                              lookup=TemplateLookup(directories=['.']))
-          with open(item_output_name, 'w') as output_file:
-            template.render_context(Context(output_file, **args))
-
-  if not got_input and not preprocessed_output:
-    out('Got nothing to do')
-    showhelp()
 
 if __name__ == '__main__':
-  main(sys.argv[1:])
+    main(sys.argv[1:])
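
The renderer above boils down to three steps: merge one or more YAML dictionaries into a single context, wrap the values in attribute-style bunches, and render each Mako template against that context (either to a single output file or, when the template carries control data, into a directory of generated files). A minimal standalone sketch of that core render step, assuming the mako and PyYAML packages are available; the YAML text and template below are invented for illustration and are not part of the tool:

import yaml
from mako.template import Template

# Invented stand-in for a merged build.yaml dictionary.
build_yaml = """
settings:
  version: 1.9.0-dev
libs:
  - name: gpr
  - name: grpc
"""

dictionary = yaml.safe_load(build_yaml)

# Render a tiny Mako template against that dictionary, as the tool does
# (the real renderer also uses a TemplateLookup and a module cache directory).
template = Template("Libraries for ${settings['version']}:\n"
                    "% for lib in libs:\n"
                    "  - ${lib['name']}\n"
                    "% endfor\n")
print(template.render(**dictionary))

The real script additionally caches compiled templates via -m/module_directory and can pickle the fully merged dictionary (-w) so that later invocations loading it with -P skip the plugin passes entirely.
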
diff --git a/tools/buildgen/plugins/expand_bin_attrs.py b/tools/buildgen/plugins/expand_bin_attrs.py
index 6ad6e9c..d5acd8d 100755
--- a/tools/buildgen/plugins/expand_bin_attrs.py
+++ b/tools/buildgen/plugins/expand_bin_attrs.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen expand binary attributes plugin.
 
 This fills in any optional attributes.
@@ -20,7 +19,7 @@
 
 
 def mako_plugin(dictionary):
-  """The exported plugin code for expand_filegroups.
+    """The exported plugin code for expand_filegroups.
 
   The list of libs in the build.yaml file can contain "filegroups" tags.
   These refer to the filegroups in the root object. We will expand and
@@ -28,20 +27,20 @@
 
   """
 
-  targets = dictionary.get('targets')
-  default_platforms = ['windows', 'posix', 'linux', 'mac']
+    targets = dictionary.get('targets')
+    default_platforms = ['windows', 'posix', 'linux', 'mac']
 
-  for tgt in targets:
-    tgt['flaky'] = tgt.get('flaky', False)
-    tgt['platforms'] = sorted(tgt.get('platforms', default_platforms))
-    tgt['ci_platforms'] = sorted(tgt.get('ci_platforms', tgt['platforms']))
-    tgt['boringssl'] = tgt.get('boringssl', False)
-    tgt['zlib'] = tgt.get('zlib', False)
-    tgt['ares'] = tgt.get('ares', False)
-    tgt['gtest'] = tgt.get('gtest', False)
+    for tgt in targets:
+        tgt['flaky'] = tgt.get('flaky', False)
+        tgt['platforms'] = sorted(tgt.get('platforms', default_platforms))
+        tgt['ci_platforms'] = sorted(tgt.get('ci_platforms', tgt['platforms']))
+        tgt['boringssl'] = tgt.get('boringssl', False)
+        tgt['zlib'] = tgt.get('zlib', False)
+        tgt['ares'] = tgt.get('ares', False)
+        tgt['gtest'] = tgt.get('gtest', False)
 
-  libs = dictionary.get('libs')
-  for lib in libs:
-    lib['boringssl'] = lib.get('boringssl', False)
-    lib['zlib'] = lib.get('zlib', False)
-    lib['ares'] = lib.get('ares', False)
+    libs = dictionary.get('libs')
+    for lib in libs:
+        lib['boringssl'] = lib.get('boringssl', False)
+        lib['zlib'] = lib.get('zlib', False)
+        lib['ares'] = lib.get('ares', False)
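
This plugin is a straight default-filling pass: every optional attribute on a target or library is populated in place with dict.get and a fallback. A toy run of the same pattern (target names invented):

# Default-filling pattern used by expand_bin_attrs, on invented targets.
default_platforms = ['windows', 'posix', 'linux', 'mac']
targets = [{'name': 'alarm_test'},
           {'name': 'fling_test', 'platforms': ['linux']}]

for tgt in targets:
    tgt['flaky'] = tgt.get('flaky', False)
    tgt['platforms'] = sorted(tgt.get('platforms', default_platforms))
    tgt['ci_platforms'] = sorted(tgt.get('ci_platforms', tgt['platforms']))

print(targets[0]['platforms'])     # ['linux', 'mac', 'posix', 'windows']
print(targets[1]['ci_platforms'])  # ['linux']
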
diff --git a/tools/buildgen/plugins/expand_filegroups.py b/tools/buildgen/plugins/expand_filegroups.py
index 6697040..99d9463 100755
--- a/tools/buildgen/plugins/expand_filegroups.py
+++ b/tools/buildgen/plugins/expand_filegroups.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen expand filegroups plugin.
 
 This takes the list of libs from our yaml dictionary,
@@ -21,132 +20,134 @@
 
 
 def excluded(filename, exclude_res):
-  for r in exclude_res:
-    if r.search(filename):
-      return True
-  return False
+    for r in exclude_res:
+        if r.search(filename):
+            return True
+    return False
 
 
 def uniquify(lst):
-  out = []
-  for el in lst:
-    if el not in out:
-      out.append(el)
-  return out
+    out = []
+    for el in lst:
+        if el not in out:
+            out.append(el)
+    return out
 
 
 FILEGROUP_LISTS = ['src', 'headers', 'public_headers', 'deps']
 
-
 FILEGROUP_DEFAULTS = {
-  'language': 'c',
-  'boringssl': False,
-  'zlib': False,
-  'ares': False,
+    'language': 'c',
+    'boringssl': False,
+    'zlib': False,
+    'ares': False,
 }
 
 
 def mako_plugin(dictionary):
-  """The exported plugin code for expand_filegroups.
+    """The exported plugin code for expand_filegroups.
 
   The list of libs in the build.yaml file can contain "filegroups" tags.
   These refer to the filegroups in the root object. We will expand and
   merge filegroups on the src, headers and public_headers properties.
 
   """
-  libs = dictionary.get('libs')
-  targets = dictionary.get('targets')
-  filegroups_list = dictionary.get('filegroups')
-  filegroups = {}
+    libs = dictionary.get('libs')
+    targets = dictionary.get('targets')
+    filegroups_list = dictionary.get('filegroups')
+    filegroups = {}
 
-  for fg in filegroups_list:
-    for lst in FILEGROUP_LISTS:
-      fg[lst] = fg.get(lst, [])
-      fg['own_%s' % lst] = list(fg[lst])
-    for attr, val in FILEGROUP_DEFAULTS.iteritems():
-      if attr not in fg:
-        fg[attr] = val
-
-  todo = list(filegroups_list)
-  skips = 0
-
-  while todo:
-    assert skips != len(todo), "infinite loop in filegroup uses clauses: %r" % [t['name'] for t in todo]
-    # take the first element of the todo list
-    cur = todo[0]
-    todo = todo[1:]
-    # check all uses filegroups are present (if no, skip and come back later)
-    skip = False
-    for uses in cur.get('uses', []):
-      if uses not in filegroups:
-        skip = True
-    if skip:
-      skips += 1
-      todo.append(cur)
-    else:
-      skips = 0
-      assert 'plugins' not in cur
-      plugins = []
-      for uses in cur.get('uses', []):
-        for plugin in filegroups[uses]['plugins']:
-          if plugin not in plugins:
-            plugins.append(plugin)
+    for fg in filegroups_list:
         for lst in FILEGROUP_LISTS:
-          vals = cur.get(lst, [])
-          vals.extend(filegroups[uses].get(lst, []))
-          cur[lst] = vals
-      cur_plugin_name = cur.get('plugin')
-      if cur_plugin_name:
-        plugins.append(cur_plugin_name)
-      cur['plugins'] = plugins
-      filegroups[cur['name']] = cur
+            fg[lst] = fg.get(lst, [])
+            fg['own_%s' % lst] = list(fg[lst])
+        for attr, val in FILEGROUP_DEFAULTS.iteritems():
+            if attr not in fg:
+                fg[attr] = val
 
-  # build reverse dependency map
-  things = {}
-  for thing in dictionary['libs'] + dictionary['targets'] + dictionary['filegroups']:
-    things[thing['name']] = thing
-    thing['used_by'] = []
-  thing_deps = lambda t: t.get('uses', []) + t.get('filegroups', []) + t.get('deps', [])
-  for thing in things.itervalues():
-    done = set()
-    todo = thing_deps(thing)
+    todo = list(filegroups_list)
+    skips = 0
+
     while todo:
-      cur = todo[0]
-      todo = todo[1:]
-      if cur in done: continue
-      things[cur]['used_by'].append(thing['name'])
-      todo.extend(thing_deps(things[cur]))
-      done.add(cur)
+        assert skips != len(
+            todo), "infinite loop in filegroup uses clauses: %r" % [
+                t['name'] for t in todo
+            ]
+        # take the first element of the todo list
+        cur = todo[0]
+        todo = todo[1:]
+        # check all uses filegroups are present (if no, skip and come back later)
+        skip = False
+        for uses in cur.get('uses', []):
+            if uses not in filegroups:
+                skip = True
+        if skip:
+            skips += 1
+            todo.append(cur)
+        else:
+            skips = 0
+            assert 'plugins' not in cur
+            plugins = []
+            for uses in cur.get('uses', []):
+                for plugin in filegroups[uses]['plugins']:
+                    if plugin not in plugins:
+                        plugins.append(plugin)
+                for lst in FILEGROUP_LISTS:
+                    vals = cur.get(lst, [])
+                    vals.extend(filegroups[uses].get(lst, []))
+                    cur[lst] = vals
+            cur_plugin_name = cur.get('plugin')
+            if cur_plugin_name:
+                plugins.append(cur_plugin_name)
+            cur['plugins'] = plugins
+            filegroups[cur['name']] = cur
 
-  # the above expansion can introduce duplicate filenames: contract them here
-  for fg in filegroups.itervalues():
-    for lst in FILEGROUP_LISTS:
-      fg[lst] = uniquify(fg.get(lst, []))
+    # build reverse dependency map
+    things = {}
+    for thing in dictionary['libs'] + dictionary['targets'] + dictionary['filegroups']:
+        things[thing['name']] = thing
+        thing['used_by'] = []
+    thing_deps = lambda t: t.get('uses', []) + t.get('filegroups', []) + t.get('deps', [])
+    for thing in things.itervalues():
+        done = set()
+        todo = thing_deps(thing)
+        while todo:
+            cur = todo[0]
+            todo = todo[1:]
+            if cur in done: continue
+            things[cur]['used_by'].append(thing['name'])
+            todo.extend(thing_deps(things[cur]))
+            done.add(cur)
 
-  for tgt in dictionary['targets']:
-    for lst in FILEGROUP_LISTS:
-      tgt[lst] = tgt.get(lst, [])
-      tgt['own_%s' % lst] = list(tgt[lst])
+    # the above expansion can introduce duplicate filenames: contract them here
+    for fg in filegroups.itervalues():
+        for lst in FILEGROUP_LISTS:
+            fg[lst] = uniquify(fg.get(lst, []))
 
-  for lib in libs + targets:
-    assert 'plugins' not in lib
-    plugins = []
-    for lst in FILEGROUP_LISTS:
-      vals = lib.get(lst, [])
-      lib[lst] = list(vals)
-      lib['own_%s' % lst] = list(vals)
-    for fg_name in lib.get('filegroups', []):
-      fg = filegroups[fg_name]
-      for plugin in fg['plugins']:
-        if plugin not in plugins:
-          plugins.append(plugin)
-      for lst in FILEGROUP_LISTS:
-        vals = lib.get(lst, [])
-        vals.extend(fg.get(lst, []))
-        lib[lst] = vals
-      lib['plugins'] = plugins
-    if lib.get('generate_plugin_registry', False):
-      lib['src'].append('src/core/plugin_registry/%s_plugin_registry.cc' %
-                        lib['name'])
-    for lst in FILEGROUP_LISTS:
-      lib[lst] = uniquify(lib.get(lst, []))
+    for tgt in dictionary['targets']:
+        for lst in FILEGROUP_LISTS:
+            tgt[lst] = tgt.get(lst, [])
+            tgt['own_%s' % lst] = list(tgt[lst])
+
+    for lib in libs + targets:
+        assert 'plugins' not in lib
+        plugins = []
+        for lst in FILEGROUP_LISTS:
+            vals = lib.get(lst, [])
+            lib[lst] = list(vals)
+            lib['own_%s' % lst] = list(vals)
+        for fg_name in lib.get('filegroups', []):
+            fg = filegroups[fg_name]
+            for plugin in fg['plugins']:
+                if plugin not in plugins:
+                    plugins.append(plugin)
+            for lst in FILEGROUP_LISTS:
+                vals = lib.get(lst, [])
+                vals.extend(fg.get(lst, []))
+                lib[lst] = vals
+            lib['plugins'] = plugins
+        if lib.get('generate_plugin_registry', False):
+            lib['src'].append(
+                'src/core/plugin_registry/%s_plugin_registry.cc' % lib['name'])
+        for lst in FILEGROUP_LISTS:
+            lib[lst] = uniquify(lib.get(lst, []))
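
The interesting part of this plugin is the worklist that expands 'uses' clauses: a filegroup is processed only once everything it uses has already been expanded, otherwise it is pushed back onto the queue, and the skips counter turns an unresolvable cycle into an assertion instead of an infinite loop. A self-contained sketch of that loop, with three invented filegroups and only a 'src' list to keep it short:

# Worklist expansion of filegroup 'uses' clauses (filegroup names invented).
filegroups_list = [
    {'name': 'grpc_base', 'src': ['base.cc']},
    {'name': 'grpc_client_channel', 'src': ['client_channel.cc'],
     'uses': ['grpc_base']},
    {'name': 'grpc_secure', 'src': ['secure.cc'],
     'uses': ['grpc_client_channel']},
]

filegroups = {}
todo = list(filegroups_list)
skips = 0
while todo:
    assert skips != len(todo), 'cycle in filegroup uses clauses'
    cur = todo.pop(0)
    if any(u not in filegroups for u in cur.get('uses', [])):
        skips += 1
        todo.append(cur)   # a dependency is not expanded yet; retry later
        continue
    skips = 0
    for u in cur.get('uses', []):
        cur['src'] = cur['src'] + filegroups[u]['src']
    filegroups[cur['name']] = cur

print(filegroups['grpc_secure']['src'])
# ['secure.cc', 'client_channel.cc', 'base.cc']
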
diff --git a/tools/buildgen/plugins/expand_version.py b/tools/buildgen/plugins/expand_version.py
index d8a3600..8f56ce8 100755
--- a/tools/buildgen/plugins/expand_version.py
+++ b/tools/buildgen/plugins/expand_version.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen package version plugin
 
 This parses the list of targets from the yaml build file, and creates
@@ -19,84 +18,90 @@
 
 """
 
-
 import re
 
 LANGUAGES = [
-  'core',
-  'cpp',
-  'csharp',
-  'objc',
-  'php',
-  'python',
-  'ruby',
-  ]
+    'core',
+    'cpp',
+    'csharp',
+    'objc',
+    'php',
+    'python',
+    'ruby',
+]
+
 
 class Version:
 
-  def __init__(self, s):
-    self.tag = None
-    if '-' in s:
-      s, self.tag = s.split('-')
-    self.major, self.minor, self.patch = [int(x) for x in s.split('.')]
+    def __init__(self, s):
+        self.tag = None
+        if '-' in s:
+            s, self.tag = s.split('-')
+        self.major, self.minor, self.patch = [int(x) for x in s.split('.')]
 
-  def __str__(self):
-    """Version string in a somewhat idiomatic style for most languages"""
-    s = '%d.%d.%d' % (self.major, self.minor, self.patch)
-    if self.tag:
-      s += '-%s' % self.tag
-    return s
+    def __str__(self):
+        """Version string in a somewhat idiomatic style for most languages"""
+        s = '%d.%d.%d' % (self.major, self.minor, self.patch)
+        if self.tag:
+            s += '-%s' % self.tag
+        return s
 
-  def pep440(self):
-    """Version string in Python PEP440 style"""
-    s = '%d.%d.%d' % (self.major, self.minor, self.patch)
-    if self.tag:
-      # we need to translate from grpc version tags to pep440 version
-      # tags; this code is likely to be a little ad-hoc
-      if self.tag == 'dev':
-        s += '.dev0'
-      elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
-        s += 'rc%d' % int(self.tag[3:])
-      else:
-        raise Exception('Don\'t know how to translate version tag "%s" to pep440' % self.tag)
-    return s
+    def pep440(self):
+        """Version string in Python PEP440 style"""
+        s = '%d.%d.%d' % (self.major, self.minor, self.patch)
+        if self.tag:
+            # we need to translate from grpc version tags to pep440 version
+            # tags; this code is likely to be a little ad-hoc
+            if self.tag == 'dev':
+                s += '.dev0'
+            elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
+                s += 'rc%d' % int(self.tag[3:])
+            else:
+                raise Exception(
+                    'Don\'t know how to translate version tag "%s" to pep440' %
+                    self.tag)
+        return s
 
-  def ruby(self):
-    """Version string in Ruby style"""
-    if self.tag:
-      return '%d.%d.%d.%s' % (self.major, self.minor, self.patch, self.tag)
-    else:
-      return '%d.%d.%d' % (self.major, self.minor, self.patch)
+    def ruby(self):
+        """Version string in Ruby style"""
+        if self.tag:
+            return '%d.%d.%d.%s' % (self.major, self.minor, self.patch,
+                                    self.tag)
+        else:
+            return '%d.%d.%d' % (self.major, self.minor, self.patch)
 
-  def php(self):
-    """Version string for PHP PECL package"""
-    s = '%d.%d.%d' % (self.major, self.minor, self.patch)
-    if self.tag:
-      if self.tag == 'dev':
-        s += 'dev'
-      elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
-        s += 'RC%d' % int(self.tag[3:])
-      else:
-        raise Exception('Don\'t know how to translate version tag "%s" to PECL version' % self.tag)
-    return s
+    def php(self):
+        """Version string for PHP PECL package"""
+        s = '%d.%d.%d' % (self.major, self.minor, self.patch)
+        if self.tag:
+            if self.tag == 'dev':
+                s += 'dev'
+            elif len(self.tag) >= 3 and self.tag[0:3] == 'pre':
+                s += 'RC%d' % int(self.tag[3:])
+            else:
+                raise Exception(
+                    'Don\'t know how to translate version tag "%s" to PECL version'
+                    % self.tag)
+        return s
 
-  def php_composer(self):
-    """Version string for PHP Composer package"""
-    return '%d.%d.%d' % (self.major, self.minor, self.patch)
+    def php_composer(self):
+        """Version string for PHP Composer package"""
+        return '%d.%d.%d' % (self.major, self.minor, self.patch)
+
 
 def mako_plugin(dictionary):
-  """Expand version numbers:
+    """Expand version numbers:
      - for each language, ensure there's a language_version tag in
        settings (defaulting to the master version tag)
      - expand version strings to major, minor, patch, and tag
   """
 
-  settings = dictionary['settings']
-  master_version = Version(settings['version'])
-  settings['version'] = master_version
-  for language in LANGUAGES:
-    version_tag = '%s_version' % language
-    if version_tag in settings:
-      settings[version_tag] = Version(settings[version_tag])
-    else:
-      settings[version_tag] = master_version
+    settings = dictionary['settings']
+    master_version = Version(settings['version'])
+    settings['version'] = master_version
+    for language in LANGUAGES:
+        version_tag = '%s_version' % language
+        if version_tag in settings:
+            settings[version_tag] = Version(settings[version_tag])
+        else:
+            settings[version_tag] = master_version
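
Each language can pin its own version (for example a python_version setting, falling back to the master version), and every version string is parsed into major/minor/patch plus an optional tag that is re-spelled per packaging convention: '-dev' becomes '.dev0' for PEP 440, a fourth dotted component for Ruby, and 'dev'/'RCn' for PECL. A condensed, runnable restatement of just the PEP 440 branch:

# Standalone restatement of the pep440() translation above.
def pep440(s):
    tag = None
    if '-' in s:
        s, tag = s.split('-')
    major, minor, patch = (int(x) for x in s.split('.'))
    out = '%d.%d.%d' % (major, minor, patch)
    if tag == 'dev':
        out += '.dev0'
    elif tag and tag.startswith('pre'):
        out += 'rc%d' % int(tag[3:])
    return out

print(pep440('1.9.0-dev'))   # 1.9.0.dev0
print(pep440('1.9.0-pre1'))  # 1.9.0rc1
print(pep440('1.8.3'))       # 1.8.3
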
diff --git a/tools/buildgen/plugins/generate_vsprojects.py b/tools/buildgen/plugins/generate_vsprojects.py
index 06755f6..f7ef492 100755
--- a/tools/buildgen/plugins/generate_vsprojects.py
+++ b/tools/buildgen/plugins/generate_vsprojects.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen vsprojects plugin.
 
 This parses the list of libraries, and generates globals "vsprojects"
@@ -19,62 +18,68 @@
 
 """
 
-
 import hashlib
 import re
 
 
 def mako_plugin(dictionary):
-  """The exported plugin code for generate_vsprojeccts
+    """The exported plugin code for generate_vsprojeccts
 
   We want to help the work of the visual studio generators.
 
   """
 
-  libs = dictionary.get('libs', [])
-  targets = dictionary.get('targets', [])
+    libs = dictionary.get('libs', [])
+    targets = dictionary.get('targets', [])
 
-  for lib in libs:
-    lib['is_library'] = True
-  for target in targets:
-    target['is_library'] = False
+    for lib in libs:
+        lib['is_library'] = True
+    for target in targets:
+        target['is_library'] = False
 
-  projects = []
-  projects.extend(libs)
-  projects.extend(targets)
-  for target in projects:
-    if 'build' in target and target['build'] == 'test':
-      default_test_dir = 'test'
-    else:
-      default_test_dir = '.'
-    if 'vs_config_type' not in target:
-      if 'build' in target and target['build'] == 'test':
-        target['vs_config_type'] = 'Application'
-      else:
-        target['vs_config_type'] = 'StaticLibrary'
-    if 'vs_packages' not in target:
-      target['vs_packages'] = []
-    if 'vs_props' not in target:
-      target['vs_props'] = []
-    target['vs_proj_dir'] = target.get('vs_proj_dir', default_test_dir)
-    if target.get('vs_project_guid', None) is None and 'windows' in target.get('platforms', ['windows']):
-      name = target['name']
-      guid = re.sub('(........)(....)(....)(....)(.*)',
-             r'{\1-\2-\3-\4-\5}',
-             hashlib.md5(name).hexdigest())
-      target['vs_project_guid'] = guid.upper()
-  # Exclude projects without a visual project guid, such as the tests.
-  projects = [project for project in projects
-                if project.get('vs_project_guid', None)]
+    projects = []
+    projects.extend(libs)
+    projects.extend(targets)
+    for target in projects:
+        if 'build' in target and target['build'] == 'test':
+            default_test_dir = 'test'
+        else:
+            default_test_dir = '.'
+        if 'vs_config_type' not in target:
+            if 'build' in target and target['build'] == 'test':
+                target['vs_config_type'] = 'Application'
+            else:
+                target['vs_config_type'] = 'StaticLibrary'
+        if 'vs_packages' not in target:
+            target['vs_packages'] = []
+        if 'vs_props' not in target:
+            target['vs_props'] = []
+        target['vs_proj_dir'] = target.get('vs_proj_dir', default_test_dir)
+        if target.get('vs_project_guid',
+                      None) is None and 'windows' in target.get(
+                          'platforms', ['windows']):
+            name = target['name']
+            guid = re.sub('(........)(....)(....)(....)(.*)',
+                          r'{\1-\2-\3-\4-\5}',
+                          hashlib.md5(name).hexdigest())
+            target['vs_project_guid'] = guid.upper()
+    # Exclude projects without a visual project guid, such as the tests.
+    projects = [
+        project for project in projects if project.get('vs_project_guid', None)
+    ]
 
-  projects = [project for project in projects
-                if project['language'] != 'c++' or project['build'] == 'all' or project['build'] == 'protoc' or (project['language'] == 'c++' and  (project['build'] == 'test' or project['build'] == 'private'))]
+    projects = [
+        project for project in projects
+        if project['language'] != 'c++' or project['build'] == 'all' or
+        project['build'] == 'protoc' or (project['language'] == 'c++' and (
+            project['build'] == 'test' or project['build'] == 'private'))
+    ]
 
-  project_dict = dict([(p['name'], p) for p in projects])
+    project_dict = dict([(p['name'], p) for p in projects])
 
-  packages = dictionary.get('vspackages', [])
-  packages_dict = dict([(p['name'], p) for p in packages])
+    packages = dictionary.get('vspackages', [])
+    packages_dict = dict([(p['name'], p) for p in packages])
 
-  dictionary['vsprojects'] = projects
-  dictionary['vsproject_dict'] = project_dict
-  dictionary['vspackages_dict'] = packages_dict
+    dictionary['vsprojects'] = projects
+    dictionary['vsproject_dict'] = project_dict
+    dictionary['vspackages_dict'] = packages_dict
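
Project GUIDs have to stay stable across regenerations, so rather than random UUIDs the plugin derives them from the md5 of the target name, reformatted into the usual 8-4-4-4-12 grouping. The same derivation on its own (target name invented; .encode() added here for Python 3):

import hashlib
import re

name = 'grpc_example_target'                       # invented target name
digest = hashlib.md5(name.encode()).hexdigest()    # 32 hex characters
guid = re.sub('(........)(....)(....)(....)(.*)',
              r'{\1-\2-\3-\4-\5}', digest).upper()
print(guid)  # a stable {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}-style GUID
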
diff --git a/tools/buildgen/plugins/list_api.py b/tools/buildgen/plugins/list_api.py
index bed98da..f7ecb97 100755
--- a/tools/buildgen/plugins/list_api.py
+++ b/tools/buildgen/plugins/list_api.py
@@ -21,44 +21,47 @@
 import sys
 import yaml
 
-
 _RE_API = r'(?:GPRAPI|GRPCAPI|CENSUSAPI)([^;]*);'
 
 
 def list_c_apis(filenames):
-  for filename in filenames:
-    with open(filename, 'r') as f:
-      text = f.read()
-    for m in re.finditer(_RE_API, text):
-      api_declaration = re.sub('[ \r\n\t]+', ' ', m.group(1))
-      type_and_name, args_and_close = api_declaration.split('(', 1)
-      args = args_and_close[:args_and_close.rfind(')')].strip()
-      last_space = type_and_name.rfind(' ')
-      last_star = type_and_name.rfind('*')
-      type_end = max(last_space, last_star)
-      return_type = type_and_name[0:type_end+1].strip()
-      name = type_and_name[type_end+1:].strip()
-      yield {'return_type': return_type, 'name': name, 'arguments': args, 'header': filename}
+    for filename in filenames:
+        with open(filename, 'r') as f:
+            text = f.read()
+        for m in re.finditer(_RE_API, text):
+            api_declaration = re.sub('[ \r\n\t]+', ' ', m.group(1))
+            type_and_name, args_and_close = api_declaration.split('(', 1)
+            args = args_and_close[:args_and_close.rfind(')')].strip()
+            last_space = type_and_name.rfind(' ')
+            last_star = type_and_name.rfind('*')
+            type_end = max(last_space, last_star)
+            return_type = type_and_name[0:type_end + 1].strip()
+            name = type_and_name[type_end + 1:].strip()
+            yield {
+                'return_type': return_type,
+                'name': name,
+                'arguments': args,
+                'header': filename
+            }
 
 
 def headers_under(directory):
-  for root, dirnames, filenames in os.walk(directory):
-    for filename in fnmatch.filter(filenames, '*.h'):
-      yield os.path.join(root, filename)
+    for root, dirnames, filenames in os.walk(directory):
+        for filename in fnmatch.filter(filenames, '*.h'):
+            yield os.path.join(root, filename)
 
 
 def mako_plugin(dictionary):
-  apis = []
-  headers = []
+    apis = []
+    headers = []
 
-  for lib in dictionary['libs']:
-    if lib['name'] in ['grpc', 'gpr']:
-      headers.extend(lib['public_headers'])
+    for lib in dictionary['libs']:
+        if lib['name'] in ['grpc', 'gpr']:
+            headers.extend(lib['public_headers'])
 
-  apis.extend(list_c_apis(sorted(set(headers))))
-  dictionary['c_apis'] = apis
+    apis.extend(list_c_apis(sorted(set(headers))))
+    dictionary['c_apis'] = apis
 
 
 if __name__ == '__main__':
-  print yaml.dump([api for api in list_c_apis(headers_under('include/grpc'))])
-
+    print yaml.dump([api for api in list_c_apis(headers_under('include/grpc'))])
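
The API listing is purely textual: a regex grabs everything between a GPRAPI/GRPCAPI/CENSUSAPI marker and the next semicolon, the declaration is split at the first '(', and the return type is whatever precedes the last space or '*' of the left-hand part. The same steps applied to one simplified, invented declaration:

import re

_RE_API = r'(?:GPRAPI|GRPCAPI|CENSUSAPI)([^;]*);'
# Invented, simplified declaration for illustration only.
text = 'GRPCAPI grpc_channel* grpc_channel_create(const char* target);'

for m in re.finditer(_RE_API, text):
    decl = re.sub('[ \r\n\t]+', ' ', m.group(1))
    type_and_name, rest = decl.split('(', 1)
    args = rest[:rest.rfind(')')].strip()
    type_end = max(type_and_name.rfind(' '), type_and_name.rfind('*'))
    print({'return_type': type_and_name[:type_end + 1].strip(),
           'name': type_and_name[type_end + 1:].strip(),
           'arguments': args})
# {'return_type': 'grpc_channel*', 'name': 'grpc_channel_create',
#  'arguments': 'const char* target'}
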
diff --git a/tools/buildgen/plugins/list_protos.py b/tools/buildgen/plugins/list_protos.py
index 07a860c..0aa5fe5 100755
--- a/tools/buildgen/plugins/list_protos.py
+++ b/tools/buildgen/plugins/list_protos.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen .proto files list plugin.
 
 This parses the list of targets from the yaml build file, and creates
@@ -19,12 +18,11 @@
 
 """
 
-
 import re
 
 
 def mako_plugin(dictionary):
-  """The exported plugin code for list_protos.
+    """The exported plugin code for list_protos.
 
   Some projects generators may want to get the full list of unique .proto files
   that are being included in a project. This code extracts all files referenced
@@ -33,23 +31,23 @@
 
   """
 
-  libs = dictionary.get('libs', [])
-  targets = dictionary.get('targets', [])
+    libs = dictionary.get('libs', [])
+    targets = dictionary.get('targets', [])
 
-  proto_re = re.compile('(.*)\\.proto')
+    proto_re = re.compile('(.*)\\.proto')
 
-  protos = set()
-  for lib in libs:
-    for src in lib.get('src', []):
-      m = proto_re.match(src)
-      if m:
-        protos.add(m.group(1))
-  for tgt in targets:
-    for src in tgt.get('src', []):
-      m = proto_re.match(src)
-      if m:
-        protos.add(m.group(1))
+    protos = set()
+    for lib in libs:
+        for src in lib.get('src', []):
+            m = proto_re.match(src)
+            if m:
+                protos.add(m.group(1))
+    for tgt in targets:
+        for src in tgt.get('src', []):
+            m = proto_re.match(src)
+            if m:
+                protos.add(m.group(1))
 
-  protos = sorted(protos)
+    protos = sorted(protos)
 
-  dictionary['protos'] = protos
+    dictionary['protos'] = protos
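
All this plugin does is collect the unique 'src' entries ending in .proto, with the extension stripped, across libs and targets. Equivalent logic on invented paths:

import re

proto_re = re.compile('(.*)\\.proto')
srcs = ['src/proto/grpc/testing/echo.proto',    # invented example entries
        'src/core/lib/surface/call.cc']
protos = sorted({m.group(1) for s in srcs
                 for m in [proto_re.match(s)] if m})
print(protos)  # ['src/proto/grpc/testing/echo']
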
diff --git a/tools/buildgen/plugins/make_fuzzer_tests.py b/tools/buildgen/plugins/make_fuzzer_tests.py
index 56dad2d..f644a7c 100644
--- a/tools/buildgen/plugins/make_fuzzer_tests.py
+++ b/tools/buildgen/plugins/make_fuzzer_tests.py
@@ -11,35 +11,37 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Create tests for each fuzzer"""
 
 import copy
 import glob
 
+
 def mako_plugin(dictionary):
-  targets = dictionary['targets']
-  tests = dictionary['tests']
-  for tgt in targets:
-    if tgt['build'] == 'fuzzer':
-      new_target = copy.deepcopy(tgt)
-      new_target['build'] = 'test'
-      new_target['name'] += '_one_entry'
-      new_target['run'] = False
-      new_target['src'].append('test/core/util/one_corpus_entry_fuzzer.cc')
-      new_target['own_src'].append('test/core/util/one_corpus_entry_fuzzer.cc')
-      targets.append(new_target)
-      for corpus in new_target['corpus_dirs']:
-        for fn in sorted(glob.glob('%s/*' % corpus)):
-          tests.append({
-              'name': new_target['name'],
-              'args': [fn],
-              'exclude_iomgrs': ['uv'],
-              'exclude_configs': ['tsan'],
-              'uses_polling': False,
-              'platforms': ['mac', 'linux'],
-              'ci_platforms': ['linux'],
-              'flaky': False,
-              'language': 'c',
-              'cpu_cost': 0.1,
-          })
+    targets = dictionary['targets']
+    tests = dictionary['tests']
+    for tgt in targets:
+        if tgt['build'] == 'fuzzer':
+            new_target = copy.deepcopy(tgt)
+            new_target['build'] = 'test'
+            new_target['name'] += '_one_entry'
+            new_target['run'] = False
+            new_target['src'].append(
+                'test/core/util/one_corpus_entry_fuzzer.cc')
+            new_target['own_src'].append(
+                'test/core/util/one_corpus_entry_fuzzer.cc')
+            targets.append(new_target)
+            for corpus in new_target['corpus_dirs']:
+                for fn in sorted(glob.glob('%s/*' % corpus)):
+                    tests.append({
+                        'name': new_target['name'],
+                        'args': [fn],
+                        'exclude_iomgrs': ['uv'],
+                        'exclude_configs': ['tsan'],
+                        'uses_polling': False,
+                        'platforms': ['mac', 'linux'],
+                        'ci_platforms': ['linux'],
+                        'flaky': False,
+                        'language': 'c',
+                        'cpu_cost': 0.1,
+                    })
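
Each fuzzer target is cloned into a '<name>_one_entry' test target that links test/core/util/one_corpus_entry_fuzzer.cc, and every file in its corpus directories becomes one test invocation with that file as the only argument. A sketch of the transformation on an invented target; the real plugin globs corpus_dirs on disk, while here the corpus list is hard-coded:

import copy

# Invented fuzzer target and corpus listing.
tgt = {'name': 'json_fuzzer_test', 'build': 'fuzzer',
       'src': ['test/core/json/fuzzer.cc'],
       'own_src': ['test/core/json/fuzzer.cc'],
       'corpus_dirs': ['test/core/json/corpus']}
corpus_files = ['test/core/json/corpus/entry1', 'test/core/json/corpus/entry2']

new_target = copy.deepcopy(tgt)
new_target['build'] = 'test'
new_target['name'] += '_one_entry'
new_target['run'] = False
new_target['src'].append('test/core/util/one_corpus_entry_fuzzer.cc')

# One test per corpus entry, each passing the entry path as the argument.
tests = [{'name': new_target['name'], 'args': [fn], 'cpu_cost': 0.1}
         for fn in sorted(corpus_files)]
print('%s: %d tests' % (new_target['name'], len(tests)))
# json_fuzzer_test_one_entry: 2 tests
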
diff --git a/tools/buildgen/plugins/transitive_dependencies.py b/tools/buildgen/plugins/transitive_dependencies.py
index bf5263e..258e10b 100644
--- a/tools/buildgen/plugins/transitive_dependencies.py
+++ b/tools/buildgen/plugins/transitive_dependencies.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Buildgen transitive dependencies
 
 This takes the list of libs, node_modules, and targets from our
@@ -20,35 +19,40 @@
 
 """
 
+
 def get_lib(libs, name):
-  try:
-    return next(lib for lib in libs if lib['name']==name)
-  except StopIteration:
-    return None
+    try:
+        return next(lib for lib in libs if lib['name'] == name)
+    except StopIteration:
+        return None
+
 
 def transitive_deps(lib, libs):
-  if lib is not None and 'deps' in lib:
-    # Recursively call transitive_deps on each dependency, and take the union
-    return set.union(set(lib['deps']),
-                     *[set(transitive_deps(get_lib(libs, dep), libs))
-                       for dep in lib['deps']])
-  else:
-    return set()
+    if lib is not None and 'deps' in lib:
+        # Recursively call transitive_deps on each dependency, and take the union
+        return set.union(
+            set(lib['deps']), *[
+                set(transitive_deps(get_lib(libs, dep), libs))
+                for dep in lib['deps']
+            ])
+    else:
+        return set()
+
 
 def mako_plugin(dictionary):
-  """The exported plugin code for transitive_dependencies.
+    """The exported plugin code for transitive_dependencies.
 
   Iterate over each list and check each item for a deps list. We add a
   transitive_deps property to each with the transitive closure of those
   dependency lists.
   """
-  libs = dictionary.get('libs')
+    libs = dictionary.get('libs')
 
-  for target_name, target_list in dictionary.items():
-    for target in target_list:
-      if isinstance(target, dict) and 'deps' in target:
-        target['transitive_deps'] = transitive_deps(target, libs)
+    for target_name, target_list in dictionary.items():
+        for target in target_list:
+            if isinstance(target, dict) and 'deps' in target:
+                target['transitive_deps'] = transitive_deps(target, libs)
 
-  python_dependencies = dictionary.get('python_dependencies')
-  python_dependencies['transitive_deps'] = (
-      transitive_deps(python_dependencies, libs))
+    python_dependencies = dictionary.get('python_dependencies')
+    python_dependencies['transitive_deps'] = (transitive_deps(
+        python_dependencies, libs))
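
transitive_deps is a straightforward recursive union: a target's transitive dependencies are its direct deps plus the transitive deps of each of those. Run on a tiny invented three-library graph:

# Recursive transitive-closure sketch, mirroring transitive_deps above.
def get_lib(libs, name):
    return next((lib for lib in libs if lib['name'] == name), None)

def transitive_deps(lib, libs):
    if lib is not None and 'deps' in lib:
        return set.union(set(lib['deps']),
                         *[transitive_deps(get_lib(libs, d), libs)
                           for d in lib['deps']])
    return set()

libs = [{'name': 'gpr', 'deps': []},
        {'name': 'grpc', 'deps': ['gpr']},
        {'name': 'grpc++', 'deps': ['grpc']}]
print(sorted(transitive_deps(get_lib(libs, 'grpc++'), libs)))  # ['gpr', 'grpc']
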
diff --git a/tools/codegen/core/gen_header_frame.py b/tools/codegen/core/gen_header_frame.py
index 5375c14..7219d4d 100755
--- a/tools/codegen/core/gen_header_frame.py
+++ b/tools/codegen/core/gen_header_frame.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Read from stdin a set of colon separated http headers:
    :path: /foo/bar
    content-type: application/grpc
@@ -24,109 +23,118 @@
 import sys
 import argparse
 
+
 def append_never_indexed(payload_line, n, count, key, value):
-  payload_line.append(0x10)
-  assert(len(key) <= 126)
-  payload_line.append(len(key))
-  payload_line.extend(ord(c) for c in key)
-  assert(len(value) <= 126)
-  payload_line.append(len(value))
-  payload_line.extend(ord(c) for c in value)
+    payload_line.append(0x10)
+    assert (len(key) <= 126)
+    payload_line.append(len(key))
+    payload_line.extend(ord(c) for c in key)
+    assert (len(value) <= 126)
+    payload_line.append(len(value))
+    payload_line.extend(ord(c) for c in value)
+
 
 def append_inc_indexed(payload_line, n, count, key, value):
-  payload_line.append(0x40)
-  assert(len(key) <= 126)
-  payload_line.append(len(key))
-  payload_line.extend(ord(c) for c in key)
-  assert(len(value) <= 126)
-  payload_line.append(len(value))
-  payload_line.extend(ord(c) for c in value)
+    payload_line.append(0x40)
+    assert (len(key) <= 126)
+    payload_line.append(len(key))
+    payload_line.extend(ord(c) for c in key)
+    assert (len(value) <= 126)
+    payload_line.append(len(value))
+    payload_line.extend(ord(c) for c in value)
+
 
 def append_pre_indexed(payload_line, n, count, key, value):
-  payload_line.append(0x80 + 61 + count - n)
+    payload_line.append(0x80 + 61 + count - n)
+
 
 _COMPRESSORS = {
-  'never': append_never_indexed,
-  'inc': append_inc_indexed,
-  'pre': append_pre_indexed,
+    'never': append_never_indexed,
+    'inc': append_inc_indexed,
+    'pre': append_pre_indexed,
 }
 
 argp = argparse.ArgumentParser('Generate header frames')
-argp.add_argument('--set_end_stream', default=False, action='store_const', const=True)
-argp.add_argument('--no_framing', default=False, action='store_const', const=True)
-argp.add_argument('--compression', choices=sorted(_COMPRESSORS.keys()), default='never')
+argp.add_argument(
+    '--set_end_stream', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--no_framing', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--compression', choices=sorted(_COMPRESSORS.keys()), default='never')
 argp.add_argument('--hex', default=False, action='store_const', const=True)
 args = argp.parse_args()
 
 # parse input, fill in vals
 vals = []
 for line in sys.stdin:
-  line = line.strip()
-  if line == '': continue
-  if line[0] == '#': continue
-  key_tail, value = line[1:].split(':')
-  key = (line[0] + key_tail).strip()
-  value = value.strip()
-  vals.append((key, value))
+    line = line.strip()
+    if line == '': continue
+    if line[0] == '#': continue
+    key_tail, value = line[1:].split(':')
+    key = (line[0] + key_tail).strip()
+    value = value.strip()
+    vals.append((key, value))
 
 # generate frame payload binary data
 payload_bytes = []
 if not args.no_framing:
-  payload_bytes.append([]) # reserve space for header
+    payload_bytes.append([])  # reserve space for header
 payload_len = 0
 n = 0
 for key, value in vals:
-  payload_line = []
-  _COMPRESSORS[args.compression](payload_line, n, len(vals), key, value)
-  n += 1
-  payload_len += len(payload_line)
-  payload_bytes.append(payload_line)
+    payload_line = []
+    _COMPRESSORS[args.compression](payload_line, n, len(vals), key, value)
+    n += 1
+    payload_len += len(payload_line)
+    payload_bytes.append(payload_line)
 
 # fill in header
 if not args.no_framing:
-  flags = 0x04  # END_HEADERS
-  if args.set_end_stream:
-    flags |= 0x01  # END_STREAM
-  payload_bytes[0].extend([
-      (payload_len >> 16) & 0xff,
-      (payload_len >> 8) & 0xff,
-      (payload_len) & 0xff,
-      # header frame
-      0x01,
-      # flags
-      flags,
-      # stream id
-      0x00,
-      0x00,
-      0x00,
-      0x01
-  ])
+    flags = 0x04  # END_HEADERS
+    if args.set_end_stream:
+        flags |= 0x01  # END_STREAM
+    payload_bytes[0].extend([
+        (payload_len >> 16) & 0xff,
+        (payload_len >> 8) & 0xff,
+        (payload_len) & 0xff,
+        # header frame
+        0x01,
+        # flags
+        flags,
+        # stream id
+        0x00,
+        0x00,
+        0x00,
+        0x01
+    ])
 
 hex_bytes = [ord(c) for c in "abcdefABCDEF0123456789"]
 
+
 def esc_c(line):
-  out = "\""
-  last_was_hex = False
-  for c in line:
-    if 32 <= c < 127:
-      if c in hex_bytes and last_was_hex:
-        out += "\"\""
-      if c != ord('"'):
-        out += chr(c)
-      else:
-        out += "\\\""
-      last_was_hex = False
-    else:
-      out += "\\x%02x" % c
-      last_was_hex = True
-  return out + "\""
+    out = "\""
+    last_was_hex = False
+    for c in line:
+        if 32 <= c < 127:
+            if c in hex_bytes and last_was_hex:
+                out += "\"\""
+            if c != ord('"'):
+                out += chr(c)
+            else:
+                out += "\\\""
+            last_was_hex = False
+        else:
+            out += "\\x%02x" % c
+            last_was_hex = True
+    return out + "\""
+
 
 # dump bytes
 if args.hex:
-  all_bytes = []
-  for line in payload_bytes:
-    all_bytes.extend(line)
-  print '{%s}' % ', '.join('0x%02x' % c for c in all_bytes)
+    all_bytes = []
+    for line in payload_bytes:
+        all_bytes.extend(line)
+    print '{%s}' % ', '.join('0x%02x' % c for c in all_bytes)
 else:
-  for line in payload_bytes:
-    print esc_c(line)
+    for line in payload_bytes:
+        print esc_c(line)
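
The three compressors correspond to HPACK literal representations: 'never' emits a literal-never-indexed field (0x10 prefix), 'inc' a literal with incremental indexing (0x40), and 'pre' an indexed field pointing past the 61 static-table entries; keys and values are sent as raw length-prefixed octets with no Huffman coding. Encoding the ':path: /foo/bar' header from the script's own docstring with the 'never' strategy:

# Literal-never-indexed HPACK encoding, as in append_never_indexed above.
def append_never_indexed(payload_line, key, value):
    payload_line.append(0x10)                     # never-indexed literal prefix
    assert len(key) <= 126 and len(value) <= 126  # 7-bit lengths, no Huffman bit
    payload_line.append(len(key))
    payload_line.extend(ord(c) for c in key)
    payload_line.append(len(value))
    payload_line.extend(ord(c) for c in value)

line = []
append_never_indexed(line, ':path', '/foo/bar')
print(' '.join('%02x' % b for b in line))
# 10 05 3a 70 61 74 68 08 2f 66 6f 6f 2f 62 61 72
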
diff --git a/tools/codegen/core/gen_hpack_tables.c b/tools/codegen/core/gen_hpack_tables.cc
similarity index 99%
rename from tools/codegen/core/gen_hpack_tables.c
rename to tools/codegen/core/gen_hpack_tables.cc
index 73dfa9f..0e7a7b8 100644
--- a/tools/codegen/core/gen_hpack_tables.c
+++ b/tools/codegen/core/gen_hpack_tables.cc
@@ -16,7 +16,7 @@
  *
  */
 
-/* generates constant tables for hpack.c */
+/* generates constant tables for hpack.cc */
 
 #include <assert.h>
 #include <stddef.h>
diff --git a/tools/codegen/core/gen_legal_metadata_characters.c b/tools/codegen/core/gen_legal_metadata_characters.cc
similarity index 96%
rename from tools/codegen/core/gen_legal_metadata_characters.c
rename to tools/codegen/core/gen_legal_metadata_characters.cc
index 5e292ab..fbabd24 100644
--- a/tools/codegen/core/gen_legal_metadata_characters.c
+++ b/tools/codegen/core/gen_legal_metadata_characters.cc
@@ -16,7 +16,7 @@
  *
  */
 
-/* generates constant table for metadata.c */
+/* generates constant table for metadata.cc */
 
 #include <stdio.h>
 #include <string.h>
diff --git a/tools/codegen/core/gen_percent_encoding_tables.c b/tools/codegen/core/gen_percent_encoding_tables.cc
similarity index 97%
rename from tools/codegen/core/gen_percent_encoding_tables.c
rename to tools/codegen/core/gen_percent_encoding_tables.cc
index 49ea5ea..a99024e 100644
--- a/tools/codegen/core/gen_percent_encoding_tables.c
+++ b/tools/codegen/core/gen_percent_encoding_tables.cc
@@ -16,7 +16,7 @@
  *
  */
 
-/* generates constant table for metadata.c */
+/* generates constant table for metadata.cc */
 
 #include <stdio.h>
 #include <string.h>
diff --git a/tools/codegen/core/gen_server_registered_method_bad_client_test_body.py b/tools/codegen/core/gen_server_registered_method_bad_client_test_body.py
index 8b5b618..fa87c97 100755
--- a/tools/codegen/core/gen_server_registered_method_bad_client_test_body.py
+++ b/tools/codegen/core/gen_server_registered_method_bad_client_test_body.py
@@ -14,48 +14,42 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+
 def esc_c(line):
-  out = "\""
-  last_was_hex = False
-  for c in line:
-    if 32 <= c < 127:
-      if c in hex_bytes and last_was_hex:
-        out += "\"\""
-      if c != ord('"'):
-        out += chr(c)
-      else:
-        out += "\\\""
-      last_was_hex = False
-    else:
-      out += "\\x%02x" % c
-      last_was_hex = True
-  return out + "\""
+    out = "\""
+    last_was_hex = False
+    for c in line:
+        if 32 <= c < 127:
+            if c in hex_bytes and last_was_hex:
+                out += "\"\""
+            if c != ord('"'):
+                out += chr(c)
+            else:
+                out += "\\\""
+            last_was_hex = False
+        else:
+            out += "\\x%02x" % c
+            last_was_hex = True
+    return out + "\""
+
 
 done = set()
 
 for message_length in range(0, 3):
-  for send_message_length in range(0, message_length + 1):
-    payload = [
-      0,
-      (message_length >> 24) & 0xff,
-      (message_length >> 16) & 0xff,
-      (message_length >> 8) & 0xff,
-      (message_length) & 0xff
-    ] + send_message_length * [0]
-    for frame_length in range(0, len(payload) + 1):
-      is_end = frame_length == len(payload) and send_message_length == message_length
-      frame = [
-        (frame_length >> 16) & 0xff,
-        (frame_length >> 8) & 0xff,
-        (frame_length) & 0xff,
-        0,
-        1 if is_end else 0,
-        0, 0, 0, 1
-      ] + payload[0:frame_length]
-      text = esc_c(frame)
-      if text not in done:
-        print 'GRPC_RUN_BAD_CLIENT_TEST(verifier_%s, PFX_STR %s, %s);' % (
-            'succeeds' if is_end else 'fails', 
-            text, 
-            '0' if is_end else 'GRPC_BAD_CLIENT_DISCONNECT')
-        done.add(text)
+    for send_message_length in range(0, message_length + 1):
+        payload = [
+            0, (message_length >> 24) & 0xff, (message_length >> 16) & 0xff,
+            (message_length >> 8) & 0xff, (message_length) & 0xff
+        ] + send_message_length * [0]
+        for frame_length in range(0, len(payload) + 1):
+            is_end = frame_length == len(
+                payload) and send_message_length == message_length
+            frame = [(frame_length >> 16) & 0xff, (frame_length >> 8) & 0xff,
+                     (frame_length) & 0xff, 0, 1
+                     if is_end else 0, 0, 0, 0, 1] + payload[0:frame_length]
+            text = esc_c(frame)
+            if text not in done:
+                print 'GRPC_RUN_BAD_CLIENT_TEST(verifier_%s, PFX_STR %s, %s);' % (
+                    'succeeds' if is_end else 'fails', text, '0'
+                    if is_end else 'GRPC_BAD_CLIENT_DISCONNECT')
+                done.add(text)
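
The frames generated here exercise a server against partially delivered gRPC messages: the payload is a 5-byte gRPC message prefix (compressed flag plus 4-byte big-endian length) followed by zero bytes, and each prefix of it is wrapped in an HTTP/2 DATA frame whose 9-byte header carries the length, type, flags and stream id. The header layout on its own, with values chosen for illustration:

# 9-byte HTTP/2 frame header as assembled above: 3-byte length, type, flags,
# then a 4-byte stream id (stream 1 here).
def data_frame_header(frame_length, end_stream):
    return [(frame_length >> 16) & 0xff, (frame_length >> 8) & 0xff,
            frame_length & 0xff,
            0,                        # type 0x00 = DATA
            1 if end_stream else 0,   # flags: 0x01 = END_STREAM
            0, 0, 0, 1]               # stream id 1

print(data_frame_header(5, True))  # [0, 0, 5, 0, 1, 0, 0, 0, 1]
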
diff --git a/tools/codegen/core/gen_settings_ids.py b/tools/codegen/core/gen_settings_ids.py
index 481c421..5c3f066 100755
--- a/tools/codegen/core/gen_settings_ids.py
+++ b/tools/codegen/core/gen_settings_ids.py
@@ -24,92 +24,114 @@
 OnError = collections.namedtuple('OnError', 'behavior code')
 clamp_invalid_value = OnError('CLAMP_INVALID_VALUE', 'PROTOCOL_ERROR')
 disconnect_on_invalid_value = lambda e: OnError('DISCONNECT_ON_INVALID_VALUE', e)
-DecoratedSetting = collections.namedtuple('DecoratedSetting', 'enum name setting')
+DecoratedSetting = collections.namedtuple('DecoratedSetting',
+                                          'enum name setting')
 
 _SETTINGS = {
-  'HEADER_TABLE_SIZE': Setting(1, 4096, 0, 0xffffffff, clamp_invalid_value),
-  'ENABLE_PUSH': Setting(2, 1, 0, 1, disconnect_on_invalid_value('PROTOCOL_ERROR')),
-  'MAX_CONCURRENT_STREAMS': Setting(3, 0xffffffff, 0, 0xffffffff, disconnect_on_invalid_value('PROTOCOL_ERROR')),
-  'INITIAL_WINDOW_SIZE': Setting(4, 65535, 0, 0x7fffffff, disconnect_on_invalid_value('FLOW_CONTROL_ERROR')),
-  'MAX_FRAME_SIZE': Setting(5, 16384, 16384, 16777215, disconnect_on_invalid_value('PROTOCOL_ERROR')),
-  'MAX_HEADER_LIST_SIZE': Setting(6, _MAX_HEADER_LIST_SIZE, 0, _MAX_HEADER_LIST_SIZE, clamp_invalid_value),
-  'GRPC_ALLOW_TRUE_BINARY_METADATA': Setting(0xfe03, 0, 0, 1, clamp_invalid_value),
+    'HEADER_TABLE_SIZE':
+    Setting(1, 4096, 0, 0xffffffff, clamp_invalid_value),
+    'ENABLE_PUSH':
+    Setting(2, 1, 0, 1, disconnect_on_invalid_value('PROTOCOL_ERROR')),
+    'MAX_CONCURRENT_STREAMS':
+    Setting(3, 0xffffffff, 0, 0xffffffff,
+            disconnect_on_invalid_value('PROTOCOL_ERROR')),
+    'INITIAL_WINDOW_SIZE':
+    Setting(4, 65535, 0, 0x7fffffff,
+            disconnect_on_invalid_value('FLOW_CONTROL_ERROR')),
+    'MAX_FRAME_SIZE':
+    Setting(5, 16384, 16384, 16777215,
+            disconnect_on_invalid_value('PROTOCOL_ERROR')),
+    'MAX_HEADER_LIST_SIZE':
+    Setting(6, _MAX_HEADER_LIST_SIZE, 0, _MAX_HEADER_LIST_SIZE,
+            clamp_invalid_value),
+    'GRPC_ALLOW_TRUE_BINARY_METADATA':
+    Setting(0xfe03, 0, 0, 1, clamp_invalid_value),
 }
 
 H = open('src/core/ext/transport/chttp2/transport/http2_settings.h', 'w')
 C = open('src/core/ext/transport/chttp2/transport/http2_settings.c', 'w')
 
+
 # utility: print a big comment block into a set of files
 def put_banner(files, banner):
-  for f in files:
-    print >>f, '/*'
-    for line in banner:
-      print >>f, ' * %s' % line
-    print >>f, ' */'
-    print >>f
+    for f in files:
+        print >> f, '/*'
+        for line in banner:
+            print >> f, ' * %s' % line
+        print >> f, ' */'
+        print >> f
+
 
 # copy-paste copyright notice from this file
 with open(sys.argv[0]) as my_source:
-  copyright = []
-  for line in my_source:
-    if line[0] != '#': break
-  for line in my_source:
-    if line[0] == '#':
-      copyright.append(line)
-      break
-  for line in my_source:
-    if line[0] != '#':
-      break
-    copyright.append(line)
-  put_banner([H,C], [line[2:].rstrip() for line in copyright])
+    copyright = []
+    for line in my_source:
+        if line[0] != '#': break
+    for line in my_source:
+        if line[0] == '#':
+            copyright.append(line)
+            break
+    for line in my_source:
+        if line[0] != '#':
+            break
+        copyright.append(line)
+    put_banner([H, C], [line[2:].rstrip() for line in copyright])
 
-put_banner([H,C], ["Automatically generated by tools/codegen/core/gen_settings_ids.py"])
+put_banner(
+    [H, C],
+    ["Automatically generated by tools/codegen/core/gen_settings_ids.py"])
 
-print >>H, "#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H"
-print >>H, "#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H"
-print >>H
-print >>H, "#include <stdint.h>"
-print >>H, "#include <stdbool.h>"
-print >>H
+print >> H, "#ifndef GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H"
+print >> H, "#define GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H"
+print >> H
+print >> H, "#include <stdint.h>"
+print >> H, "#include <stdbool.h>"
+print >> H
 
-print >>C, "#include \"src/core/ext/transport/chttp2/transport/http2_settings.h\""
-print >>C
-print >>C, "#include <grpc/support/useful.h>"
-print >>C, "#include \"src/core/lib/transport/http2_errors.h\""
-print >>C
+print >> C, "#include \"src/core/ext/transport/chttp2/transport/http2_settings.h\""
+print >> C
+print >> C, "#include <grpc/support/useful.h>"
+print >> C, "#include \"src/core/lib/transport/http2_errors.h\""
+print >> C
 
 p = perfection.hash_parameters(sorted(x.id for x in _SETTINGS.values()))
 print p
 
+
 def hash(i):
-  i += p.offset
-  x = i % p.t
-  y = i / p.t
-  return x + p.r[y]
+    i += p.offset
+    x = i % p.t
+    y = i / p.t
+    return x + p.r[y]
 
-decorated_settings = [DecoratedSetting(hash(setting.id), name, setting)
-                      for name, setting in _SETTINGS.iteritems()]
 
-print >>H, 'typedef enum {'
+decorated_settings = [
+    DecoratedSetting(hash(setting.id), name, setting)
+    for name, setting in _SETTINGS.iteritems()
+]
+
+print >> H, 'typedef enum {'
 for decorated_setting in sorted(decorated_settings):
-  print >>H, '  GRPC_CHTTP2_SETTINGS_%s = %d, /* wire id %d */' % (
-      decorated_setting.name, decorated_setting.enum, decorated_setting.setting.id)
-print >>H, '} grpc_chttp2_setting_id;'
-print >>H
-print >>H, '#define GRPC_CHTTP2_NUM_SETTINGS %d' % (max(x.enum for x in decorated_settings) + 1)
+    print >> H, '  GRPC_CHTTP2_SETTINGS_%s = %d, /* wire id %d */' % (
+        decorated_setting.name, decorated_setting.enum,
+        decorated_setting.setting.id)
+print >> H, '} grpc_chttp2_setting_id;'
+print >> H
+print >> H, '#define GRPC_CHTTP2_NUM_SETTINGS %d' % (
+    max(x.enum for x in decorated_settings) + 1)
 
-print >>H, 'extern const uint16_t grpc_setting_id_to_wire_id[];'
-print >>C, 'const uint16_t grpc_setting_id_to_wire_id[] = {%s};' % ','.join(
+print >> H, 'extern const uint16_t grpc_setting_id_to_wire_id[];'
+print >> C, 'const uint16_t grpc_setting_id_to_wire_id[] = {%s};' % ','.join(
     '%d' % s for s in p.slots)
-print >>H
-print >>H, "bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out);"
+print >> H
+print >> H, "bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out);"
 cgargs = {
-      'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
-      't': p.t,
-      'offset': abs(p.offset),
-      'offset_sign': '+' if p.offset > 0 else '-'
-  }
-print >>C, """
+    'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
+    't': p.t,
+    'offset': abs(p.offset),
+    'offset_sign': '+' if p.offset > 0 else '-'
+}
+print >> C, """
 bool grpc_wire_id_to_setting_id(uint32_t wire_id, grpc_chttp2_setting_id *out) {
   uint32_t i = wire_id %(offset_sign)s %(offset)d;
   uint32_t x = i %% %(t)d;
@@ -118,17 +140,17 @@
   switch (y) {
 """ % cgargs
 for i, r in enumerate(p.r):
-  if not r: continue
-  if r < 0: print >>C, 'case %d: h -= %d; break;' % (i, -r)
-  else: print >>C, 'case %d: h += %d; break;' % (i, r)
-print >>C, """
+    if not r: continue
+    if r < 0: print >> C, 'case %d: h -= %d; break;' % (i, -r)
+    else: print >> C, 'case %d: h += %d; break;' % (i, r)
+print >> C, """
   }
   *out = (grpc_chttp2_setting_id)h;
   return h < GPR_ARRAY_SIZE(grpc_setting_id_to_wire_id) && grpc_setting_id_to_wire_id[h] == wire_id;
 }
 """ % cgargs
 
-print >>H, """
+print >> H, """
 typedef enum {
   GRPC_CHTTP2_CLAMP_INVALID_VALUE,
   GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE
@@ -145,25 +167,25 @@
 
 extern const grpc_chttp2_setting_parameters grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS];
 """
-print >>C, "const grpc_chttp2_setting_parameters grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {"
+print >> C, "const grpc_chttp2_setting_parameters grpc_chttp2_settings_parameters[GRPC_CHTTP2_NUM_SETTINGS] = {"
 i = 0
 for decorated_setting in sorted(decorated_settings):
-  while i < decorated_setting.enum:
-    print >>C, "{NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_HTTP2_PROTOCOL_ERROR},"
+    while i < decorated_setting.enum:
+        print >> C, "{NULL, 0, 0, 0, GRPC_CHTTP2_DISCONNECT_ON_INVALID_VALUE, GRPC_HTTP2_PROTOCOL_ERROR},"
+        i += 1
+    print >> C, "{\"%s\", %du, %du, %du, GRPC_CHTTP2_%s, GRPC_HTTP2_%s}," % (
+        decorated_setting.name,
+        decorated_setting.setting.default,
+        decorated_setting.setting.min,
+        decorated_setting.setting.max,
+        decorated_setting.setting.on_error.behavior,
+        decorated_setting.setting.on_error.code,
+    )
     i += 1
-  print >>C, "{\"%s\", %du, %du, %du, GRPC_CHTTP2_%s, GRPC_HTTP2_%s}," % (
-    decorated_setting.name,
-    decorated_setting.setting.default,
-    decorated_setting.setting.min,
-    decorated_setting.setting.max,
-    decorated_setting.setting.on_error.behavior,
-    decorated_setting.setting.on_error.code,
-  )
-  i += 1
-print >>C, "};"
+print >> C, "};"
 
-print >>H
-print >>H, "#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */"
+print >> H
+print >> H, "#endif /* GRPC_CORE_EXT_TRANSPORT_CHTTP2_TRANSPORT_HTTP2_SETTINGS_H */"
 
 H.close()
 C.close()
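
The generated C table maps the sparse HTTP/2 setting wire ids (1 through 6 plus the gRPC extension 0xfe03) onto a dense enum, using a minimal perfect hash computed by the perfection module so that the reverse lookup is a couple of arithmetic operations, an array read and one comparison. A plain-dict model of what that lookup returns; the dense numbering here is simply sorted order, not the hash-derived order the script actually emits:

# Dict-based stand-in for the generated grpc_wire_id_to_setting_id lookup.
WIRE_IDS = [1, 2, 3, 4, 5, 6, 0xfe03]
wire_to_enum = {wire: i for i, wire in enumerate(sorted(WIRE_IDS))}

def wire_id_to_setting_id(wire_id):
    # mirrors the C signature's behaviour: (found, enum value)
    return (wire_id in wire_to_enum, wire_to_enum.get(wire_id, 0))

print(wire_id_to_setting_id(4))       # (True, 3)
print(wire_id_to_setting_id(0xfe03))  # (True, 6)
print(wire_id_to_setting_id(7))       # (False, 0)
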
diff --git a/tools/codegen/core/gen_static_metadata.py b/tools/codegen/core/gen_static_metadata.py
index 355f3f4..3a171bb 100755
--- a/tools/codegen/core/gen_static_metadata.py
+++ b/tools/codegen/core/gen_static_metadata.py
@@ -172,65 +172,66 @@
     'gzip',
 ]
 
+
 # utility: mangle the name of a config
 def mangle(elem, name=None):
-  xl = {
-      '-': '_',
-      ':': '',
-      '/': 'slash',
-      '.': 'dot',
-      ',': 'comma',
-      ' ': '_',
-  }
+    xl = {
+        '-': '_',
+        ':': '',
+        '/': 'slash',
+        '.': 'dot',
+        ',': 'comma',
+        ' ': '_',
+    }
 
-  def m0(x):
-    if not x:
-      return 'empty'
-    r = ''
-    for c in x:
-      put = xl.get(c, c.lower())
-      if not put:
-        continue
-      last_is_underscore = r[-1] == '_' if r else True
-      if last_is_underscore and put == '_':
-        continue
-      elif len(put) > 1:
-        if not last_is_underscore:
-          r += '_'
-        r += put
-        r += '_'
-      else:
-        r += put
-    if r[-1] == '_':
-      r = r[:-1]
-    return r
+    def m0(x):
+        if not x:
+            return 'empty'
+        r = ''
+        for c in x:
+            put = xl.get(c, c.lower())
+            if not put:
+                continue
+            last_is_underscore = r[-1] == '_' if r else True
+            if last_is_underscore and put == '_':
+                continue
+            elif len(put) > 1:
+                if not last_is_underscore:
+                    r += '_'
+                r += put
+                r += '_'
+            else:
+                r += put
+        if r[-1] == '_':
+            r = r[:-1]
+        return r
 
-  def n(default, name=name):
-    if name is None:
-      return 'grpc_%s_' % default
-    if name == '':
-      return ''
-    return 'grpc_%s_' % name
+    def n(default, name=name):
+        if name is None:
+            return 'grpc_%s_' % default
+        if name == '':
+            return ''
+        return 'grpc_%s_' % name
 
-  if isinstance(elem, tuple):
-    return '%s%s_%s' % (n('mdelem'), m0(elem[0]), m0(elem[1]))
-  else:
-    return '%s%s' % (n('mdstr'), m0(elem))
+    if isinstance(elem, tuple):
+        return '%s%s_%s' % (n('mdelem'), m0(elem[0]), m0(elem[1]))
+    else:
+        return '%s%s' % (n('mdstr'), m0(elem))
 
 
 # utility: generate some hash value for a string
 def fake_hash(elem):
-  return hashlib.md5(elem).hexdigest()[0:8]
+    return hashlib.md5(elem).hexdigest()[0:8]
 
 
 # utility: print a big comment block into a set of files
 def put_banner(files, banner):
-  for f in files:
-    print >> f, '/*'
-    for line in banner:
-      print >> f, ' * %s' % line
-    print >> f, ' */'
-    print >> f
+    for f in files:
+        print >> f, '/*'
+        for line in banner:
+            print >> f, ' * %s' % line
+        print >> f, ' */'
+        print >> f
 
 
 # build a list of all the strings we need
@@ -240,43 +241,43 @@
 # put metadata batch callouts first, to make the check of if a static metadata
 # string is a callout trivial
 for elem, _ in METADATA_BATCH_CALLOUTS:
-  if elem not in all_strs:
-    all_strs.append(elem)
-for elem in CONFIG:
-  if isinstance(elem, tuple):
-    if elem[0] not in all_strs:
-      all_strs.append(elem[0])
-    if elem[1] not in all_strs:
-      all_strs.append(elem[1])
-    if elem not in all_elems:
-      all_elems.append(elem)
-  else:
     if elem not in all_strs:
-      all_strs.append(elem)
+        all_strs.append(elem)
+for elem in CONFIG:
+    if isinstance(elem, tuple):
+        if elem[0] not in all_strs:
+            all_strs.append(elem[0])
+        if elem[1] not in all_strs:
+            all_strs.append(elem[1])
+        if elem not in all_elems:
+            all_elems.append(elem)
+    else:
+        if elem not in all_strs:
+            all_strs.append(elem)
 compression_elems = []
 for mask in range(1, 1 << len(COMPRESSION_ALGORITHMS)):
-  val = ','.join(COMPRESSION_ALGORITHMS[alg]
-                 for alg in range(0, len(COMPRESSION_ALGORITHMS))
-                 if (1 << alg) & mask)
-  elem = ('grpc-accept-encoding', val)
-  if val not in all_strs:
-    all_strs.append(val)
-  if elem not in all_elems:
-    all_elems.append(elem)
-  compression_elems.append(elem)
-  static_userdata[elem] = 1 + (mask | 1)
+    val = ','.join(COMPRESSION_ALGORITHMS[alg]
+                   for alg in range(0, len(COMPRESSION_ALGORITHMS))
+                   if (1 << alg) & mask)
+    elem = ('grpc-accept-encoding', val)
+    if val not in all_strs:
+        all_strs.append(val)
+    if elem not in all_elems:
+        all_elems.append(elem)
+    compression_elems.append(elem)
+    static_userdata[elem] = 1 + (mask | 1)
 stream_compression_elems = []
 for mask in range(1, 1 << len(STREAM_COMPRESSION_ALGORITHMS)):
-  val = ','.join(STREAM_COMPRESSION_ALGORITHMS[alg]
-                 for alg in range(0, len(STREAM_COMPRESSION_ALGORITHMS))
-                 if (1 << alg) & mask)
-  elem = ('accept-encoding', val)
-  if val not in all_strs:
-    all_strs.append(val)
-  if elem not in all_elems:
-    all_elems.append(elem)
-  stream_compression_elems.append(elem)
-  static_userdata[elem] = 1 + (mask | 1)
+    val = ','.join(STREAM_COMPRESSION_ALGORITHMS[alg]
+                   for alg in range(0, len(STREAM_COMPRESSION_ALGORITHMS))
+                   if (1 << alg) & mask)
+    elem = ('accept-encoding', val)
+    if val not in all_strs:
+        all_strs.append(val)
+    if elem not in all_elems:
+        all_elems.append(elem)
+    stream_compression_elems.append(elem)
+    static_userdata[elem] = 1 + (mask | 1)
 
 # output configuration
 args = sys.argv[1:]
@@ -284,62 +285,62 @@
 C = None
 D = None
 if args:
-  if 'header' in args:
-    H = sys.stdout
-  else:
-    H = open('/dev/null', 'w')
-  if 'source' in args:
-    C = sys.stdout
-  else:
-    C = open('/dev/null', 'w')
-  if 'dictionary' in args:
-    D = sys.stdout
-  else:
-    D = open('/dev/null', 'w')
+    if 'header' in args:
+        H = sys.stdout
+    else:
+        H = open('/dev/null', 'w')
+    if 'source' in args:
+        C = sys.stdout
+    else:
+        C = open('/dev/null', 'w')
+    if 'dictionary' in args:
+        D = sys.stdout
+    else:
+        D = open('/dev/null', 'w')
 else:
-  H = open(
-      os.path.join(
-          os.path.dirname(sys.argv[0]),
-          '../../../src/core/lib/transport/static_metadata.h'), 'w')
-  C = open(
-      os.path.join(
-          os.path.dirname(sys.argv[0]),
-          '../../../src/core/lib/transport/static_metadata.cc'), 'w')
-  D = open(
-      os.path.join(
-          os.path.dirname(sys.argv[0]),
-          '../../../test/core/end2end/fuzzers/hpack.dictionary'), 'w')
+    H = open(
+        os.path.join(
+            os.path.dirname(sys.argv[0]),
+            '../../../src/core/lib/transport/static_metadata.h'), 'w')
+    C = open(
+        os.path.join(
+            os.path.dirname(sys.argv[0]),
+            '../../../src/core/lib/transport/static_metadata.cc'), 'w')
+    D = open(
+        os.path.join(
+            os.path.dirname(sys.argv[0]),
+            '../../../test/core/end2end/fuzzers/hpack.dictionary'), 'w')
 
 # copy-paste copyright notice from this file
 with open(sys.argv[0]) as my_source:
-  copyright = []
-  for line in my_source:
-    if line[0] != '#':
-      break
-  for line in my_source:
-    if line[0] == '#':
-      copyright.append(line)
-      break
-  for line in my_source:
-    if line[0] != '#':
-      break
-    copyright.append(line)
-  put_banner([H, C], [line[2:].rstrip() for line in copyright])
+    copyright = []
+    for line in my_source:
+        if line[0] != '#':
+            break
+    for line in my_source:
+        if line[0] == '#':
+            copyright.append(line)
+            break
+    for line in my_source:
+        if line[0] != '#':
+            break
+        copyright.append(line)
+    put_banner([H, C], [line[2:].rstrip() for line in copyright])
 
 hex_bytes = [ord(c) for c in 'abcdefABCDEF0123456789']
 
 
 def esc_dict(line):
-  out = "\""
-  for c in line:
-    if 32 <= c < 127:
-      if c != ord('"'):
-        out += chr(c)
-      else:
-        out += "\\\""
-    else:
-      out += '\\x%02X' % c
-  return out + "\""
+    out = "\""
+    for c in line:
+        if 32 <= c < 127:
+            if c != ord('"'):
+                out += chr(c)
+            else:
+                out += "\\\""
+        else:
+            out += '\\x%02X' % c
+    return out + "\""
 
 
 put_banner([H, C], """WARNING: Auto-generated code.
@@ -347,17 +348,13 @@
 To make changes to this file, change
 tools/codegen/core/gen_static_metadata.py, and then re-run it.
 
-See metadata.h for an explanation of the interface here, and metadata.c for
+See metadata.h for an explanation of the interface here, and metadata.cc for
 an explanation of what's going on.
 """.splitlines())
 
 print >> H, '#ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
 print >> H, '#define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
 print >> H
-print >> H, '#ifdef __cplusplus'
-print >> H, 'extern "C" {'
-print >> H, '#endif'
-print >> H
 print >> H, '#include "src/core/lib/transport/metadata.h"'
 print >> H
 
@@ -369,33 +366,32 @@
 str_ofs = 0
 id2strofs = {}
 for i, elem in enumerate(all_strs):
-  id2strofs[i] = str_ofs
-  str_ofs += len(elem)
+    id2strofs[i] = str_ofs
+    str_ofs += len(elem)
 
 
 def slice_def(i):
-  return ('{&grpc_static_metadata_refcounts[%d],'
-          ' {{g_bytes+%d, %d}}}') % (
-      i, id2strofs[i], len(all_strs[i]))
+    return ('{&grpc_static_metadata_refcounts[%d],'
+            ' {{g_bytes+%d, %d}}}') % (i, id2strofs[i], len(all_strs[i]))
 
 
 # validate configuration
 for elem, _ in METADATA_BATCH_CALLOUTS:
-  assert elem in all_strs
+    assert elem in all_strs
 
 print >> H, '#define GRPC_STATIC_MDSTR_COUNT %d' % len(all_strs)
 print >> H, ('extern const grpc_slice '
              'grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];')
 for i, elem in enumerate(all_strs):
-  print >> H, '/* "%s" */' % elem
-  print >> H, '#define %s (grpc_static_slice_table[%d])' % (
-      mangle(elem).upper(), i)
+    print >> H, '/* "%s" */' % elem
+    print >> H, '#define %s (grpc_static_slice_table[%d])' % (
+        mangle(elem).upper(), i)
 print >> H
-print >> C, 'static uint8_t g_bytes[] = {%s};' % (
-    ','.join('%d' % ord(c) for c in ''.join(all_strs)))
+print >> C, 'static uint8_t g_bytes[] = {%s};' % (','.join(
+    '%d' % ord(c) for c in ''.join(all_strs)))
 print >> C
 print >> C, 'static void static_ref(void *unused) {}'
-print >> C, 'static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {}'
+print >> C, 'static void static_unref(void *unused) {}'
 print >> C, ('static const grpc_slice_refcount_vtable static_sub_vtable = '
              '{static_ref, static_unref, grpc_slice_default_eq_impl, '
              'grpc_slice_default_hash_impl};')
@@ -411,7 +407,7 @@
 print >> C, ('grpc_slice_refcount '
              'grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {')
 for i, elem in enumerate(all_strs):
-  print >> C, '  {&grpc_static_metadata_vtable, &static_sub_refcnt},'
+    print >> C, '  {&grpc_static_metadata_vtable, &static_sub_refcnt},'
 print >> C, '};'
 print >> C
 print >> H, '#define GRPC_IS_STATIC_METADATA_STRING(slice) \\'
@@ -421,7 +417,7 @@
 print >> C, ('const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT]'
              ' = {')
 for i, elem in enumerate(all_strs):
-  print >> C, slice_def(i) + ','
+    print >> C, slice_def(i) + ','
 print >> C, '};'
 print >> C
 print >> H, '#define GRPC_STATIC_METADATA_INDEX(static_slice) \\'
@@ -431,10 +427,10 @@
 
 print >> D, '# hpack fuzzing dictionary'
 for i, elem in enumerate(all_strs):
-  print >> D, '%s' % (esc_dict([len(elem)] + [ord(c) for c in elem]))
+    print >> D, '%s' % (esc_dict([len(elem)] + [ord(c) for c in elem]))
 for i, elem in enumerate(all_elems):
-  print >> D, '%s' % (esc_dict([0, len(elem[0])] + [ord(c) for c in elem[0]] +
-                               [len(elem[1])] + [ord(c) for c in elem[1]]))
+    print >> D, '%s' % (esc_dict([0, len(elem[0])] + [ord(c) for c in elem[0]] +
+                                 [len(elem[1])] + [ord(c) for c in elem[1]]))
 
 print >> H, '#define GRPC_STATIC_MDELEM_COUNT %d' % len(all_elems)
 print >> H, ('extern grpc_mdelem_data '
@@ -442,56 +438,51 @@
 print >> H, ('extern uintptr_t '
              'grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];')
 for i, elem in enumerate(all_elems):
-  print >> H, '/* "%s": "%s" */' % elem
-  print >> H, ('#define %s (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[%d], '
-               'GRPC_MDELEM_STORAGE_STATIC))') % (
-      mangle(elem).upper(), i)
+    print >> H, '/* "%s": "%s" */' % elem
+    print >> H, ('#define %s (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[%d], '
+                 'GRPC_MDELEM_STORAGE_STATIC))') % (mangle(elem).upper(), i)
 print >> H
 print >> C, ('uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] '
              '= {')
-print >> C, '  %s' % ','.join('%d' % static_userdata.get(elem, 0)
-                              for elem in all_elems)
+print >> C, '  %s' % ','.join(
+    '%d' % static_userdata.get(elem, 0) for elem in all_elems)
 print >> C, '};'
 print >> C
 
 
 def str_idx(s):
-  for i, s2 in enumerate(all_strs):
-    if s == s2:
-      return i
+    for i, s2 in enumerate(all_strs):
+        if s == s2:
+            return i
 
 
 def md_idx(m):
-  for i, m2 in enumerate(all_elems):
-    if m == m2:
-      return i
+    for i, m2 in enumerate(all_elems):
+        if m == m2:
+            return i
 
 
 def offset_trials(mink):
-  yield 0
-  for i in range(1, 100):
-    for mul in [-1, 1]:
-      yield mul * i
+    yield 0
+    for i in range(1, 100):
+        for mul in [-1, 1]:
+            yield mul * i
 
 
 def perfect_hash(keys, name):
-  p = perfection.hash_parameters(keys)
+    p = perfection.hash_parameters(keys)
 
-  def f(i, p=p):
-    i += p.offset
-    x = i % p.t
-    y = i / p.t
-    return x + p.r[y]
+    def f(i, p=p):
+        i += p.offset
+        x = i % p.t
+        y = i / p.t
+        return x + p.r[y]
 
-  return {
-      'PHASHRANGE':
-          p.t - 1 + max(p.r),
-      'PHASHNKEYS':
-          len(p.slots),
-      'pyfunc':
-          f,
-      'code':
-          """
+    return {
+        'PHASHRANGE': p.t - 1 + max(p.r),
+        'PHASHNKEYS': len(p.slots),
+        'pyfunc': f,
+        'code': """
 static const int8_t %(name)s_r[] = {%(r)s};
 static uint32_t %(name)s_phash(uint32_t i) {
   i %(offset_sign)s= %(offset)d;
@@ -505,13 +496,13 @@
   return h;
 }
     """ % {
-        'name': name,
-        'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
-        't': p.t,
-        'offset': abs(p.offset),
-        'offset_sign': '+' if p.offset > 0 else '-'
+            'name': name,
+            'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
+            't': p.t,
+            'offset': abs(p.offset),
+            'offset_sign': '+' if p.offset > 0 else '-'
+        }
     }
-  }
 
 
 elem_keys = [
@@ -523,10 +514,10 @@
 keys = [0] * int(elem_hash['PHASHRANGE'])
 idxs = [255] * int(elem_hash['PHASHNKEYS'])
 for i, k in enumerate(elem_keys):
-  h = elem_hash['pyfunc'](k)
-  assert keys[h] == 0
-  keys[h] = k
-  idxs[h] = i
+    h = elem_hash['pyfunc'](k)
+    assert keys[h] == 0
+    keys[h] = k
+    idxs[h] = i
 print >> C, 'static const uint16_t elem_keys[] = {%s};' % ','.join(
     '%d' % k for k in keys)
 print >> C, 'static const uint8_t elem_idxs[] = {%s};' % ','.join(
@@ -544,12 +535,12 @@
 
 print >> C, 'grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {'
 for a, b in all_elems:
-  print >> C, '{%s,%s},' % (slice_def(str_idx(a)), slice_def(str_idx(b)))
+    print >> C, '{%s,%s},' % (slice_def(str_idx(a)), slice_def(str_idx(b)))
 print >> C, '};'
 
 print >> H, 'typedef enum {'
 for elem, _ in METADATA_BATCH_CALLOUTS:
-  print >> H, '  %s,' % mangle(elem, 'batch').upper()
+    print >> H, '  %s,' % mangle(elem, 'batch').upper()
 print >> H, '  GRPC_BATCH_CALLOUTS_COUNT'
 print >> H, '} grpc_metadata_batch_callouts_index;'
 print >> H
@@ -557,7 +548,7 @@
 print >> H, '  struct grpc_linked_mdelem *array[GRPC_BATCH_CALLOUTS_COUNT];'
 print >> H, '  struct {'
 for elem, _ in METADATA_BATCH_CALLOUTS:
-  print >> H, '  struct grpc_linked_mdelem *%s;' % mangle(elem, '').lower()
+    print >> H, '  struct grpc_linked_mdelem *%s;' % mangle(elem, '').lower()
 print >> H, '  } named;'
 print >> H, '} grpc_metadata_batch_callouts;'
 print >> H
@@ -569,7 +560,7 @@
 print >> H
 print >> C, 'bool grpc_static_callout_is_default[GRPC_BATCH_CALLOUTS_COUNT] = {'
 for elem, is_default in METADATA_BATCH_CALLOUTS:
-  print >> C, '  %s, // %s' % (str(is_default).lower(), elem)
+    print >> C, '  %s, // %s' % (str(is_default).lower(), elem)
 print >> C, '};'
 print >> C
 
@@ -588,15 +579,12 @@
     1 << len(STREAM_COMPRESSION_ALGORITHMS))
 print >> C, 'const uint8_t grpc_static_accept_stream_encoding_metadata[%d] = {' % (
     1 << len(STREAM_COMPRESSION_ALGORITHMS))
-print >> C, '0,%s' % ','.join('%d' % md_idx(elem) for elem in stream_compression_elems)
+print >> C, '0,%s' % ','.join(
+    '%d' % md_idx(elem) for elem in stream_compression_elems)
 print >> C, '};'
 
 print >> H, '#define GRPC_MDELEM_ACCEPT_STREAM_ENCODING_FOR_ALGORITHMS(algs) (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[grpc_static_accept_stream_encoding_metadata[(algs)]], GRPC_MDELEM_STORAGE_STATIC))'
 
-print >> H, '#ifdef __cplusplus'
-print >> H, '}'
-print >> H, '#endif'
-
 print >> H, '#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */'
 
 H.close()
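
The id2strofs/slice_def bookkeeping in this generator packs every static
metadata string into a single g_bytes[] blob and refers to each one as an
(offset, length) pair. A minimal sketch of the same idea, with made-up strings
rather than the real metadata list:

    all_strs = ['grpc-status', 'content-type', 'te', 'trailers']  # hypothetical

    g_bytes = ''.join(all_strs)   # the one byte blob the generator emits
    id2strofs = {}
    ofs = 0
    for i, s in enumerate(all_strs):
        id2strofs[i] = ofs
        ofs += len(s)

    def slice_def(i):
        # stands in for the generated C initializer: an offset into g_bytes
        # plus a length, with no per-string allocation
        return (id2strofs[i], len(all_strs[i]))

    for i, s in enumerate(all_strs):
        off, length = slice_def(i)
        assert g_bytes[off:off + length] == s  # every slice round-trips
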
diff --git a/tools/codegen/core/gen_stats_data.py b/tools/codegen/core/gen_stats_data.py
index 072a677..5c9d9e5 100755
--- a/tools/codegen/core/gen_stats_data.py
+++ b/tools/codegen/core/gen_stats_data.py
@@ -22,27 +22,31 @@
 import json
 
 with open('src/core/lib/debug/stats_data.yaml') as f:
-  attrs = yaml.load(f.read())
+    attrs = yaml.load(f.read())
 
 REQUIRED_FIELDS = ['name', 'doc']
 
+
 def make_type(name, fields):
-  return (collections.namedtuple(name, ' '.join(list(set(REQUIRED_FIELDS + fields)))), [])
+    return (collections.namedtuple(name, ' '.join(
+        list(set(REQUIRED_FIELDS + fields)))), [])
+
 
 def c_str(s, encoding='ascii'):
-   if isinstance(s, unicode):
-      s = s.encode(encoding)
-   result = ''
-   for c in s:
-      if not (32 <= ord(c) < 127) or c in ('\\', '"'):
-         result += '\\%03o' % ord(c)
-      else:
-         result += c
-   return '"' + result + '"'
+    if isinstance(s, unicode):
+        s = s.encode(encoding)
+    result = ''
+    for c in s:
+        if not (32 <= ord(c) < 127) or c in ('\\', '"'):
+            result += '\\%03o' % ord(c)
+        else:
+            result += c
+    return '"' + result + '"'
+
 
 types = (
-  make_type('Counter', []),
-  make_type('Histogram', ['max', 'buckets']),
+    make_type('Counter', []),
+    make_type('Histogram', ['max', 'buckets']),
 )
 
 inst_map = dict((t[0].__name__, t[1]) for t in types)
@@ -50,369 +54,407 @@
 stats = []
 
 for attr in attrs:
-  found = False
-  for t, lst in types:
-    t_name = t.__name__.lower()
-    if t_name in attr:
-      name = attr[t_name]
-      del attr[t_name]
-      lst.append(t(name=name, **attr))
-      found = True
-      break
-  assert found, "Bad decl: %s" % attr
+    found = False
+    for t, lst in types:
+        t_name = t.__name__.lower()
+        if t_name in attr:
+            name = attr[t_name]
+            del attr[t_name]
+            lst.append(t(name=name, **attr))
+            found = True
+            break
+    assert found, "Bad decl: %s" % attr
+
 
 def dbl2u64(d):
-  return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value
+    return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value
+
 
 def shift_works_until(mapped_bounds, shift_bits):
-  for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])):
-    a, b = ab
-    if (a >> shift_bits) == (b >> shift_bits):
-      return i
-  return len(mapped_bounds)
+    for i, ab in enumerate(zip(mapped_bounds, mapped_bounds[1:])):
+        a, b = ab
+        if (a >> shift_bits) == (b >> shift_bits):
+            return i
+    return len(mapped_bounds)
+
 
 def find_ideal_shift(mapped_bounds, max_size):
-  best = None
-  for shift_bits in reversed(range(0,64)):
-    n = shift_works_until(mapped_bounds, shift_bits)
-    if n == 0: continue
-    table_size = mapped_bounds[n-1] >> shift_bits
-    if table_size > max_size: continue
-    if table_size > 65535: continue
-    if best is None:
-      best = (shift_bits, n, table_size)
-    elif best[1] < n:
-      best = (shift_bits, n, table_size)
-  print best
-  return best
+    best = None
+    for shift_bits in reversed(range(0, 64)):
+        n = shift_works_until(mapped_bounds, shift_bits)
+        if n == 0: continue
+        table_size = mapped_bounds[n - 1] >> shift_bits
+        if table_size > max_size: continue
+        if table_size > 65535: continue
+        if best is None:
+            best = (shift_bits, n, table_size)
+        elif best[1] < n:
+            best = (shift_bits, n, table_size)
+    print best
+    return best
+
 
 def gen_map_table(mapped_bounds, shift_data):
-  tbl = []
-  cur = 0
-  print mapped_bounds
-  mapped_bounds = [x >> shift_data[0] for x in mapped_bounds]
-  print mapped_bounds
-  for i in range(0, mapped_bounds[shift_data[1]-1]):
-    while i > mapped_bounds[cur]:
-      cur += 1
-    tbl.append(cur)
-  return tbl
+    tbl = []
+    cur = 0
+    print mapped_bounds
+    mapped_bounds = [x >> shift_data[0] for x in mapped_bounds]
+    print mapped_bounds
+    for i in range(0, mapped_bounds[shift_data[1] - 1]):
+        while i > mapped_bounds[cur]:
+            cur += 1
+        tbl.append(cur)
+    return tbl
+
 
 static_tables = []
 
+
 def decl_static_table(values, type):
-  global static_tables
-  v = (type, values)
-  for i, vp in enumerate(static_tables):
-    if v == vp: return i
-  print "ADD TABLE: %s %r" % (type, values)
-  r = len(static_tables)
-  static_tables.append(v)
-  return r
+    global static_tables
+    v = (type, values)
+    for i, vp in enumerate(static_tables):
+        if v == vp: return i
+    print "ADD TABLE: %s %r" % (type, values)
+    r = len(static_tables)
+    static_tables.append(v)
+    return r
+
 
 def type_for_uint_table(table):
-  mv = max(table)
-  if mv < 2**8:
-    return 'uint8_t'
-  elif mv < 2**16:
-    return 'uint16_t'
-  elif mv < 2**32:
-    return 'uint32_t'
-  else:
-    return 'uint64_t'
+    mv = max(table)
+    if mv < 2**8:
+        return 'uint8_t'
+    elif mv < 2**16:
+        return 'uint16_t'
+    elif mv < 2**32:
+        return 'uint32_t'
+    else:
+        return 'uint64_t'
+
 
 def gen_bucket_code(histogram):
-  bounds = [0, 1]
-  done_trivial = False
-  done_unmapped = False
-  first_nontrivial = None
-  first_unmapped = None
-  while len(bounds) < histogram.buckets + 1:
-    if len(bounds) == histogram.buckets:
-      nextb = int(histogram.max)
+    bounds = [0, 1]
+    done_trivial = False
+    done_unmapped = False
+    first_nontrivial = None
+    first_unmapped = None
+    while len(bounds) < histogram.buckets + 1:
+        if len(bounds) == histogram.buckets:
+            nextb = int(histogram.max)
+        else:
+            mul = math.pow(
+                float(histogram.max) / bounds[-1],
+                1.0 / (histogram.buckets + 1 - len(bounds)))
+            nextb = int(math.ceil(bounds[-1] * mul))
+        if nextb <= bounds[-1] + 1:
+            nextb = bounds[-1] + 1
+        elif not done_trivial:
+            done_trivial = True
+            first_nontrivial = len(bounds)
+        bounds.append(nextb)
+    bounds_idx = decl_static_table(bounds, 'int')
+    if done_trivial:
+        first_nontrivial_code = dbl2u64(first_nontrivial)
+        code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds]
+        shift_data = find_ideal_shift(code_bounds[first_nontrivial:],
+                                      256 * histogram.buckets)
+    #print first_nontrivial, shift_data, bounds
+    #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
+    code = 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max
+    map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data)
+    if first_nontrivial is None:
+        code += ('GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, value);\n' %
+                 histogram.name.upper())
     else:
-      mul = math.pow(float(histogram.max) / bounds[-1],
-                     1.0 / (histogram.buckets + 1 - len(bounds)))
-      nextb = int(math.ceil(bounds[-1] * mul))
-    if nextb <= bounds[-1] + 1:
-      nextb = bounds[-1] + 1
-    elif not done_trivial:
-      done_trivial = True
-      first_nontrivial = len(bounds)
-    bounds.append(nextb)
-  bounds_idx = decl_static_table(bounds, 'int')
-  if done_trivial:
-    first_nontrivial_code = dbl2u64(first_nontrivial)
-    code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds]
-    shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets)
-  #print first_nontrivial, shift_data, bounds
-  #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
-  code = 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max
-  map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data)
-  if first_nontrivial is None:
-    code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n'
-             % histogram.name.upper())
-  else:
-    code += 'if (value < %d) {\n' % first_nontrivial
-    code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n'
-             % histogram.name.upper())
-    code += 'return;\n'
-    code += '}'
-    first_nontrivial_code = dbl2u64(first_nontrivial)
-    if shift_data is not None:
-      map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table))
-      code += 'union { double dbl; uint64_t uint; } _val, _bkt;\n'
-      code += '_val.dbl = value;\n'
-      code += 'if (_val.uint < %dull) {\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code)
-      code += 'int bucket = '
-      code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d;\n' % (map_table_idx, first_nontrivial_code, shift_data[0], first_nontrivial)
-      code += '_bkt.dbl = grpc_stats_table_%d[bucket];\n' % bounds_idx
-      code += 'bucket -= (_val.uint < _bkt.uint);\n'
-      code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, bucket);\n' % histogram.name.upper()
-      code += 'return;\n'
-      code += '}\n'
-    code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, '% histogram.name.upper()
-    code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_%d, %d));\n' % (bounds_idx, histogram.buckets)
-  return (code, bounds_idx)
+        code += 'if (value < %d) {\n' % first_nontrivial
+        code += ('GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, value);\n' %
+                 histogram.name.upper())
+        code += 'return;\n'
+        code += '}'
+        first_nontrivial_code = dbl2u64(first_nontrivial)
+        if shift_data is not None:
+            map_table_idx = decl_static_table(map_table,
+                                              type_for_uint_table(map_table))
+            code += 'union { double dbl; uint64_t uint; } _val, _bkt;\n'
+            code += '_val.dbl = value;\n'
+            code += 'if (_val.uint < %dull) {\n' % (
+                (map_table[-1] << shift_data[0]) + first_nontrivial_code)
+            code += 'int bucket = '
+            code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d;\n' % (
+                map_table_idx, first_nontrivial_code, shift_data[0],
+                first_nontrivial)
+            code += '_bkt.dbl = grpc_stats_table_%d[bucket];\n' % bounds_idx
+            code += 'bucket -= (_val.uint < _bkt.uint);\n'
+            code += 'GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, bucket);\n' % histogram.name.upper(
+            )
+            code += 'return;\n'
+            code += '}\n'
+        code += 'GRPC_STATS_INC_HISTOGRAM(GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper(
+        )
+        code += 'grpc_stats_histo_find_bucket_slow(value, grpc_stats_table_%d, %d));\n' % (
+            bounds_idx, histogram.buckets)
+    return (code, bounds_idx)
+
 
 # utility: print a big comment block into a set of files
 def put_banner(files, banner):
-  for f in files:
-    print >>f, '/*'
-    for line in banner:
-      print >>f, ' * %s' % line
-    print >>f, ' */'
-    print >>f
+    for f in files:
+        print >> f, '/*'
+        for line in banner:
+            print >> f, ' * %s' % line
+        print >> f, ' */'
+        print >> f
+
 
 with open('src/core/lib/debug/stats_data.h', 'w') as H:
-  # copy-paste copyright notice from this file
-  with open(sys.argv[0]) as my_source:
-    copyright = []
-    for line in my_source:
-      if line[0] != '#': break
-    for line in my_source:
-      if line[0] == '#':
-        copyright.append(line)
-        break
-    for line in my_source:
-      if line[0] != '#':
-        break
-      copyright.append(line)
-    put_banner([H], [line[2:].rstrip() for line in copyright])
+    # copy-paste copyright notice from this file
+    with open(sys.argv[0]) as my_source:
+        copyright = []
+        for line in my_source:
+            if line[0] != '#': break
+        for line in my_source:
+            if line[0] == '#':
+                copyright.append(line)
+                break
+        for line in my_source:
+            if line[0] != '#':
+                break
+            copyright.append(line)
+        put_banner([H], [line[2:].rstrip() for line in copyright])
 
-  put_banner([H], ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
+    put_banner(
+        [H],
+        ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
 
-  print >>H, "#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
-  print >>H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
-  print >>H
-  print >>H, "#include <inttypes.h>"
-  print >>H, "#include \"src/core/lib/iomgr/exec_ctx.h\""
-  print >>H
-  print >>H, "#ifdef __cplusplus"
-  print >>H, "extern \"C\" {"
-  print >>H, "#endif"
-  print >>H
+    print >> H, "#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
+    print >> H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
+    print >> H
+    print >> H, "#include <inttypes.h>"
+    print >> H, "#include \"src/core/lib/iomgr/exec_ctx.h\""
+    print >> H
+    print >> H, "#ifdef __cplusplus"
+    print >> H, "extern \"C\" {"
+    print >> H, "#endif"
+    print >> H
 
-  for typename, instances in sorted(inst_map.items()):
-    print >>H, "typedef enum {"
-    for inst in instances:
-      print >>H, "  GRPC_STATS_%s_%s," % (typename.upper(), inst.name.upper())
-    print >>H, "  GRPC_STATS_%s_COUNT" % (typename.upper())
-    print >>H, "} grpc_stats_%ss;" % (typename.lower())
-    print >>H, "extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" % (
-        typename.lower(), typename.upper())
-    print >>H, "extern const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT];" % (
-        typename.lower(), typename.upper())
+    for typename, instances in sorted(inst_map.items()):
+        print >> H, "typedef enum {"
+        for inst in instances:
+            print >> H, "  GRPC_STATS_%s_%s," % (typename.upper(),
+                                                 inst.name.upper())
+        print >> H, "  GRPC_STATS_%s_COUNT" % (typename.upper())
+        print >> H, "} grpc_stats_%ss;" % (typename.lower())
+        print >> H, "extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" % (
+            typename.lower(), typename.upper())
+        print >> H, "extern const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT];" % (
+            typename.lower(), typename.upper())
 
-  histo_start = []
-  histo_buckets = []
-  histo_bucket_boundaries = []
+    histo_start = []
+    histo_buckets = []
+    histo_bucket_boundaries = []
 
-  print >>H, "typedef enum {"
-  first_slot = 0
-  for histogram in inst_map['Histogram']:
-    histo_start.append(first_slot)
-    histo_buckets.append(histogram.buckets)
-    print >>H, "  GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," % (histogram.name.upper(), first_slot)
-    print >>H, "  GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," % (histogram.name.upper(), histogram.buckets)
-    first_slot += histogram.buckets
-  print >>H, "  GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot
-  print >>H, "} grpc_stats_histogram_constants;"
+    print >> H, "typedef enum {"
+    first_slot = 0
+    for histogram in inst_map['Histogram']:
+        histo_start.append(first_slot)
+        histo_buckets.append(histogram.buckets)
+        print >> H, "  GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," % (
+            histogram.name.upper(), first_slot)
+        print >> H, "  GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," % (
+            histogram.name.upper(), histogram.buckets)
+        first_slot += histogram.buckets
+    print >> H, "  GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot
+    print >> H, "} grpc_stats_histogram_constants;"
 
-  for ctr in inst_map['Counter']:
-    print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx) " +
-                "GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_%s)") % (
-                ctr.name.upper(), ctr.name.upper())
-  for histogram in inst_map['Histogram']:
-    print >>H, "#define GRPC_STATS_INC_%s(exec_ctx, value) grpc_stats_inc_%s((exec_ctx), (int)(value))" % (
-        histogram.name.upper(), histogram.name.lower())
-    print >>H, "void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, int x);" % histogram.name.lower()
+    for ctr in inst_map['Counter']:
+        print >> H, ("#define GRPC_STATS_INC_%s() " +
+                     "GRPC_STATS_INC_COUNTER(GRPC_STATS_COUNTER_%s)") % (
+                         ctr.name.upper(), ctr.name.upper())
+    for histogram in inst_map['Histogram']:
+        print >> H, "#define GRPC_STATS_INC_%s(value) grpc_stats_inc_%s( (int)(value))" % (
+            histogram.name.upper(), histogram.name.lower())
+        print >> H, "void grpc_stats_inc_%s(int x);" % histogram.name.lower()
 
-  for i, tbl in enumerate(static_tables):
-    print >>H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i, len(tbl[1]))
+    for i, tbl in enumerate(static_tables):
+        print >> H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i,
+                                                                  len(tbl[1]))
 
-  print >>H, "extern const int grpc_stats_histo_buckets[%d];" % len(inst_map['Histogram'])
-  print >>H, "extern const int grpc_stats_histo_start[%d];" % len(inst_map['Histogram'])
-  print >>H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram'])
-  print >>H, "extern void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x);" % len(inst_map['Histogram'])
+    print >> H, "extern const int grpc_stats_histo_buckets[%d];" % len(
+        inst_map['Histogram'])
+    print >> H, "extern const int grpc_stats_histo_start[%d];" % len(
+        inst_map['Histogram'])
+    print >> H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % len(
+        inst_map['Histogram'])
+    print >> H, "extern void (*const grpc_stats_inc_histogram[%d])(int x);" % len(
+        inst_map['Histogram'])
 
-  print >>H
-  print >>H, "#ifdef __cplusplus"
-  print >>H, "}"
-  print >>H, "#endif"
-  print >>H
-  print >>H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */"
+    print >> H
+    print >> H, "#ifdef __cplusplus"
+    print >> H, "}"
+    print >> H, "#endif"
+    print >> H
+    print >> H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */"
 
 with open('src/core/lib/debug/stats_data.cc', 'w') as C:
-  # copy-paste copyright notice from this file
-  with open(sys.argv[0]) as my_source:
-    copyright = []
-    for line in my_source:
-      if line[0] != '#': break
-    for line in my_source:
-      if line[0] == '#':
-        copyright.append(line)
-        break
-    for line in my_source:
-      if line[0] != '#':
-        break
-      copyright.append(line)
-    put_banner([C], [line[2:].rstrip() for line in copyright])
+    # copy-paste copyright notice from this file
+    with open(sys.argv[0]) as my_source:
+        copyright = []
+        for line in my_source:
+            if line[0] != '#': break
+        for line in my_source:
+            if line[0] == '#':
+                copyright.append(line)
+                break
+        for line in my_source:
+            if line[0] != '#':
+                break
+            copyright.append(line)
+        put_banner([C], [line[2:].rstrip() for line in copyright])
 
-  put_banner([C], ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
+    put_banner(
+        [C],
+        ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
 
-  print >>C, "#include \"src/core/lib/debug/stats_data.h\""
-  print >>C, "#include \"src/core/lib/debug/stats.h\""
-  print >>C, "#include \"src/core/lib/iomgr/exec_ctx.h\""
-  print >>C, "#include <grpc/support/useful.h>"
+    print >> C, "#include \"src/core/lib/debug/stats_data.h\""
+    print >> C, "#include \"src/core/lib/debug/stats.h\""
+    print >> C, "#include \"src/core/lib/iomgr/exec_ctx.h\""
+    print >> C, "#include <grpc/support/useful.h>"
 
-  histo_code = []
-  for histogram in inst_map['Histogram']:
-    code, bounds_idx = gen_bucket_code(histogram)
-    histo_bucket_boundaries.append(bounds_idx)
-    histo_code.append(code)
+    histo_code = []
+    for histogram in inst_map['Histogram']:
+        code, bounds_idx = gen_bucket_code(histogram)
+        histo_bucket_boundaries.append(bounds_idx)
+        histo_code.append(code)
 
-  for typename, instances in sorted(inst_map.items()):
-    print >>C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % (
-        typename.lower(), typename.upper())
-    for inst in instances:
-      print >>C, "  %s," % c_str(inst.name)
-    print >>C, "};"
-    print >>C, "const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT] = {" % (
-        typename.lower(), typename.upper())
-    for inst in instances:
-      print >>C, "  %s," % c_str(inst.doc)
-    print >>C, "};"
+    for typename, instances in sorted(inst_map.items()):
+        print >> C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % (
+            typename.lower(), typename.upper())
+        for inst in instances:
+            print >> C, "  %s," % c_str(inst.name)
+        print >> C, "};"
+        print >> C, "const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT] = {" % (
+            typename.lower(), typename.upper())
+        for inst in instances:
+            print >> C, "  %s," % c_str(inst.doc)
+        print >> C, "};"
 
-  for i, tbl in enumerate(static_tables):
-    print >>C, "const %s grpc_stats_table_%d[%d] = {%s};" % (
-        tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1]))
+    for i, tbl in enumerate(static_tables):
+        print >> C, "const %s grpc_stats_table_%d[%d] = {%s};" % (
+            tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1]))
 
-  for histogram, code in zip(inst_map['Histogram'], histo_code):
-    print >>C, ("void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, int value) {%s}") % (
-                histogram.name.lower(),
-                code)
+    for histogram, code in zip(inst_map['Histogram'], histo_code):
+        print >> C, ("void grpc_stats_inc_%s(int value) {%s}") % (
+            histogram.name.lower(), code)
 
-  print >>C, "const int grpc_stats_histo_buckets[%d] = {%s};" % (
-      len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets))
-  print >>C, "const int grpc_stats_histo_start[%d] = {%s};" % (
-      len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start))
-  print >>C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % (
-      len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x for x in histo_bucket_boundaries))
-  print >>C, "void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x) = {%s};" % (
-      len(inst_map['Histogram']), ','.join('grpc_stats_inc_%s' % histogram.name.lower() for histogram in inst_map['Histogram']))
+    print >> C, "const int grpc_stats_histo_buckets[%d] = {%s};" % (
+        len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets))
+    print >> C, "const int grpc_stats_histo_start[%d] = {%s};" % (
+        len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start))
+    print >> C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % (
+        len(inst_map['Histogram']), ','.join(
+            'grpc_stats_table_%d' % x for x in histo_bucket_boundaries))
+    print >> C, "void (*const grpc_stats_inc_histogram[%d])(int x) = {%s};" % (
+        len(inst_map['Histogram']), ','.join(
+            'grpc_stats_inc_%s' % histogram.name.lower()
+            for histogram in inst_map['Histogram']))
 
 # patch qps_test bigquery schema
 RECORD_EXPLICIT_PERCENTILES = [50, 95, 99]
 
 with open('tools/run_tests/performance/scenario_result_schema.json', 'r') as f:
-  qps_schema = json.loads(f.read())
+    qps_schema = json.loads(f.read())
+
 
 def FindNamed(js, name):
-  for el in js:
-    if el['name'] == name:
-      return el
+    for el in js:
+        if el['name'] == name:
+            return el
+
 
 def RemoveCoreFields(js):
-  new_fields = []
-  for field in js['fields']:
-    if not field['name'].startswith('core_'):
-      new_fields.append(field)
-  js['fields'] = new_fields
+    new_fields = []
+    for field in js['fields']:
+        if not field['name'].startswith('core_'):
+            new_fields.append(field)
+    js['fields'] = new_fields
+
 
 RemoveCoreFields(FindNamed(qps_schema, 'clientStats'))
 RemoveCoreFields(FindNamed(qps_schema, 'serverStats'))
 
+
 def AddCoreFields(js):
-  for counter in inst_map['Counter']:
-    js['fields'].append({
-      'name': 'core_%s' % counter.name,
-      'type': 'INTEGER',
-      'mode': 'NULLABLE'
-    })
-  for histogram in inst_map['Histogram']:
-    js['fields'].append({
-      'name': 'core_%s' % histogram.name,
-      'type': 'STRING',
-      'mode': 'NULLABLE'
-    })
-    js['fields'].append({
-      'name': 'core_%s_bkts' % histogram.name,
-      'type': 'STRING',
-      'mode': 'NULLABLE'
-    })
-    for pctl in RECORD_EXPLICIT_PERCENTILES:
-      js['fields'].append({
-        'name': 'core_%s_%dp' % (histogram.name, pctl),
-        'type': 'FLOAT',
-        'mode': 'NULLABLE'
-      })
+    for counter in inst_map['Counter']:
+        js['fields'].append({
+            'name': 'core_%s' % counter.name,
+            'type': 'INTEGER',
+            'mode': 'NULLABLE'
+        })
+    for histogram in inst_map['Histogram']:
+        js['fields'].append({
+            'name': 'core_%s' % histogram.name,
+            'type': 'STRING',
+            'mode': 'NULLABLE'
+        })
+        js['fields'].append({
+            'name': 'core_%s_bkts' % histogram.name,
+            'type': 'STRING',
+            'mode': 'NULLABLE'
+        })
+        for pctl in RECORD_EXPLICIT_PERCENTILES:
+            js['fields'].append({
+                'name': 'core_%s_%dp' % (histogram.name, pctl),
+                'type': 'FLOAT',
+                'mode': 'NULLABLE'
+            })
+
 
 AddCoreFields(FindNamed(qps_schema, 'clientStats'))
 AddCoreFields(FindNamed(qps_schema, 'serverStats'))
 
 with open('tools/run_tests/performance/scenario_result_schema.json', 'w') as f:
-  f.write(json.dumps(qps_schema, indent=2, sort_keys=True))
+    f.write(json.dumps(qps_schema, indent=2, sort_keys=True))
 
 # and generate a helper script to massage scenario results into the format we'd
 # like to query
 with open('tools/run_tests/performance/massage_qps_stats.py', 'w') as P:
-  with open(sys.argv[0]) as my_source:
-    for line in my_source:
-      if line[0] != '#': break
-    for line in my_source:
-      if line[0] == '#':
-        print >>P, line.rstrip()
-        break
-    for line in my_source:
-      if line[0] != '#':
-        break
-      print >>P, line.rstrip()
+    with open(sys.argv[0]) as my_source:
+        for line in my_source:
+            if line[0] != '#': break
+        for line in my_source:
+            if line[0] == '#':
+                print >> P, line.rstrip()
+                break
+        for line in my_source:
+            if line[0] != '#':
+                break
+            print >> P, line.rstrip()
 
-  print >>P
-  print >>P, '# Autogenerated by tools/codegen/core/gen_stats_data.py'
-  print >>P
+    print >> P
+    print >> P, '# Autogenerated by tools/codegen/core/gen_stats_data.py'
+    print >> P
 
-  print >>P, 'import massage_qps_stats_helpers'
+    print >> P, 'import massage_qps_stats_helpers'
 
-  print >>P, 'def massage_qps_stats(scenario_result):'
-  print >>P, '  for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:'
-  print >>P, '    if "coreStats" not in stats: return'
-  print >>P, '    core_stats = stats["coreStats"]'
-  print >>P, '    del stats["coreStats"]'
-  for counter in inst_map['Counter']:
-    print >>P, '    stats["core_%s"] = massage_qps_stats_helpers.counter(core_stats, "%s")' % (counter.name, counter.name)
-  for i, histogram in enumerate(inst_map['Histogram']):
-    print >>P, '    h = massage_qps_stats_helpers.histogram(core_stats, "%s")' % histogram.name
-    print >>P, '    stats["core_%s"] = ",".join("%%f" %% x for x in h.buckets)' % histogram.name
-    print >>P, '    stats["core_%s_bkts"] = ",".join("%%f" %% x for x in h.boundaries)' % histogram.name
-    for pctl in RECORD_EXPLICIT_PERCENTILES:
-      print >>P, '    stats["core_%s_%dp"] = massage_qps_stats_helpers.percentile(h.buckets, %d, h.boundaries)' % (
-          histogram.name, pctl, pctl)
+    print >> P, 'def massage_qps_stats(scenario_result):'
+    print >> P, '  for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:'
+    print >> P, '    if "coreStats" not in stats: return'
+    print >> P, '    core_stats = stats["coreStats"]'
+    print >> P, '    del stats["coreStats"]'
+    for counter in inst_map['Counter']:
+        print >> P, '    stats["core_%s"] = massage_qps_stats_helpers.counter(core_stats, "%s")' % (
+            counter.name, counter.name)
+    for i, histogram in enumerate(inst_map['Histogram']):
+        print >> P, '    h = massage_qps_stats_helpers.histogram(core_stats, "%s")' % histogram.name
+        print >> P, '    stats["core_%s"] = ",".join("%%f" %% x for x in h.buckets)' % histogram.name
+        print >> P, '    stats["core_%s_bkts"] = ",".join("%%f" %% x for x in h.boundaries)' % histogram.name
+        for pctl in RECORD_EXPLICIT_PERCENTILES:
+            print >> P, '    stats["core_%s_%dp"] = massage_qps_stats_helpers.percentile(h.buckets, %d, h.boundaries)' % (
+                histogram.name, pctl, pctl)
 
 with open('src/core/lib/debug/stats_data_bq_schema.sql', 'w') as S:
-  columns = []
-  for counter in inst_map['Counter']:
-    columns.append(('%s_per_iteration' % counter.name, 'FLOAT'))
-  print >>S, ',\n'.join('%s:%s' % x for x in columns)
-
+    columns = []
+    for counter in inst_map['Counter']:
+        columns.append(('%s_per_iteration' % counter.name, 'FLOAT'))
+    print >> S, ',\n'.join('%s:%s' % x for x in columns)
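
The fast path emitted by gen_bucket_code() hinges on dbl2u64: for non-negative
IEEE-754 doubles the raw 64-bit pattern is ordered the same way as the values,
so the generated C can bucket a sample by shifting that pattern and indexing a
small table, and find_ideal_shift() simply looks for the largest shift that
still keeps adjacent bucket boundaries distinguishable. A self-contained check
of that premise, with made-up boundaries:

    import ctypes

    def dbl2u64(d):               # same reinterpretation the generator uses
        return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value

    bounds = [1, 2, 4, 8, 16, 64, 256, 1024]       # hypothetical boundaries
    mapped = [dbl2u64(float(b)) for b in bounds]
    assert mapped == sorted(mapped)                # bit patterns stay ordered

    def shift_works_until(mapped_bounds, shift_bits):
        # how many adjacent boundaries remain distinct after dropping
        # shift_bits low-order bits -- the generator's feasibility test
        for i, (a, b) in enumerate(zip(mapped_bounds, mapped_bounds[1:])):
            if (a >> shift_bits) == (b >> shift_bits):
                return i
        return len(mapped_bounds)

    assert shift_works_until(mapped, 52) == len(bounds)  # exponents alone suffice here
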
diff --git a/tools/debug/core/chttp2_ref_leak.py b/tools/debug/core/chttp2_ref_leak.py
index d693dd9..a6a5448 100755
--- a/tools/debug/core/chttp2_ref_leak.py
+++ b/tools/debug/core/chttp2_ref_leak.py
@@ -20,8 +20,10 @@
 import sys
 import re
 
+
 def new_obj():
-  return ['destroy']
+    return ['destroy']
+
 
 outstanding = collections.defaultdict(new_obj)
 
@@ -29,14 +31,14 @@
 # chttp2:unref:0x629000005200 2->1 destroy [src/core/ext/transport/chttp2/transport/chttp2_transport.c:599]
 
 for line in sys.stdin:
-  m = re.search(r'chttp2:(  ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line)
-  if m:
-    if m.group(1) == '  ref':
-      outstanding[m.group(2)].append(m.group(3))
-    else:
-      outstanding[m.group(2)].remove(m.group(3))
+    m = re.search(
+        r'chttp2:(  ref|unref):0x([a-fA-F0-9]+) [^ ]+ ([^[]+) \[(.*)\]', line)
+    if m:
+        if m.group(1) == '  ref':
+            outstanding[m.group(2)].append(m.group(3))
+        else:
+            outstanding[m.group(2)].remove(m.group(3))
 
 for obj, remaining in outstanding.items():
-  if remaining:
-    print 'LEAKED: %s %r' % (obj, remaining)
-
+    if remaining:
+        print 'LEAKED: %s %r' % (obj, remaining)
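
The bookkeeping above boils down to: every chttp2 object starts owing one
implicit 'destroy' unref, each logged ref pushes its reason, each unref pops
it, and whatever is still outstanding at end of input is a leak. A toy run
with fabricated events (real use parses the chttp2 refcount log lines shown
in the comment above):

    import collections

    outstanding = collections.defaultdict(lambda: ['destroy'])

    events = [                      # (kind, object, reason) -- made up
        ('ref',   '0xdeadbeef', 'stream'),
        ('unref', '0xdeadbeef', 'stream'),
        ('unref', '0xdeadbeef', 'destroy'),
        ('ref',   '0xfeedface', 'writing'),   # never unref'd -> leaks
    ]
    for kind, obj, reason in events:
        if kind == 'ref':
            outstanding[obj].append(reason)
        else:
            outstanding[obj].remove(reason)

    leaked = {obj: rem for obj, rem in outstanding.items() if rem}
    assert leaked == {'0xfeedface': ['destroy', 'writing']}
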
diff --git a/tools/debug/core/error_ref_leak.py b/tools/debug/core/error_ref_leak.py
index 6582328..7e206c2 100644
--- a/tools/debug/core/error_ref_leak.py
+++ b/tools/debug/core/error_ref_leak.py
@@ -26,22 +26,22 @@
 
 errs = []
 for line in data:
-  # if we care about the line
-  if re.search(r'error.cc', line):
-    # str manip to cut off left part of log line
-    line = line.partition('error.cc:')[-1]
-    line = re.sub(r'\d+] ', r'', line)
-    line = line.strip().split()
-    err = line[0].strip(":")
-    if line[1] == "create":
-      assert(err not in errs)
-      errs.append(err)
-    elif line[0] == "realloc":
-      errs.remove(line[1])
-      errs.append(line[3])
-    # explicitly look for the last dereference 
-    elif line[1] == "1" and line[3] == "0":
-      assert(err in errs)
-      errs.remove(err)
+    # if we care about the line
+    if re.search(r'error.cc', line):
+        # str manip to cut off left part of log line
+        line = line.partition('error.cc:')[-1]
+        line = re.sub(r'\d+] ', r'', line)
+        line = line.strip().split()
+        err = line[0].strip(":")
+        if line[1] == "create":
+            assert (err not in errs)
+            errs.append(err)
+        elif line[0] == "realloc":
+            errs.remove(line[1])
+            errs.append(line[3])
+        # explicitly look for the last dereference
+        elif line[1] == "1" and line[3] == "0":
+            assert (err in errs)
+            errs.remove(err)
 
 print "leaked:", errs
diff --git a/tools/distrib/c-ish/check_documentation.py b/tools/distrib/c-ish/check_documentation.py
index 24da005..fef8f4e 100755
--- a/tools/distrib/c-ish/check_documentation.py
+++ b/tools/distrib/c-ish/check_documentation.py
@@ -22,24 +22,15 @@
 
 # where do we run
 _TARGET_DIRS = [
-  'include/grpc',
-  'include/grpc++',
-  'src/core',
-  'src/cpp',
-  'test/core',
-  'test/cpp'
+    'include/grpc', 'include/grpc++', 'src/core', 'src/cpp', 'test/core',
+    'test/cpp'
 ]
 
 # which file extensions do we care about
-_INTERESTING_EXTENSIONS = [
-  '.c',
-  '.h',
-  '.cc'
-]
+_INTERESTING_EXTENSIONS = ['.c', '.h', '.cc']
 
 # find our home
-_ROOT = os.path.abspath(
-    os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
+_ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
 os.chdir(_ROOT)
 
 errors = 0
@@ -47,30 +38,30 @@
 # walk directories, find things
 printed_banner = False
 for target_dir in _TARGET_DIRS:
-  for root, dirs, filenames in os.walk(target_dir):
-    if 'README.md' not in filenames:
-      if not printed_banner:
-        print 'Missing README.md'
-        print '================='
-        printed_banner = True
-      print root
-      errors += 1
+    for root, dirs, filenames in os.walk(target_dir):
+        if 'README.md' not in filenames:
+            if not printed_banner:
+                print 'Missing README.md'
+                print '================='
+                printed_banner = True
+            print root
+            errors += 1
 if printed_banner: print
 printed_banner = False
 for target_dir in _TARGET_DIRS:
-  for root, dirs, filenames in os.walk(target_dir):
-    for filename in filenames:
-      if os.path.splitext(filename)[1] not in _INTERESTING_EXTENSIONS:
-        continue
-      path = os.path.join(root, filename)
-      with open(path) as f:
-        contents = f.read()
-      if '\\file' not in contents:
-        if not printed_banner:
-          print 'Missing \\file comment'
-          print '======================'
-          printed_banner = True
-        print path
-        errors += 1
+    for root, dirs, filenames in os.walk(target_dir):
+        for filename in filenames:
+            if os.path.splitext(filename)[1] not in _INTERESTING_EXTENSIONS:
+                continue
+            path = os.path.join(root, filename)
+            with open(path) as f:
+                contents = f.read()
+            if '\\file' not in contents:
+                if not printed_banner:
+                    print 'Missing \\file comment'
+                    print '======================'
+                    printed_banner = True
+                print path
+                errors += 1
 
 assert errors == 0, 'error count = %d' % errors
diff --git a/tools/distrib/check_copyright.py b/tools/distrib/check_copyright.py
index 6ecaced..8f782e0 100755
--- a/tools/distrib/check_copyright.py
+++ b/tools/distrib/check_copyright.py
@@ -22,149 +22,137 @@
 import subprocess
 
 # find our home
-ROOT = os.path.abspath(
-    os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(ROOT)
 
 # parse command line
 argp = argparse.ArgumentParser(description='copyright checker')
-argp.add_argument('-o', '--output',
-                  default='details',
-                  choices=['list', 'details'])
-argp.add_argument('-s', '--skips',
-                  default=0,
-                  action='store_const',
-                  const=1)
-argp.add_argument('-a', '--ancient',
-                  default=0,
-                  action='store_const',
-                  const=1)
-argp.add_argument('--precommit',
-                  default=False,
-                  action='store_true')
+argp.add_argument(
+    '-o', '--output', default='details', choices=['list', 'details'])
+argp.add_argument('-s', '--skips', default=0, action='store_const', const=1)
+argp.add_argument('-a', '--ancient', default=0, action='store_const', const=1)
+argp.add_argument('--precommit', default=False, action='store_true')
 args = argp.parse_args()
 
 # open the license text
 with open('NOTICE.txt') as f:
-  LICENSE_NOTICE = f.read().splitlines()
-
+    LICENSE_NOTICE = f.read().splitlines()
 
 # license format by file extension
 # key is the file extension, value is a format string
 # that given a line of license text, returns what should
 # be in the file
 LICENSE_PREFIX = {
-  '.bat':       r'@rem\s*',
-  '.c':         r'\s*(?://|\*)\s*',
-  '.cc':        r'\s*(?://|\*)\s*',
-  '.h':         r'\s*(?://|\*)\s*',
-  '.m':         r'\s*\*\s*',
-  '.php':       r'\s*\*\s*',
-  '.js':        r'\s*\*\s*',
-  '.py':        r'#\s*',
-  '.pyx':       r'#\s*',
-  '.pxd':       r'#\s*',
-  '.pxi':       r'#\s*',
-  '.rb':        r'#\s*',
-  '.sh':        r'#\s*',
-  '.proto':     r'//\s*',
-  '.cs':        r'//\s*',
-  '.mak':       r'#\s*',
-  'Makefile':   r'#\s*',
-  'Dockerfile': r'#\s*',
-  'BUILD':      r'#\s*',
+    '.bat': r'@rem\s*',
+    '.c': r'\s*(?://|\*)\s*',
+    '.cc': r'\s*(?://|\*)\s*',
+    '.h': r'\s*(?://|\*)\s*',
+    '.m': r'\s*\*\s*',
+    '.php': r'\s*\*\s*',
+    '.js': r'\s*\*\s*',
+    '.py': r'#\s*',
+    '.pyx': r'#\s*',
+    '.pxd': r'#\s*',
+    '.pxi': r'#\s*',
+    '.rb': r'#\s*',
+    '.sh': r'#\s*',
+    '.proto': r'//\s*',
+    '.cs': r'//\s*',
+    '.mak': r'#\s*',
+    'Makefile': r'#\s*',
+    'Dockerfile': r'#\s*',
+    'BUILD': r'#\s*',
 }
 
 _EXEMPT = frozenset((
-  # Generated protocol compiler output.
-  'examples/python/helloworld/helloworld_pb2.py',
-  'examples/python/helloworld/helloworld_pb2_grpc.py',
-  'examples/python/multiplex/helloworld_pb2.py',
-  'examples/python/multiplex/helloworld_pb2_grpc.py',
-  'examples/python/multiplex/route_guide_pb2.py',
-  'examples/python/multiplex/route_guide_pb2_grpc.py',
-  'examples/python/route_guide/route_guide_pb2.py',
-  'examples/python/route_guide/route_guide_pb2_grpc.py',
+    # Generated protocol compiler output.
+    'examples/python/helloworld/helloworld_pb2.py',
+    'examples/python/helloworld/helloworld_pb2_grpc.py',
+    'examples/python/multiplex/helloworld_pb2.py',
+    'examples/python/multiplex/helloworld_pb2_grpc.py',
+    'examples/python/multiplex/route_guide_pb2.py',
+    'examples/python/multiplex/route_guide_pb2_grpc.py',
+    'examples/python/route_guide/route_guide_pb2.py',
+    'examples/python/route_guide/route_guide_pb2_grpc.py',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
+    'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
+    'src/cpp/server/health/health.pb.h',
+    'src/cpp/server/health/health.pb.c',
 
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
-  'src/core/ext/filters/client_channel/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
-  'src/cpp/server/health/health.pb.h',
-  'src/cpp/server/health/health.pb.c',
-
-  # An older file originally from outside gRPC.
-  'src/php/tests/bootstrap.php',
-  # census.proto copied from github
-  'tools/grpcz/census.proto',
-  # status.proto copied from googleapis
-  'src/proto/grpc/status/status.proto',
+    # An older file originally from outside gRPC.
+    'src/php/tests/bootstrap.php',
+    # census.proto copied from github
+    'tools/grpcz/census.proto',
+    # status.proto copied from googleapis
+    'src/proto/grpc/status/status.proto',
 ))
 
-
 RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+) gRPC authors.'
 RE_LICENSE = dict(
-    (k, r'\n'.join(
-        LICENSE_PREFIX[k] +
-        (RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
-        for line in LICENSE_NOTICE))
-     for k, v in LICENSE_PREFIX.iteritems())
+    (k, r'\n'.join(LICENSE_PREFIX[k] +
+                   (RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
+                   for line in LICENSE_NOTICE))
+    for k, v in LICENSE_PREFIX.iteritems())
 
 if args.precommit:
-  FILE_LIST_COMMAND = 'git status -z | grep -Poz \'(?<=^[MARC][MARCD ] )[^\s]+\''
+    FILE_LIST_COMMAND = 'git status -z | grep -Poz \'(?<=^[MARC][MARCD ] )[^\s]+\''
 else:
-  FILE_LIST_COMMAND = 'git ls-tree -r --name-only -r HEAD | ' \
-                      'grep -v ^third_party/ |' \
-                      'grep -v "\(ares_config.h\|ares_build.h\)"'
+    FILE_LIST_COMMAND = 'git ls-tree -r --name-only -r HEAD | ' \
+                        'grep -v ^third_party/ |' \
+                        'grep -v "\(ares_config.h\|ares_build.h\)"'
+
 
 def load(name):
-  with open(name) as f:
-    return f.read()
+    with open(name) as f:
+        return f.read()
+
 
 def save(name, text):
-  with open(name, 'w') as f:
-    f.write(text)
+    with open(name, 'w') as f:
+        f.write(text)
 
 
-assert(re.search(RE_LICENSE['Makefile'], load('Makefile')))
+assert (re.search(RE_LICENSE['Makefile'], load('Makefile')))
 
 
 def log(cond, why, filename):
-  if not cond: return
-  if args.output == 'details':
-    print '%s: %s' % (why, filename)
-  else:
-    print filename
+    if not cond: return
+    if args.output == 'details':
+        print '%s: %s' % (why, filename)
+    else:
+        print filename
 
 
 # scan files, validate the text
 ok = True
 filename_list = []
 try:
-  filename_list = subprocess.check_output(FILE_LIST_COMMAND,
-                                          shell=True).splitlines()
+    filename_list = subprocess.check_output(
+        FILE_LIST_COMMAND, shell=True).splitlines()
 except subprocess.CalledProcessError:
-  sys.exit(0)
+    sys.exit(0)
 
 for filename in filename_list:
-  if filename in _EXEMPT:
-    continue
-  ext = os.path.splitext(filename)[1]
-  base = os.path.basename(filename)
-  if ext in RE_LICENSE:
-    re_license = RE_LICENSE[ext]
-  elif base in RE_LICENSE:
-    re_license = RE_LICENSE[base]
-  else:
-    log(args.skips, 'skip', filename)
-    continue
-  try:
-    text = load(filename)
-  except:
-    continue
-  m = re.search(re_license, text)
-  if m:
-    pass
-  elif 'DO NOT EDIT' not in text and filename != 'src/boringssl/err_data.c':
-    log(1, 'copyright missing', filename)
-    ok = False
+    if filename in _EXEMPT:
+        continue
+    ext = os.path.splitext(filename)[1]
+    base = os.path.basename(filename)
+    if ext in RE_LICENSE:
+        re_license = RE_LICENSE[ext]
+    elif base in RE_LICENSE:
+        re_license = RE_LICENSE[base]
+    else:
+        log(args.skips, 'skip', filename)
+        continue
+    try:
+        text = load(filename)
+    except:
+        continue
+    m = re.search(re_license, text)
+    if m:
+        pass
+    elif 'DO NOT EDIT' not in text and filename != 'src/boringssl/err_data.c':
+        log(1, 'copyright missing', filename)
+        ok = False
 
 sys.exit(0 if ok else 1)
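For reference, check_copyright.py combines each per-extension prefix with the NOTICE lines into a single multi-line regex per file type: year lines are matched loosely via RE_YEAR, every other line literally. A minimal sketch of that construction (not part of the patch; it inlines a two-line notice instead of reading NOTICE.txt and uses Python-3 dict methods):

import re

NOTICE = [
    'Copyright 2017 gRPC authors.',
    'Licensed under the Apache License, Version 2.0 (the "License");',
]
LICENSE_PREFIX = {'.py': r'#\s*', '.cc': r'\s*(?://|\*)\s*'}
RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+) gRPC authors.'

RE_LICENSE = {
    ext: r'\n'.join(prefix +
                    (RE_YEAR if re.search(RE_YEAR, line) else re.escape(line))
                    for line in NOTICE)
    for ext, prefix in LICENSE_PREFIX.items()
}

# A header with a different year range still matches; a missing notice does not.
header = ('# Copyright 2015-2017 gRPC authors.\n'
          '# Licensed under the Apache License, Version 2.0 (the "License");\n')
assert re.search(RE_LICENSE['.py'], header)
assert not re.search(RE_LICENSE['.py'], '# unrelated comment\n')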
diff --git a/tools/distrib/check_include_guards.py b/tools/distrib/check_include_guards.py
index e46f1c9..6fc606f 100755
--- a/tools/distrib/check_include_guards.py
+++ b/tools/distrib/check_include_guards.py
@@ -23,136 +23,136 @@
 
 
 def build_valid_guard(fpath):
-  prefix = 'GRPC_' if not fpath.startswith('include/') else ''
-  return prefix + '_'.join(fpath.replace('++', 'XX').replace('.', '_').upper().split('/')[1:])
+    prefix = 'GRPC_' if not fpath.startswith('include/') else ''
+    return prefix + '_'.join(
+        fpath.replace('++', 'XX').replace('.', '_').upper().split('/')[1:])
 
 
 def load(fpath):
-  with open(fpath, 'r') as f:
-    return f.read()
+    with open(fpath, 'r') as f:
+        return f.read()
 
 
 def save(fpath, contents):
-  with open(fpath, 'w') as f:
-    f.write(contents)
+    with open(fpath, 'w') as f:
+        f.write(contents)
 
 
 class GuardValidator(object):
-  def __init__(self):
-    self.ifndef_re = re.compile(r'#ifndef ([A-Z][A-Z_1-9]*)')
-    self.define_re = re.compile(r'#define ([A-Z][A-Z_1-9]*)')
-    self.endif_c_re = re.compile(r'#endif /\* ([A-Z][A-Z_1-9]*) (?:\\ *\n *)?\*/')
-    self.endif_cpp_re = re.compile(r'#endif  // ([A-Z][A-Z_1-9]*)')
-    self.failed = False
 
-  def fail(self, fpath, regexp, fcontents, match_txt, correct, fix):
-    cpp_header = 'grpc++' in fpath
-    self.failed = True
-    invalid_guards_msg_template = (
-        '{0}: Missing preprocessor guards (RE {1}). '
-        'Please wrap your code around the following guards:\n'
-        '#ifndef {2}\n'
-        '#define {2}\n'
-        '...\n'
-        '... epic code ...\n'
-        '...\n') + ('#endif  // {2}' if cpp_header else '#endif /* {2} */')
-    if not match_txt:
-      print invalid_guards_msg_template.format(fpath, regexp.pattern,
-                                               build_valid_guard(fpath))
-      return fcontents
-
-    print ('{}: Wrong preprocessor guards (RE {}):'
-           '\n\tFound {}, expected {}').format(
-        fpath, regexp.pattern, match_txt, correct)
-    if fix:
-      print 'Fixing {}...\n'.format(fpath)
-      fixed_fcontents = re.sub(match_txt, correct, fcontents)
-      if fixed_fcontents:
+    def __init__(self):
+        self.ifndef_re = re.compile(r'#ifndef ([A-Z][A-Z_1-9]*)')
+        self.define_re = re.compile(r'#define ([A-Z][A-Z_1-9]*)')
+        self.endif_c_re = re.compile(
+            r'#endif /\* ([A-Z][A-Z_1-9]*) (?:\\ *\n *)?\*/')
+        self.endif_cpp_re = re.compile(r'#endif  // ([A-Z][A-Z_1-9]*)')
         self.failed = False
-      return fixed_fcontents
-    else:
-      print
-    return fcontents
 
-  def check(self, fpath, fix):
-    cpp_header = 'grpc++' in fpath
-    valid_guard = build_valid_guard(fpath)
+    def fail(self, fpath, regexp, fcontents, match_txt, correct, fix):
+        cpp_header = 'grpc++' in fpath
+        self.failed = True
+        invalid_guards_msg_template = (
+            '{0}: Missing preprocessor guards (RE {1}). '
+            'Please wrap your code around the following guards:\n'
+            '#ifndef {2}\n'
+            '#define {2}\n'
+            '...\n'
+            '... epic code ...\n'
+            '...\n') + ('#endif  // {2}' if cpp_header else '#endif /* {2} */')
+        if not match_txt:
+            print invalid_guards_msg_template.format(fpath, regexp.pattern,
+                                                     build_valid_guard(fpath))
+            return fcontents
 
-    fcontents = load(fpath)
-
-    match = self.ifndef_re.search(fcontents)
-    if not match:
-      print 'something drastically wrong with: %s' % fpath
-      return False # failed
-    if match.lastindex is None:
-      # No ifndef. Request manual addition with hints
-      self.fail(fpath, match.re, match.string, '', '', False)
-      return False  # failed
-
-    # Does the guard end with a '_H'?
-    running_guard = match.group(1)
-    if not running_guard.endswith('_H'):
-      fcontents = self.fail(fpath, match.re, match.string, match.group(1),
-                            valid_guard, fix)
-      if fix: save(fpath, fcontents)
-
-    # Is it the expected one based on the file path?
-    if running_guard != valid_guard:
-      fcontents = self.fail(fpath, match.re, match.string, match.group(1),
-                            valid_guard, fix)
-      if fix: save(fpath, fcontents)
-
-    # Is there a #define? Is it the same as the #ifndef one?
-    match = self.define_re.search(fcontents)
-    if match.lastindex is None:
-      # No define. Request manual addition with hints
-      self.fail(fpath, match.re, match.string, '', '', False)
-      return False  # failed
-
-    # Is the #define guard the same as the #ifndef guard?
-    if match.group(1) != running_guard:
-      fcontents = self.fail(fpath, match.re, match.string, match.group(1),
-                            valid_guard, fix)
-      if fix: save(fpath, fcontents)
-
-    # Is there a properly commented #endif?
-    endif_re = self.endif_cpp_re if cpp_header else self.endif_c_re
-    flines = fcontents.rstrip().splitlines()
-    match = endif_re.search('\n'.join(flines[-2:]))
-    if not match:
-      # No endif. Check if we have the last line as just '#endif' and if so
-      # replace it with a properly commented one.
-      if flines[-1] == '#endif':
-        flines[-1] = ('#endif' +
-                      ('  // {}\n'.format(valid_guard) if cpp_header
-                       else ' /* {} */\n'.format(valid_guard)))
+        print('{}: Wrong preprocessor guards (RE {}):'
+              '\n\tFound {}, expected {}').format(fpath, regexp.pattern,
+                                                  match_txt, correct)
         if fix:
-            fcontents = '\n'.join(flines)
-            save(fpath, fcontents)
-      else:
-        # something else is wrong, bail out
-        self.fail(fpath, endif_re, flines[-1], '', '', False)
-    elif match.group(1) != running_guard:
-      # Is the #endif guard the same as the #ifndef and #define guards?
-      fcontents = self.fail(fpath, endif_re, fcontents, match.group(1),
-                            valid_guard, fix)
-      if fix: save(fpath, fcontents)
+            print 'Fixing {}...\n'.format(fpath)
+            fixed_fcontents = re.sub(match_txt, correct, fcontents)
+            if fixed_fcontents:
+                self.failed = False
+            return fixed_fcontents
+        else:
+            print
+        return fcontents
 
-    return not self.failed  # Did the check succeed? (ie, not failed)
+    def check(self, fpath, fix):
+        cpp_header = 'grpc++' in fpath
+        valid_guard = build_valid_guard(fpath)
+
+        fcontents = load(fpath)
+
+        match = self.ifndef_re.search(fcontents)
+        if not match:
+            print 'something drastically wrong with: %s' % fpath
+            return False  # failed
+        if match.lastindex is None:
+            # No ifndef. Request manual addition with hints
+            self.fail(fpath, match.re, match.string, '', '', False)
+            return False  # failed
+
+        # Does the guard end with a '_H'?
+        running_guard = match.group(1)
+        if not running_guard.endswith('_H'):
+            fcontents = self.fail(fpath, match.re, match.string, match.group(1),
+                                  valid_guard, fix)
+            if fix: save(fpath, fcontents)
+
+        # Is it the expected one based on the file path?
+        if running_guard != valid_guard:
+            fcontents = self.fail(fpath, match.re, match.string, match.group(1),
+                                  valid_guard, fix)
+            if fix: save(fpath, fcontents)
+
+        # Is there a #define? Is it the same as the #ifndef one?
+        match = self.define_re.search(fcontents)
+        if match.lastindex is None:
+            # No define. Request manual addition with hints
+            self.fail(fpath, match.re, match.string, '', '', False)
+            return False  # failed
+
+        # Is the #define guard the same as the #ifndef guard?
+        if match.group(1) != running_guard:
+            fcontents = self.fail(fpath, match.re, match.string, match.group(1),
+                                  valid_guard, fix)
+            if fix: save(fpath, fcontents)
+
+        # Is there a properly commented #endif?
+        endif_re = self.endif_cpp_re if cpp_header else self.endif_c_re
+        flines = fcontents.rstrip().splitlines()
+        match = endif_re.search('\n'.join(flines[-2:]))
+        if not match:
+            # No endif. Check if we have the last line as just '#endif' and if so
+            # replace it with a properly commented one.
+            if flines[-1] == '#endif':
+                flines[-1] = (
+                    '#endif' +
+                    ('  // {}\n'.format(valid_guard)
+                     if cpp_header else ' /* {} */\n'.format(valid_guard)))
+                if fix:
+                    fcontents = '\n'.join(flines)
+                    save(fpath, fcontents)
+            else:
+                # something else is wrong, bail out
+                self.fail(fpath, endif_re, flines[-1], '', '', False)
+        elif match.group(1) != running_guard:
+            # Is the #endif guard the same as the #ifndef and #define guards?
+            fcontents = self.fail(fpath, endif_re, fcontents, match.group(1),
+                                  valid_guard, fix)
+            if fix: save(fpath, fcontents)
+
+        return not self.failed  # Did the check succeed? (ie, not failed)
+
 
 # find our home
-ROOT = os.path.abspath(
-    os.path.join(os.path.dirname(sys.argv[0]), '../..'))
+ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(ROOT)
 
 # parse command line
 argp = argparse.ArgumentParser(description='include guard checker')
-argp.add_argument('-f', '--fix',
-                  default=False,
-                  action='store_true');
-argp.add_argument('--precommit',
-                  default=False,
-                  action='store_true')
+argp.add_argument('-f', '--fix', default=False, action='store_true')
+argp.add_argument('--precommit', default=False, action='store_true')
 args = argp.parse_args()
 
 KNOWN_BAD = set([
@@ -161,12 +161,11 @@
     'include/grpc++/ext/reflection.pb.h',
 ])
 
-
 grep_filter = r"grep -E '^(include|src/core)/.*\.h$'"
 if args.precommit:
-  git_command = 'git diff --name-only HEAD'
+    git_command = 'git diff --name-only HEAD'
 else:
-  git_command = 'git ls-tree -r --name-only -r HEAD'
+    git_command = 'git ls-tree -r --name-only -r HEAD'
 
 FILE_LIST_COMMAND = ' | '.join((git_command, grep_filter))
 
@@ -174,17 +173,17 @@
 ok = True
 filename_list = []
 try:
-  filename_list = subprocess.check_output(FILE_LIST_COMMAND,
-                                          shell=True).splitlines()
-  # Filter out non-existent files (ie, file removed or renamed)
-  filename_list = (f for f in filename_list if os.path.isfile(f))
+    filename_list = subprocess.check_output(
+        FILE_LIST_COMMAND, shell=True).splitlines()
+    # Filter out non-existent files (ie, file removed or renamed)
+    filename_list = (f for f in filename_list if os.path.isfile(f))
 except subprocess.CalledProcessError:
-  sys.exit(0)
+    sys.exit(0)
 
 validator = GuardValidator()
 
 for filename in filename_list:
-  if filename in KNOWN_BAD: continue
-  ok = ok and validator.check(filename, args.fix)
+    if filename in KNOWN_BAD: continue
+    ok = ok and validator.check(filename, args.fix)
 
 sys.exit(0 if ok else 1)
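The guard-name derivation in build_valid_guard() is unchanged by the reflow: drop the first path component, map '++' to 'XX' and '.' to '_', upper-case, and prefix GRPC_ only for headers outside include/. A minimal sketch (not part of the patch; the example paths are illustrative):

def expected_guard(fpath):
    # Mirrors build_valid_guard() above.
    prefix = 'GRPC_' if not fpath.startswith('include/') else ''
    return prefix + '_'.join(
        fpath.replace('++', 'XX').replace('.', '_').upper().split('/')[1:])

# Public headers keep no prefix...
assert expected_guard('include/grpc++/server.h') == 'GRPCXX_SERVER_H'
# ...while core-internal headers get GRPC_ prepended.
assert (expected_guard('src/core/lib/support/string.h') ==
        'GRPC_CORE_LIB_SUPPORT_STRING_H')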
diff --git a/tools/distrib/python/check_grpcio_tools.py b/tools/distrib/python/check_grpcio_tools.py
index b56ccae..2363017 100755
--- a/tools/distrib/python/check_grpcio_tools.py
+++ b/tools/distrib/python/check_grpcio_tools.py
@@ -23,12 +23,11 @@
 submodule_commit_hash = _make.protobuf_submodule_commit_hash()
 
 with open(_make.GRPC_PYTHON_PROTOC_LIB_DEPS, 'r') as _protoc_lib_deps_file:
-  content = _protoc_lib_deps_file.read().splitlines()
+    content = _protoc_lib_deps_file.read().splitlines()
 
-testString = (_make.COMMIT_HASH_PREFIX +
-              submodule_commit_hash +
-              _make.COMMIT_HASH_SUFFIX)
+testString = (
+    _make.COMMIT_HASH_PREFIX + submodule_commit_hash + _make.COMMIT_HASH_SUFFIX)
 
 if testString not in content:
-  print(OUT_OF_DATE_MESSAGE.format(_make.GRPC_PYTHON_PROTOC_LIB_DEPS))
-  raise SystemExit(1)
+    print(OUT_OF_DATE_MESSAGE.format(_make.GRPC_PYTHON_PROTOC_LIB_DEPS))
+    raise SystemExit(1)
diff --git a/tools/distrib/python/docgen.py b/tools/distrib/python/docgen.py
index 1822e51..4d6fcb5 100755
--- a/tools/distrib/python/docgen.py
+++ b/tools/distrib/python/docgen.py
@@ -24,14 +24,20 @@
 import tempfile
 
 parser = argparse.ArgumentParser()
-parser.add_argument('--config', metavar='c', type=str, nargs=1,
-                    help='GRPC/GPR libraries build configuration',
-                    default='opt')
+parser.add_argument(
+    '--config',
+    metavar='c',
+    type=str,
+    nargs=1,
+    help='GRPC/GPR libraries build configuration',
+    default='opt')
 parser.add_argument('--submit', action='store_true')
 parser.add_argument('--gh-user', type=str, help='GitHub user to push as.')
-parser.add_argument('--gh-repo-owner', type=str,
-                    help=('Owner of the GitHub repository to be pushed; '
-                          'defaults to --gh-user.'))
+parser.add_argument(
+    '--gh-repo-owner',
+    type=str,
+    help=('Owner of the GitHub repository to be pushed; '
+          'defaults to --gh-user.'))
 parser.add_argument('--doc-branch', type=str)
 args = parser.parse_args()
 
@@ -59,60 +65,75 @@
 })
 
 subprocess_arguments_list = [
-    {'args': ['virtualenv', VIRTUALENV_DIR], 'env': environment},
-    {'args': [VIRTUALENV_PIP_PATH, 'install', '--upgrade', 'pip==9.0.1'],
-     'env': environment},
-    {'args': [VIRTUALENV_PIP_PATH, 'install', '-r', REQUIREMENTS_PATH],
-     'env': environment},
-    {'args': [VIRTUALENV_PYTHON_PATH, SETUP_PATH, 'build'], 'env': environment},
-    {'args': [VIRTUALENV_PYTHON_PATH, SETUP_PATH, 'doc'], 'env': environment},
+    {
+        'args': ['virtualenv', VIRTUALENV_DIR],
+        'env': environment
+    },
+    {
+        'args': [VIRTUALENV_PIP_PATH, 'install', '--upgrade', 'pip==9.0.1'],
+        'env': environment
+    },
+    {
+        'args': [VIRTUALENV_PIP_PATH, 'install', '-r', REQUIREMENTS_PATH],
+        'env': environment
+    },
+    {
+        'args': [VIRTUALENV_PYTHON_PATH, SETUP_PATH, 'build'],
+        'env': environment
+    },
+    {
+        'args': [VIRTUALENV_PYTHON_PATH, SETUP_PATH, 'doc'],
+        'env': environment
+    },
 ]
 
 for subprocess_arguments in subprocess_arguments_list:
-  print('Running command: {}'.format(subprocess_arguments['args']))
-  subprocess.check_call(**subprocess_arguments)
+    print('Running command: {}'.format(subprocess_arguments['args']))
+    subprocess.check_call(**subprocess_arguments)
 
 if args.submit:
-  assert args.gh_user
-  assert args.doc_branch
-  github_user = args.gh_user
-  github_repository_owner = (
-      args.gh_repo_owner if args.gh_repo_owner else args.gh_user)
-  # Create a temporary directory out of tree, checkout gh-pages from the
-  # specified repository, edit it, and push it. It's up to the user to then go
-  # onto GitHub and make a PR against grpc/grpc:gh-pages.
-  repo_parent_dir = tempfile.mkdtemp()
-  print('Documentation parent directory: {}'.format(repo_parent_dir))
-  repo_dir = os.path.join(repo_parent_dir, 'grpc')
-  python_doc_dir = os.path.join(repo_dir, 'python')
-  doc_branch = args.doc_branch
+    assert args.gh_user
+    assert args.doc_branch
+    github_user = args.gh_user
+    github_repository_owner = (args.gh_repo_owner
+                               if args.gh_repo_owner else args.gh_user)
+    # Create a temporary directory out of tree, checkout gh-pages from the
+    # specified repository, edit it, and push it. It's up to the user to then go
+    # onto GitHub and make a PR against grpc/grpc:gh-pages.
+    repo_parent_dir = tempfile.mkdtemp()
+    print('Documentation parent directory: {}'.format(repo_parent_dir))
+    repo_dir = os.path.join(repo_parent_dir, 'grpc')
+    python_doc_dir = os.path.join(repo_dir, 'python')
+    doc_branch = args.doc_branch
 
-  print('Cloning your repository...')
-  subprocess.check_call([
-          'git', 'clone', 'https://{}@github.com/{}/grpc'.format(
-              github_user, github_repository_owner)
-      ], cwd=repo_parent_dir)
-  subprocess.check_call([
-          'git', 'remote', 'add', 'upstream', 'https://github.com/grpc/grpc'
-      ], cwd=repo_dir)
-  subprocess.check_call(['git', 'fetch', 'upstream'], cwd=repo_dir)
-  subprocess.check_call([
-          'git', 'checkout', 'upstream/gh-pages', '-b', doc_branch
-      ], cwd=repo_dir)
-  print('Updating documentation...')
-  shutil.rmtree(python_doc_dir, ignore_errors=True)
-  shutil.copytree(DOC_PATH, python_doc_dir)
-  print('Attempting to push documentation...')
-  try:
-    subprocess.check_call(['git', 'add', '--all'], cwd=repo_dir)
-    subprocess.check_call([
-            'git', 'commit', '-m', 'Auto-update Python documentation'
-        ], cwd=repo_dir)
-    subprocess.check_call([
-            'git', 'push', '--set-upstream', 'origin', doc_branch
-        ], cwd=repo_dir)
-  except subprocess.CalledProcessError:
-    print('Failed to push documentation. Examine this directory and push '
-          'manually: {}'.format(repo_parent_dir))
-    sys.exit(1)
-  shutil.rmtree(repo_parent_dir)
+    print('Cloning your repository...')
+    subprocess.check_call(
+        [
+            'git', 'clone', 'https://{}@github.com/{}/grpc'.format(
+                github_user, github_repository_owner)
+        ],
+        cwd=repo_parent_dir)
+    subprocess.check_call(
+        ['git', 'remote', 'add', 'upstream', 'https://github.com/grpc/grpc'],
+        cwd=repo_dir)
+    subprocess.check_call(['git', 'fetch', 'upstream'], cwd=repo_dir)
+    subprocess.check_call(
+        ['git', 'checkout', 'upstream/gh-pages', '-b', doc_branch],
+        cwd=repo_dir)
+    print('Updating documentation...')
+    shutil.rmtree(python_doc_dir, ignore_errors=True)
+    shutil.copytree(DOC_PATH, python_doc_dir)
+    print('Attempting to push documentation...')
+    try:
+        subprocess.check_call(['git', 'add', '--all'], cwd=repo_dir)
+        subprocess.check_call(
+            ['git', 'commit', '-m', 'Auto-update Python documentation'],
+            cwd=repo_dir)
+        subprocess.check_call(
+            ['git', 'push', '--set-upstream', 'origin', doc_branch],
+            cwd=repo_dir)
+    except subprocess.CalledProcessError:
+        print('Failed to push documentation. Examine this directory and push '
+              'manually: {}'.format(repo_parent_dir))
+        sys.exit(1)
+    shutil.rmtree(repo_parent_dir)
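One detail worth noting in docgen.py: each entry of subprocess_arguments_list is a plain dict whose keys are exactly subprocess.check_call's keyword arguments, so the loop can unpack each step with **. A minimal sketch of the same pattern (not part of the patch; the command is a stand-in for the virtualenv/setup.py steps):

import os
import subprocess

environment = os.environ.copy()
step = {'args': ['python', '--version'], 'env': environment}
subprocess.check_call(**step)  # same as check_call(args=[...], env=environment)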
diff --git a/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py b/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py
index 1454b67..5772620 100644
--- a/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py
+++ b/tools/distrib/python/grpcio_tools/grpc_tools/__init__.py
@@ -11,4 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
diff --git a/tools/distrib/python/grpcio_tools/grpc_tools/command.py b/tools/distrib/python/grpcio_tools/grpc_tools/command.py
index 28be137..7ede05f 100644
--- a/tools/distrib/python/grpcio_tools/grpc_tools/command.py
+++ b/tools/distrib/python/grpcio_tools/grpc_tools/command.py
@@ -22,43 +22,44 @@
 
 
 def build_package_protos(package_root):
-  proto_files = []
-  inclusion_root = os.path.abspath(package_root)
-  for root, _, files in os.walk(inclusion_root):
-    for filename in files:
-      if filename.endswith('.proto'):
-        proto_files.append(os.path.abspath(os.path.join(root, filename)))
+    proto_files = []
+    inclusion_root = os.path.abspath(package_root)
+    for root, _, files in os.walk(inclusion_root):
+        for filename in files:
+            if filename.endswith('.proto'):
+                proto_files.append(
+                    os.path.abspath(os.path.join(root, filename)))
 
-  well_known_protos_include = pkg_resources.resource_filename(
-      'grpc_tools', '_proto')
+    well_known_protos_include = pkg_resources.resource_filename(
+        'grpc_tools', '_proto')
 
-  for proto_file in proto_files:
-    command = [
-        'grpc_tools.protoc',
-        '--proto_path={}'.format(inclusion_root),
-        '--proto_path={}'.format(well_known_protos_include),
-        '--python_out={}'.format(inclusion_root),
-        '--grpc_python_out={}'.format(inclusion_root),
-    ] + [proto_file]
-    if protoc.main(command) != 0:
-      sys.stderr.write('warning: {} failed'.format(command))
+    for proto_file in proto_files:
+        command = [
+            'grpc_tools.protoc',
+            '--proto_path={}'.format(inclusion_root),
+            '--proto_path={}'.format(well_known_protos_include),
+            '--python_out={}'.format(inclusion_root),
+            '--grpc_python_out={}'.format(inclusion_root),
+        ] + [proto_file]
+        if protoc.main(command) != 0:
+            sys.stderr.write('warning: {} failed'.format(command))
 
 
 class BuildPackageProtos(setuptools.Command):
-  """Command to generate project *_pb2.py modules from proto files."""
+    """Command to generate project *_pb2.py modules from proto files."""
 
-  description = 'build grpc protobuf modules'
-  user_options = []
+    description = 'build grpc protobuf modules'
+    user_options = []
 
-  def initialize_options(self):
-    pass
+    def initialize_options(self):
+        pass
 
-  def finalize_options(self):
-    pass
+    def finalize_options(self):
+        pass
 
-  def run(self):
-    # due to limitations of the proto generator, we require that only *one*
-    # directory is provided as an 'include' directory. We assume it's the '' key
-    # to `self.distribution.package_dir` (and get a key error if it's not
-    # there).
-    build_package_protos(self.distribution.package_dir[''])
+    def run(self):
+        # due to limitations of the proto generator, we require that only *one*
+        # directory is provided as an 'include' directory. We assume it's the '' key
+        # to `self.distribution.package_dir` (and get a key error if it's not
+        # there).
+        build_package_protos(self.distribution.package_dir[''])
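Callers can use this module either by invoking build_package_protos() directly on a package root or by registering BuildPackageProtos as a setup.py command. A minimal sketch of the direct call (not part of the patch; 'my_package' is a hypothetical directory containing .proto files):

from grpc_tools import command

if __name__ == '__main__':
    # Walks 'my_package' for *.proto files and writes *_pb2.py /
    # *_pb2_grpc.py next to them, as build_package_protos() above does.
    command.build_package_protos('my_package')

When wiring BuildPackageProtos into setup.py via cmdclass instead, note the run() comment above: self.distribution.package_dir must carry a '' entry, or the command raises a KeyError.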
diff --git a/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py b/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py
index efad51e..582cba0 100644
--- a/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py
+++ b/tools/distrib/python/grpcio_tools/grpc_tools/protoc.py
@@ -19,16 +19,18 @@
 
 from grpc_tools import _protoc_compiler
 
+
 def main(command_arguments):
-  """Run the protocol buffer compiler with the given command-line arguments.
+    """Run the protocol buffer compiler with the given command-line arguments.
 
   Args:
     command_arguments: a list of strings representing command line arguments to
         `protoc`.
   """
-  command_arguments = [argument.encode() for argument in command_arguments]
-  return _protoc_compiler.run_main(command_arguments)
+    command_arguments = [argument.encode() for argument in command_arguments]
+    return _protoc_compiler.run_main(command_arguments)
+
 
 if __name__ == '__main__':
-  proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')
-  sys.exit(main(sys.argv + ['-I{}'.format(proto_include)]))
+    proto_include = pkg_resources.resource_filename('grpc_tools', '_proto')
+    sys.exit(main(sys.argv + ['-I{}'.format(proto_include)]))
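Besides the `python -m grpc_tools.protoc` entry point in the __main__ block, main() accepts an argv-style list, which is how command.py drives it above. A minimal sketch of a programmatic invocation (not part of the patch; the proto paths are hypothetical):

from grpc_tools import protoc

exit_code = protoc.main([
    'grpc_tools.protoc',  # argv[0] placeholder (program name)
    '--proto_path=protos',
    '--python_out=.',
    '--grpc_python_out=.',
    'protos/helloworld.proto',
])
if exit_code != 0:
    raise RuntimeError('protoc failed with exit code {}'.format(exit_code))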
diff --git a/tools/distrib/python/grpcio_tools/grpc_version.py b/tools/distrib/python/grpcio_tools/grpc_version.py
index 2822a98..c4ed066 100644
--- a/tools/distrib/python/grpcio_tools/grpc_version.py
+++ b/tools/distrib/python/grpcio_tools/grpc_version.py
@@ -14,4 +14,4 @@
 
 # AUTO-GENERATED FROM `$REPO_ROOT/templates/tools/distrib/python/grpcio_tools/grpc_version.py.template`!!!
 
-VERSION='1.8.3'
+VERSION = '1.9.0.dev0'
diff --git a/tools/distrib/python/grpcio_tools/setup.py b/tools/distrib/python/grpcio_tools/setup.py
index 8d95cb5..c6bcee4 100644
--- a/tools/distrib/python/grpcio_tools/setup.py
+++ b/tools/distrib/python/grpcio_tools/setup.py
@@ -66,42 +66,42 @@
 EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
 EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
 if EXTRA_ENV_COMPILE_ARGS is None:
-  EXTRA_ENV_COMPILE_ARGS = '-std=c++11'
-  if 'win32' in sys.platform:
-    if sys.version_info < (3, 5):
-      # We use define flags here and don't directly add to DEFINE_MACROS below to
-      # ensure that the expert user/builder has a way of turning it off (via the
-      # envvars) without adding yet more GRPC-specific envvars.
-      # See https://sourceforge.net/p/mingw-w64/bugs/363/
-      if '32' in platform.architecture()[0]:
-        EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s -D_hypot=hypot'
-      else:
-        EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64 -D_hypot=hypot'
-    else:
-      # We need to statically link the C++ Runtime, only the C runtime is
-      # available dynamically
-      EXTRA_ENV_COMPILE_ARGS += ' /MT'
-  elif "linux" in sys.platform or "darwin" in sys.platform:
-    EXTRA_ENV_COMPILE_ARGS += ' -fno-wrapv -frtti'
+    EXTRA_ENV_COMPILE_ARGS = '-std=c++11'
+    if 'win32' in sys.platform:
+        if sys.version_info < (3, 5):
+            # We use define flags here and don't directly add to DEFINE_MACROS below to
+            # ensure that the expert user/builder has a way of turning it off (via the
+            # envvars) without adding yet more GRPC-specific envvars.
+            # See https://sourceforge.net/p/mingw-w64/bugs/363/
+            if '32' in platform.architecture()[0]:
+                EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s -D_hypot=hypot'
+            else:
+                EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64 -D_hypot=hypot'
+        else:
+            # We need to statically link the C++ Runtime, only the C runtime is
+            # available dynamically
+            EXTRA_ENV_COMPILE_ARGS += ' /MT'
+    elif "linux" in sys.platform or "darwin" in sys.platform:
+        EXTRA_ENV_COMPILE_ARGS += ' -fno-wrapv -frtti'
 if EXTRA_ENV_LINK_ARGS is None:
-  EXTRA_ENV_LINK_ARGS = ''
-  if "linux" in sys.platform or "darwin" in sys.platform:
-    EXTRA_ENV_LINK_ARGS += ' -lpthread'
-  elif "win32" in sys.platform and sys.version_info < (3, 5):
-    msvcr = cygwinccompiler.get_msvcr()[0]
-    # TODO(atash) sift through the GCC specs to see if libstdc++ can have any
-    # influence on the linkage outcome on MinGW for non-C++ programs.
-    EXTRA_ENV_LINK_ARGS += (
-        ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
-        '-static'.format(msvcr=msvcr))
+    EXTRA_ENV_LINK_ARGS = ''
+    if "linux" in sys.platform or "darwin" in sys.platform:
+        EXTRA_ENV_LINK_ARGS += ' -lpthread'
+    elif "win32" in sys.platform and sys.version_info < (3, 5):
+        msvcr = cygwinccompiler.get_msvcr()[0]
+        # TODO(atash) sift through the GCC specs to see if libstdc++ can have any
+        # influence on the linkage outcome on MinGW for non-C++ programs.
+        EXTRA_ENV_LINK_ARGS += (
+            ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
+            '-static'.format(msvcr=msvcr))
 
 EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
 EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)
 
-CC_FILES = [
-  os.path.normpath(cc_file) for cc_file in protoc_lib_deps.CC_FILES]
+CC_FILES = [os.path.normpath(cc_file) for cc_file in protoc_lib_deps.CC_FILES]
 PROTO_FILES = [
-  os.path.normpath(proto_file) for proto_file in protoc_lib_deps.PROTO_FILES]
+    os.path.normpath(proto_file) for proto_file in protoc_lib_deps.PROTO_FILES
+]
 CC_INCLUDE = os.path.normpath(protoc_lib_deps.CC_INCLUDE)
 PROTO_INCLUDE = os.path.normpath(protoc_lib_deps.PROTO_INCLUDE)
 
@@ -110,107 +110,116 @@
 
 DEFINE_MACROS = ()
 if "win32" in sys.platform:
-  DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1),)
-  if '64bit' in platform.architecture()[0]:
-    DEFINE_MACROS += (('MS_WIN64', 1),)
+    DEFINE_MACROS += (('WIN32_LEAN_AND_MEAN', 1),)
+    if '64bit' in platform.architecture()[0]:
+        DEFINE_MACROS += (('MS_WIN64', 1),)
 elif "linux" in sys.platform or "darwin" in sys.platform:
-  DEFINE_MACROS += (('HAVE_PTHREAD', 1),)
+    DEFINE_MACROS += (('HAVE_PTHREAD', 1),)
 
 # By default, Python3 distutils enforces compatibility of
 # c plugins (.so files) with the OSX version Python3 was built with.
 # For Python3.4, this is OSX 10.6, but we need Thread Local Support (__thread)
 if 'darwin' in sys.platform and PY3:
-  mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
-  if mac_target and (pkg_resources.parse_version(mac_target) <
-		     pkg_resources.parse_version('10.9.0')):
-    os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
-    os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
-        r'macosx-[0-9]+\.[0-9]+-(.+)',
-        r'macosx-10.9-\1',
-        util.get_platform())
+    mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
+    if mac_target and (pkg_resources.parse_version(mac_target) <
+                       pkg_resources.parse_version('10.9.0')):
+        os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
+        os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
+            r'macosx-[0-9]+\.[0-9]+-(.+)', r'macosx-10.9-\1',
+            util.get_platform())
+
 
 def package_data():
-  tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace('.', os.path.sep)
-  proto_resources_path = os.path.join(tools_path,
-                                      GRPC_PYTHON_PROTO_RESOURCES_NAME)
-  proto_files = []
-  for proto_file in PROTO_FILES:
-    source = os.path.join(PROTO_INCLUDE, proto_file)
-    target = os.path.join(proto_resources_path, proto_file)
-    relative_target = os.path.join(GRPC_PYTHON_PROTO_RESOURCES_NAME, proto_file)
-    try:
-      os.makedirs(os.path.dirname(target))
-    except OSError as error:
-      if error.errno == errno.EEXIST:
-        pass
-      else:
-        raise
-    shutil.copy(source, target)
-    proto_files.append(relative_target)
-  return {GRPC_PYTHON_TOOLS_PACKAGE: proto_files}
+    tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace('.', os.path.sep)
+    proto_resources_path = os.path.join(tools_path,
+                                        GRPC_PYTHON_PROTO_RESOURCES_NAME)
+    proto_files = []
+    for proto_file in PROTO_FILES:
+        source = os.path.join(PROTO_INCLUDE, proto_file)
+        target = os.path.join(proto_resources_path, proto_file)
+        relative_target = os.path.join(GRPC_PYTHON_PROTO_RESOURCES_NAME,
+                                       proto_file)
+        try:
+            os.makedirs(os.path.dirname(target))
+        except OSError as error:
+            if error.errno == errno.EEXIST:
+                pass
+            else:
+                raise
+        shutil.copy(source, target)
+        proto_files.append(relative_target)
+    return {GRPC_PYTHON_TOOLS_PACKAGE: proto_files}
+
 
 def extension_modules():
-  if BUILD_WITH_CYTHON:
-    plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.pyx')]
-  else:
-    plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.cpp')]
+    if BUILD_WITH_CYTHON:
+        plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.pyx')]
+    else:
+        plugin_sources = [os.path.join('grpc_tools', '_protoc_compiler.cpp')]
 
-  plugin_sources += [
-    os.path.join('grpc_tools', 'main.cc'),
-    os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc')]
+    plugin_sources += [
+        os.path.join('grpc_tools', 'main.cc'),
+        os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc')
+    ]
 
-  #HACK: Substitute the embed.cc, which is a JS to C++
-  #      preprocessor with the generated code.
-  #      The generated code should not be material
-  #      to the parts of protoc we use (it affects
-  #      the JavaScript code generator, supposedly),
-  #      but we need to be cautious about it.
-  cc_files_clone = list(CC_FILES)
-  embed_cc_file = os.path.normpath('google/protobuf/compiler/js/embed.cc')
-  well_known_types_file = os.path.normpath(
-      'google/protobuf/compiler/js/well_known_types_embed.cc')
-  if embed_cc_file in cc_files_clone:
-    cc_files_clone.remove(embed_cc_file)
-  if well_known_types_file in cc_files_clone:
-    cc_files_clone.remove(well_known_types_file)
-    plugin_sources += [os.path.join('grpc_tools', 'protobuf_generated_well_known_types_embed.cc')]
-  plugin_sources += [os.path.join(CC_INCLUDE, cc_file) for cc_file in cc_files_clone]
+    #HACK: Substitute the embed.cc, which is a JS to C++
+    #      preprocessor with the generated code.
+    #      The generated code should not be material
+    #      to the parts of protoc we use (it affects
+    #      the JavaScript code generator, supposedly),
+    #      but we need to be cautious about it.
+    cc_files_clone = list(CC_FILES)
+    embed_cc_file = os.path.normpath('google/protobuf/compiler/js/embed.cc')
+    well_known_types_file = os.path.normpath(
+        'google/protobuf/compiler/js/well_known_types_embed.cc')
+    if embed_cc_file in cc_files_clone:
+        cc_files_clone.remove(embed_cc_file)
+    if well_known_types_file in cc_files_clone:
+        cc_files_clone.remove(well_known_types_file)
+        plugin_sources += [
+            os.path.join('grpc_tools',
+                         'protobuf_generated_well_known_types_embed.cc')
+        ]
+    plugin_sources += [
+        os.path.join(CC_INCLUDE, cc_file) for cc_file in cc_files_clone
+    ]
 
-  plugin_ext = extension.Extension(
-      name='grpc_tools._protoc_compiler',
-      sources=plugin_sources,
-      include_dirs=[
-          '.',
-          'grpc_root',
-          os.path.join('grpc_root', 'include'),
-          CC_INCLUDE,
-      ],
-      language='c++',
-      define_macros=list(DEFINE_MACROS),
-      extra_compile_args=list(EXTRA_COMPILE_ARGS),
-      extra_link_args=list(EXTRA_LINK_ARGS),
-  )
-  extensions = [plugin_ext]
-  if BUILD_WITH_CYTHON:
-    from Cython import Build
-    return Build.cythonize(extensions)
-  else:
-    return extensions
+    plugin_ext = extension.Extension(
+        name='grpc_tools._protoc_compiler',
+        sources=plugin_sources,
+        include_dirs=[
+            '.',
+            'grpc_root',
+            os.path.join('grpc_root', 'include'),
+            CC_INCLUDE,
+        ],
+        language='c++',
+        define_macros=list(DEFINE_MACROS),
+        extra_compile_args=list(EXTRA_COMPILE_ARGS),
+        extra_link_args=list(EXTRA_LINK_ARGS),
+    )
+    extensions = [plugin_ext]
+    if BUILD_WITH_CYTHON:
+        from Cython import Build
+        return Build.cythonize(extensions)
+    else:
+        return extensions
+
 
 setuptools.setup(
-  name='grpcio-tools',
-  version=grpc_version.VERSION,
-  description='Protobuf code generator for gRPC',
-  author='The gRPC Authors',
-  author_email='grpc-io@googlegroups.com',
-  url='https://grpc.io',
-  license='Apache License 2.0',
-  classifiers=CLASSIFIERS,
-  ext_modules=extension_modules(),
-  packages=setuptools.find_packages('.'),
-  install_requires=[
-    'protobuf>=3.5.0.post1',
-    'grpcio>={version}'.format(version=grpc_version.VERSION),
-  ],
-  package_data=package_data(),
+    name='grpcio-tools',
+    version=grpc_version.VERSION,
+    description='Protobuf code generator for gRPC',
+    author='The gRPC Authors',
+    author_email='grpc-io@googlegroups.com',
+    url='https://grpc.io',
+    license='Apache License 2.0',
+    classifiers=CLASSIFIERS,
+    ext_modules=extension_modules(),
+    packages=setuptools.find_packages('.'),
+    install_requires=[
+        'protobuf>=3.5.0.post1',
+        'grpcio>={version}'.format(version=grpc_version.VERSION),
+    ],
+    package_data=package_data(),
 )
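The flag blocks above are only defaults: anything set in GRPC_PYTHON_CFLAGS or GRPC_PYTHON_LDFLAGS replaces the computed values wholesale before being shlex-split into argument lists. A minimal sketch of that precedence (not part of the patch; the platform-specific additions are omitted):

import os
import shlex

extra_cflags = os.environ.get('GRPC_PYTHON_CFLAGS', None)
if extra_cflags is None:
    extra_cflags = '-std=c++11'  # the baseline default used above
EXTRA_COMPILE_ARGS = shlex.split(extra_cflags)
print(EXTRA_COMPILE_ARGS)  # ['-std=c++11'] when the env var is unset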
diff --git a/tools/distrib/python/make_grpcio_tools.py b/tools/distrib/python/make_grpcio_tools.py
index c865f0b..4847233 100755
--- a/tools/distrib/python/make_grpcio_tools.py
+++ b/tools/distrib/python/make_grpcio_tools.py
@@ -27,7 +27,7 @@
 import traceback
 import uuid
 
-DEPS_FILE_CONTENT="""
+DEPS_FILE_CONTENT = """
 # Copyright 2017 gRPC authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -60,15 +60,16 @@
 PROTOBUF_PROTO_PREFIX = '//:src/'
 
 GRPC_ROOT = os.path.abspath(
-    os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                 '..', '..', '..'))
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
 
-GRPC_PYTHON_ROOT = os.path.join(GRPC_ROOT, 'tools', 'distrib',
-                                'python', 'grpcio_tools')
+GRPC_PYTHON_ROOT = os.path.join(GRPC_ROOT, 'tools', 'distrib', 'python',
+                                'grpcio_tools')
 
-GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT = os.path.join('third_party', 'protobuf', 'src')
+GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT = os.path.join('third_party', 'protobuf',
+                                                  'src')
 GRPC_PROTOBUF = os.path.join(GRPC_ROOT, GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT)
-GRPC_PROTOBUF_SUBMODULE_ROOT = os.path.join(GRPC_ROOT, 'third_party', 'protobuf')
+GRPC_PROTOBUF_SUBMODULE_ROOT = os.path.join(GRPC_ROOT, 'third_party',
+                                            'protobuf')
 GRPC_PROTOC_PLUGINS = os.path.join(GRPC_ROOT, 'src', 'compiler')
 GRPC_PYTHON_PROTOBUF = os.path.join(GRPC_PYTHON_ROOT, 'third_party', 'protobuf',
                                     'src')
@@ -80,81 +81,93 @@
 GRPC_INCLUDE = os.path.join(GRPC_ROOT, 'include')
 GRPC_PYTHON_INCLUDE = os.path.join(GRPC_PYTHON_ROOT, 'grpc_root', 'include')
 
-BAZEL_DEPS = os.path.join(GRPC_ROOT, 'tools', 'distrib', 'python', 'bazel_deps.sh')
+BAZEL_DEPS = os.path.join(GRPC_ROOT, 'tools', 'distrib', 'python',
+                          'bazel_deps.sh')
 BAZEL_DEPS_PROTOC_LIB_QUERY = '//:protoc_lib'
 BAZEL_DEPS_COMMON_PROTOS_QUERY = '//:well_known_protos'
 
+
 def protobuf_submodule_commit_hash():
-  """Gets the commit hash for the HEAD of the protobuf submodule currently
+    """Gets the commit hash for the HEAD of the protobuf submodule currently
      checked out."""
-  cwd = os.getcwd()
-  os.chdir(GRPC_PROTOBUF_SUBMODULE_ROOT)
-  output = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
-  os.chdir(cwd)
-  return output.splitlines()[0].strip()
+    cwd = os.getcwd()
+    os.chdir(GRPC_PROTOBUF_SUBMODULE_ROOT)
+    output = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
+    os.chdir(cwd)
+    return output.splitlines()[0].strip()
+
 
 def bazel_query(query):
-  output = subprocess.check_output([BAZEL_DEPS, query])
-  return output.splitlines()
+    output = subprocess.check_output([BAZEL_DEPS, query])
+    return output.splitlines()
+
 
 def get_deps():
-  """Write the result of the bazel query `query` against protobuf to
+    """Write the result of the bazel query `query` against protobuf to
      `out_file`."""
-  cc_files_output = bazel_query(BAZEL_DEPS_PROTOC_LIB_QUERY)
-  cc_files = [
-      name[len(PROTOBUF_CC_PREFIX):] for name in cc_files_output
-      if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)]
-  proto_files_output = bazel_query(BAZEL_DEPS_COMMON_PROTOS_QUERY)
-  proto_files = [
-      name[len(PROTOBUF_PROTO_PREFIX):] for name in proto_files_output
-      if name.endswith('.proto') and name.startswith(PROTOBUF_PROTO_PREFIX)]
-  commit_hash = protobuf_submodule_commit_hash()
-  deps_file_content = DEPS_FILE_CONTENT.format(
-      cc_files=cc_files,
-      proto_files=proto_files,
-      cc_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
-      proto_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
-      commit_hash=COMMIT_HASH_PREFIX + commit_hash + COMMIT_HASH_SUFFIX)
-  return deps_file_content
+    cc_files_output = bazel_query(BAZEL_DEPS_PROTOC_LIB_QUERY)
+    cc_files = [
+        name[len(PROTOBUF_CC_PREFIX):]
+        for name in cc_files_output
+        if name.endswith('.cc') and name.startswith(PROTOBUF_CC_PREFIX)
+    ]
+    proto_files_output = bazel_query(BAZEL_DEPS_COMMON_PROTOS_QUERY)
+    proto_files = [
+        name[len(PROTOBUF_PROTO_PREFIX):]
+        for name in proto_files_output
+        if name.endswith('.proto') and name.startswith(PROTOBUF_PROTO_PREFIX)
+    ]
+    commit_hash = protobuf_submodule_commit_hash()
+    deps_file_content = DEPS_FILE_CONTENT.format(
+        cc_files=cc_files,
+        proto_files=proto_files,
+        cc_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
+        proto_include=repr(GRPC_PYTHON_PROTOBUF_RELATIVE_ROOT),
+        commit_hash=COMMIT_HASH_PREFIX + commit_hash + COMMIT_HASH_SUFFIX)
+    return deps_file_content
+
 
 def long_path(path):
-  if os.name == 'nt':
-    return '\\\\?\\' + path
-  else:
-    return path
+    if os.name == 'nt':
+        return '\\\\?\\' + path
+    else:
+        return path
+
 
 def main():
-  os.chdir(GRPC_ROOT)
+    os.chdir(GRPC_ROOT)
 
-  for source, target in [
-      (GRPC_PROTOBUF, GRPC_PYTHON_PROTOBUF),
-      (GRPC_PROTOC_PLUGINS, GRPC_PYTHON_PROTOC_PLUGINS),
-      (GRPC_INCLUDE, GRPC_PYTHON_INCLUDE)]:
-    for source_dir, _, files in os.walk(source):
-      target_dir = os.path.abspath(os.path.join(target, os.path.relpath(source_dir, source)))
-      try:
-        os.makedirs(target_dir)
-      except OSError as error:
-        if error.errno != errno.EEXIST:
-          raise
-      for relative_file in files:
-        source_file = os.path.abspath(os.path.join(source_dir, relative_file))
-        target_file = os.path.abspath(os.path.join(target_dir, relative_file))
-        shutil.copyfile(source_file, target_file)
+    for source, target in [(GRPC_PROTOBUF, GRPC_PYTHON_PROTOBUF),
+                           (GRPC_PROTOC_PLUGINS, GRPC_PYTHON_PROTOC_PLUGINS),
+                           (GRPC_INCLUDE, GRPC_PYTHON_INCLUDE)]:
+        for source_dir, _, files in os.walk(source):
+            target_dir = os.path.abspath(
+                os.path.join(target, os.path.relpath(source_dir, source)))
+            try:
+                os.makedirs(target_dir)
+            except OSError as error:
+                if error.errno != errno.EEXIST:
+                    raise
+            for relative_file in files:
+                source_file = os.path.abspath(
+                    os.path.join(source_dir, relative_file))
+                target_file = os.path.abspath(
+                    os.path.join(target_dir, relative_file))
+                shutil.copyfile(source_file, target_file)
 
-  try:
-    protoc_lib_deps_content = get_deps()
-  except Exception as error:
-    # We allow this script to succeed even if we couldn't get the dependencies,
-    # as then we can assume that even without a successful bazel run the
-    # dependencies currently in source control are 'good enough'.
-    sys.stderr.write("Got non-fatal error:\n")
-    traceback.print_exc(file=sys.stderr)
-    return
-  # If we successfully got the dependencies, truncate and rewrite the deps file.
-  with open(GRPC_PYTHON_PROTOC_LIB_DEPS, 'w') as deps_file:
-    deps_file.write(protoc_lib_deps_content)
+    try:
+        protoc_lib_deps_content = get_deps()
+    except Exception as error:
+        # We allow this script to succeed even if we couldn't get the dependencies,
+        # as then we can assume that even without a successful bazel run the
+        # dependencies currently in source control are 'good enough'.
+        sys.stderr.write("Got non-fatal error:\n")
+        traceback.print_exc(file=sys.stderr)
+        return
+    # If we successfully got the dependencies, truncate and rewrite the deps file.
+    with open(GRPC_PYTHON_PROTOC_LIB_DEPS, 'w') as deps_file:
+        deps_file.write(protoc_lib_deps_content)
+
 
 if __name__ == '__main__':
-  main()
-
+    main()
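get_deps() keeps only the bazel query labels that carry the expected '//:src/' prefix and the right extension, then strips the prefix to obtain repo-relative paths for the generated deps file. A minimal sketch of that filter for the .proto case (not part of the patch; the label list is made up):

PROTOBUF_PROTO_PREFIX = '//:src/'
labels = [
    '//:src/google/protobuf/any.proto',
    '//:src/google/protobuf/descriptor.cc',
    '//external:zlib',
]
proto_files = [
    label[len(PROTOBUF_PROTO_PREFIX):]
    for label in labels
    if label.endswith('.proto') and label.startswith(PROTOBUF_PROTO_PREFIX)
]
assert proto_files == ['google/protobuf/any.proto']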
diff --git a/tools/distrib/python/submit.py b/tools/distrib/python/submit.py
index 92eab5a..aff71b5 100755
--- a/tools/distrib/python/submit.py
+++ b/tools/distrib/python/submit.py
@@ -21,43 +21,52 @@
 parser = argparse.ArgumentParser(
     description='Submit the package to a PyPI repository.')
 parser.add_argument(
-    '--repository', '-r', metavar='r', type=str, default='pypi',
+    '--repository',
+    '-r',
+    metavar='r',
+    type=str,
+    default='pypi',
     help='The repository to push the package to. '
-         'Ensure the value appears in your .pypirc file. '
-         'Defaults to "pypi".'
-)
+    'Ensure the value appears in your .pypirc file. '
+    'Defaults to "pypi".')
 parser.add_argument(
-    '--identity', '-i', metavar='i', type=str,
-    help='GPG identity to sign the files with.'
-)
+    '--identity',
+    '-i',
+    metavar='i',
+    type=str,
+    help='GPG identity to sign the files with.')
 parser.add_argument(
-    '--username', '-u', metavar='u', type=str,
+    '--username',
+    '-u',
+    metavar='u',
+    type=str,
     help='Username to authenticate with the repository. Not needed if you have '
-         'configured your .pypirc to include your username.'
-)
+    'configured your .pypirc to include your username.')
 parser.add_argument(
-    '--password', '-p', metavar='p', type=str,
+    '--password',
+    '-p',
+    metavar='p',
+    type=str,
     help='Password to authenticate with the repository. Not needed if you have '
-         'configured your .pypirc to include your password.'
-)
+    'configured your .pypirc to include your password.')
 parser.add_argument(
-    '--bdist', '-b', action='store_true',
-    help='Generate a binary distribution (wheel) for the current OS.'
-)
+    '--bdist',
+    '-b',
+    action='store_true',
+    help='Generate a binary distribution (wheel) for the current OS.')
 parser.add_argument(
-    '--dist-args', type=str,
-    help='Additional arguments to pass to the *dist setup.py command.'
-)
+    '--dist-args',
+    type=str,
+    help='Additional arguments to pass to the *dist setup.py command.')
 args = parser.parse_args()
 
 # Move to the root directory of Python GRPC.
-pkgdir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
-                      '../../../')
+pkgdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../')
 # Remove previous distributions; they somehow confuse twine.
 try:
-  shutil.rmtree(os.path.join(pkgdir, 'dist/'))
+    shutil.rmtree(os.path.join(pkgdir, 'dist/'))
 except:
-  pass
+    pass
 
 # Build the Cython C files
 build_env = os.environ.copy()
@@ -67,20 +76,20 @@
 
 # Make the push.
 if args.bdist:
-  cmd = ['python', 'setup.py', 'bdist_wheel']
+    cmd = ['python', 'setup.py', 'bdist_wheel']
 else:
-  cmd = ['python', 'setup.py', 'sdist']
+    cmd = ['python', 'setup.py', 'sdist']
 if args.dist_args:
-  cmd += args.dist_args.split()
+    cmd += args.dist_args.split()
 subprocess.call(cmd, cwd=pkgdir)
 
 cmd = ['twine', 'upload', '-r', args.repository]
 if args.identity is not None:
-  cmd.extend(['-i', args.identity])
+    cmd.extend(['-i', args.identity])
 if args.username is not None:
-  cmd.extend(['-u', args.username])
+    cmd.extend(['-u', args.username])
 if args.password is not None:
-  cmd.extend(['-p', args.password])
+    cmd.extend(['-p', args.password])
 cmd.append('dist/*')
 
 subprocess.call(cmd, cwd=pkgdir)
diff --git a/tools/distrib/run_clang_tidy.py b/tools/distrib/run_clang_tidy.py
index d002a04..72d7956 100755
--- a/tools/distrib/run_clang_tidy.py
+++ b/tools/distrib/run_clang_tidy.py
@@ -20,32 +20,36 @@
 import multiprocessing
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', 'run_tests', 'python_utils'))
 import jobset
 
 GRPC_CHECKS = [
-  'modernize-use-nullptr',
+    'modernize-use-nullptr',
 ]
 
 extra_args = [
-  '-x',
-  'c++',
-  '-std=c++11',
+    '-x',
+    'c++',
+    '-std=c++11',
 ]
 with open('.clang_complete') as f:
-  for line in f:
-    line = line.strip()
-    if line.startswith('-I'):
-      extra_args.append(line)
+    for line in f:
+        line = line.strip()
+        if line.startswith('-I'):
+            extra_args.append(line)
 
 clang_tidy = os.environ.get('CLANG_TIDY', 'clang-tidy')
 
 argp = argparse.ArgumentParser(description='Run clang-tidy against core')
 argp.add_argument('files', nargs='+', help='Files to tidy')
 argp.add_argument('--fix', dest='fix', action='store_true')
-argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count(),
-                  help='Number of CPUs to use')
+argp.add_argument(
+    '-j',
+    '--jobs',
+    type=int,
+    default=multiprocessing.cpu_count(),
+    help='Number of CPUs to use')
 argp.set_defaults(fix=False)
 args = argp.parse_args()
 
@@ -53,18 +57,16 @@
     clang_tidy,
     '--checks=-*,%s' % ','.join(GRPC_CHECKS),
     '--warnings-as-errors=%s' % ','.join(GRPC_CHECKS)
-] + [
-    '--extra-arg-before=%s' % arg
-    for arg in extra_args
-]
+] + ['--extra-arg-before=%s' % arg for arg in extra_args]
 
 if args.fix:
-  cmdline.append('--fix')
+    cmdline.append('--fix')
 
 jobs = []
 for filename in args.files:
-  jobs.append(jobset.JobSpec(cmdline + [filename],
-                             shortname=filename,
-                             ))#verbose_success=True))
+    jobs.append(jobset.JobSpec(
+        cmdline + [filename],
+        shortname=filename,
+    ))  #verbose_success=True))
 
 jobset.run(jobs, maxjobs=args.jobs)
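run_clang_tidy.py builds one invocation per input file: the restricted --checks list, the same list promoted to errors, and every entry of extra_args (including the -I lines pulled from .clang_complete) forwarded via --extra-arg-before. A minimal sketch of the resulting command line (not part of the patch; the include path is illustrative):

GRPC_CHECKS = ['modernize-use-nullptr']
extra_args = ['-x', 'c++', '-std=c++11', '-Iinclude']
cmdline = [
    'clang-tidy',
    '--checks=-*,%s' % ','.join(GRPC_CHECKS),
    '--warnings-as-errors=%s' % ','.join(GRPC_CHECKS),
] + ['--extra-arg-before=%s' % arg for arg in extra_args]
assert cmdline[1] == '--checks=-*,modernize-use-nullptr'
assert cmdline[-1] == '--extra-arg-before=-Iinclude'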
diff --git a/tools/distrib/yapf_code.sh b/tools/distrib/yapf_code.sh
index e5beb70..d188a02 100755
--- a/tools/distrib/yapf_code.sh
+++ b/tools/distrib/yapf_code.sh
@@ -19,14 +19,13 @@
 cd "$(dirname "${0}")/../.."
 
 DIRS=(
+    'examples/python'
     'src/python'
+    'tools'
 )
 EXCLUSIONS=(
-    'grpcio/grpc_*.py'
-    'grpcio_health_checking/grpc_*.py'
-    'grpcio_reflection/grpc_*.py'
-    'grpcio_testing/grpc_*.py'
-    'grpcio_tests/grpc_*.py'
+    '*protoc_lib_deps.py'  # this file is auto-generated
+    '*_pb2*.py'  # no need to format protoc generated files
 )
 
 VIRTUALENV=yapf_virtual_environment
@@ -35,7 +34,7 @@
 PYTHON=$(realpath "${VIRTUALENV}/bin/python")
 $PYTHON -m pip install --upgrade pip==9.0.1
 $PYTHON -m pip install --upgrade futures
-$PYTHON -m pip install yapf==0.16.0
+$PYTHON -m pip install yapf==0.20.0
 
 yapf() {
     local exclusion exclusion_args=()
@@ -55,7 +54,7 @@
 	tempdir=$(mktemp -d)
 	cp -RT "${dir}" "${tempdir}"
 	yapf "${tempdir}"
-	diff -ru "${dir}" "${tempdir}" || ok=no
+	diff -x '*.pyc' -ru "${dir}" "${tempdir}" || ok=no
 	rm -rf "${tempdir}"
     done
     if [[ ${ok} == no ]]; then
diff --git a/tools/dockerfile/test/sanity/Dockerfile b/tools/dockerfile/test/sanity/Dockerfile
index e4a2972..6e5a133 100644
--- a/tools/dockerfile/test/sanity/Dockerfile
+++ b/tools/dockerfile/test/sanity/Dockerfile
@@ -81,7 +81,8 @@
       libtool \
       curl \
       python-virtualenv \
-      python-lxml
+      python-lxml \
+      shellcheck
 RUN pip install simplejson mako
 
 #======================================
diff --git a/tools/doxygen/Doxyfile.c++ b/tools/doxygen/Doxyfile.c++
index 36bc63e..e62278c 100644
--- a/tools/doxygen/Doxyfile.c++
+++ b/tools/doxygen/Doxyfile.c++
@@ -40,7 +40,7 @@
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 1.8.3
+PROJECT_NUMBER         = 1.9.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -913,7 +913,6 @@
 include/grpc/support/avl.h \
 include/grpc/support/cmdline.h \
 include/grpc/support/cpu.h \
-include/grpc/support/histogram.h \
 include/grpc/support/host_port.h \
 include/grpc/support/log.h \
 include/grpc/support/log_windows.h \
diff --git a/tools/doxygen/Doxyfile.c++.internal b/tools/doxygen/Doxyfile.c++.internal
index 7ad1825..d09b325 100644
--- a/tools/doxygen/Doxyfile.c++.internal
+++ b/tools/doxygen/Doxyfile.c++.internal
@@ -40,7 +40,7 @@
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 1.8.3
+PROJECT_NUMBER         = 1.9.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -914,7 +914,6 @@
 include/grpc/support/avl.h \
 include/grpc/support/cmdline.h \
 include/grpc/support/cpu.h \
-include/grpc/support/histogram.h \
 include/grpc/support/host_port.h \
 include/grpc/support/log.h \
 include/grpc/support/log_windows.h \
@@ -1033,12 +1032,15 @@
 src/core/lib/support/atomic.h \
 src/core/lib/support/atomic_with_atm.h \
 src/core/lib/support/atomic_with_std.h \
+src/core/lib/support/debug_location.h \
 src/core/lib/support/env.h \
 src/core/lib/support/fork.h \
 src/core/lib/support/manual_constructor.h \
 src/core/lib/support/memory.h \
 src/core/lib/support/mpscq.h \
 src/core/lib/support/murmur_hash.h \
+src/core/lib/support/ref_counted.h \
+src/core/lib/support/ref_counted_ptr.h \
 src/core/lib/support/spinlock.h \
 src/core/lib/support/string.h \
 src/core/lib/support/string_windows.h \
diff --git a/tools/doxygen/Doxyfile.core b/tools/doxygen/Doxyfile.core
index 3c1abad..6ce9041 100644
--- a/tools/doxygen/Doxyfile.core
+++ b/tools/doxygen/Doxyfile.core
@@ -40,7 +40,7 @@
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 5.0.0
+PROJECT_NUMBER         = 5.0.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -853,7 +853,6 @@
 include/grpc/support/avl.h \
 include/grpc/support/cmdline.h \
 include/grpc/support/cpu.h \
-include/grpc/support/histogram.h \
 include/grpc/support/host_port.h \
 include/grpc/support/log.h \
 include/grpc/support/log_windows.h \
diff --git a/tools/doxygen/Doxyfile.core.internal b/tools/doxygen/Doxyfile.core.internal
index f7d8ba3..1aff007 100644
--- a/tools/doxygen/Doxyfile.core.internal
+++ b/tools/doxygen/Doxyfile.core.internal
@@ -40,7 +40,7 @@
 # could be handy for archiving the generated documentation or if some version
 # control system is used.
 
-PROJECT_NUMBER         = 5.0.0
+PROJECT_NUMBER         = 5.0.0-dev
 
 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -853,7 +853,6 @@
 include/grpc/support/avl.h \
 include/grpc/support/cmdline.h \
 include/grpc/support/cpu.h \
-include/grpc/support/histogram.h \
 include/grpc/support/host_port.h \
 include/grpc/support/log.h \
 include/grpc/support/log_windows.h \
@@ -1287,13 +1286,13 @@
 src/core/lib/support/cpu_linux.cc \
 src/core/lib/support/cpu_posix.cc \
 src/core/lib/support/cpu_windows.cc \
+src/core/lib/support/debug_location.h \
 src/core/lib/support/env.h \
 src/core/lib/support/env_linux.cc \
 src/core/lib/support/env_posix.cc \
 src/core/lib/support/env_windows.cc \
 src/core/lib/support/fork.cc \
 src/core/lib/support/fork.h \
-src/core/lib/support/histogram.cc \
 src/core/lib/support/host_port.cc \
 src/core/lib/support/log.cc \
 src/core/lib/support/log_android.cc \
@@ -1306,6 +1305,8 @@
 src/core/lib/support/mpscq.h \
 src/core/lib/support/murmur_hash.cc \
 src/core/lib/support/murmur_hash.h \
+src/core/lib/support/ref_counted.h \
+src/core/lib/support/ref_counted_ptr.h \
 src/core/lib/support/spinlock.h \
 src/core/lib/support/string.cc \
 src/core/lib/support/string.h \
diff --git a/tools/failures/detect_new_failures.py b/tools/failures/detect_new_failures.py
new file mode 100644
index 0000000..4063978
--- /dev/null
+++ b/tools/failures/detect_new_failures.py
@@ -0,0 +1,309 @@
+#!/usr/bin/env python
+# Copyright 2015 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Detect new flakes and create issues for them"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import datetime
+import json
+import logging
+import os
+import pprint
+import sys
+import urllib
+import urllib2
+from collections import namedtuple
+
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
+sys.path.append(gcp_utils_dir)
+
+import big_query_utils
+
+GH_ISSUE_CREATION_URL = 'https://api.github.com/repos/grpc/grpc/issues'
+GH_ISSUE_SEARCH_URL = 'https://api.github.com/search/issues'
+KOKORO_BASE_URL = 'https://kokoro2.corp.google.com/job/'
+
+
+def gh(url, data=None):
+    request = urllib2.Request(url, data=data)
+    assert TOKEN
+    request.add_header('Authorization', 'token {}'.format(TOKEN))
+    if data:
+        request.add_header('Content-type', 'application/json')
+    response = urllib2.urlopen(request)
+    if 200 <= response.getcode() < 300:
+        return json.loads(response.read())
+    else:
+        raise ValueError('Error ({}) accessing {}'.format(
+            response.getcode(), response.geturl()))
+
+
+def search_gh_issues(search_term, status='open'):
+    params = ' '.join((search_term, 'is:issue', 'is:' + status, 'repo:grpc/grpc'))
+    qargs = urllib.urlencode({'q': params})
+    url = '?'.join((GH_ISSUE_SEARCH_URL, qargs))
+    response = gh(url)
+    return response
+
+
+def create_gh_issue(title, body, labels, assignees=[]):
+    params = {'title': title, 'body': body, 'labels': labels}
+    if assignees:
+        params['assignees'] = assignees
+    data = json.dumps(params)
+    response = gh(GH_ISSUE_CREATION_URL, data)
+    issue_url = response['html_url']
+    print('Created issue {} for {}'.format(issue_url, title))
+
+
+def build_kokoro_url(job_name, build_id):
+    job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
+    return KOKORO_BASE_URL + job_path
+
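+# For illustration only (a hypothetical job name and build id, not real data):
+#   build_kokoro_url('grpc/linux/master', '1234') returns
+#   'https://kokoro2.corp.google.com/job/grpc/job/linux/job/master/1234'
+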
+
+def create_issues(new_flakes, always_create):
+    for test_name, results_row in new_flakes.items():
+        poll_strategy, job_name, build_id, timestamp = results_row
+        # TODO(dgq): the Kokoro URL has a limited lifetime. The permanent and ideal
+        # URL would be the sponge one, but there's currently no easy way to retrieve
+        # it.
+        url = build_kokoro_url(job_name, build_id)
+        title = 'New Failure: ' + test_name
+        body = '- Test: {}\n- Poll Strategy: {}\n- URL: {}'.format(
+            test_name, poll_strategy, url)
+        labels = ['infra/New Failure']
+        if always_create:
+            proceed = True
+        else:
+            preexisting_issues = search_gh_issues(test_name)
+            if preexisting_issues['total_count'] > 0:
+                print('\nFound {} issues for "{}":'.format(
+                    preexisting_issues['total_count'], test_name))
+                for issue in preexisting_issues['items']:
+                    print('\t"{}" ; URL: {}'.format(issue['title'],
+                                                    issue['html_url']))
+            else:
+                print(
+                    '\nNo preexisting issues found for "{}"'.format(test_name))
+            proceed = raw_input(
+                'Create issue for:\nTitle: {}\nBody: {}\n[Y/n] '.format(
+                    title, body)) in ('y', 'Y', '')
+        if proceed:
+            assignees_str = raw_input(
+                'Assignees? (comma-separated, leave blank for unassigned): ')
+            assignees = [
+                assignee.strip() for assignee in assignees_str.split(',')
+                if assignee.strip()
+            ]
+            create_gh_issue(title, body, labels, assignees)
+
+
+def print_table(table, format):
+    first_time = True
+    for test_name, results_row in table.items():
+        poll_strategy, job_name, build_id, timestamp = results_row
+        full_kokoro_url = build_kokoro_url(job_name, build_id)
+        if format == 'human':
+            print("\t- Test: {}, Polling: {}, Timestamp: {}, url: {}".format(
+                test_name, poll_strategy, timestamp, full_kokoro_url))
+        else:
+            assert (format == 'csv')
+            if first_time:
+                print('test,timestamp,url')
+                first_time = False
+            print("{},{},{}".format(test_name, timestamp, full_kokoro_url))
+
+
+Row = namedtuple('Row', ['poll_strategy', 'job_name', 'build_id', 'timestamp'])
+
+
+def get_new_failures(dates):
+    bq = big_query_utils.create_big_query()
+    this_script_path = os.path.join(os.path.dirname(__file__))
+    sql_script = os.path.join(this_script_path, 'sql/new_failures_24h.sql')
+    with open(sql_script) as query_file:
+        query = query_file.read().format(
+            calibration_begin=dates['calibration']['begin'],
+            calibration_end=dates['calibration']['end'],
+            reporting_begin=dates['reporting']['begin'],
+            reporting_end=dates['reporting']['end'])
+    logging.debug("Query:\n%s", query)
+    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
+    page = bq.jobs().getQueryResults(
+        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+    rows = page.get('rows')
+    if rows:
+        return {
+            row['f'][0]['v']: Row(
+                poll_strategy=row['f'][1]['v'],
+                job_name=row['f'][2]['v'],
+                build_id=row['f'][3]['v'],
+                timestamp=row['f'][4]['v'])
+            for row in rows
+        }
+    else:
+        return {}
+
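+# Each BigQuery result row handled above looks roughly like
+#   {'f': [{'v': <test_binary>}, {'v': <poll_strategy>}, {'v': <job_name>},
+#          {'v': <build_id>}, {'v': <timestamp>}]}
+# (one cell per column selected in sql/new_failures_24h.sql), which is why
+# get_new_failures() indexes row['f'][N]['v'].
+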
+
+def parse_isodate(date_str):
+    return datetime.datetime.strptime(date_str, "%Y-%m-%d").date()
+
+
+def get_new_flakes(args):
+    """The from_date_str argument marks the beginning of the "calibration", used
+  to establish the set of pre-existing flakes, which extends over
+  "calibration_days".  After the calibration period, "reporting_days" is the
+  length of time during which new flakes will be reported.
+
+from
+date
+  |--------------------|---------------|
+  ^____________________^_______________^
+       calibration         reporting
+         days                days
+  """
+    dates = process_date_args(args)
+    new_failures = get_new_failures(dates)
+    logging.info('|new failures| = %d', len(new_failures))
+    return new_failures
+
+
+def build_args_parser():
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--calibration_days',
+        type=int,
+        default=7,
+        help='How many days to consider for pre-existing flakes.')
+    parser.add_argument(
+        '--reporting_days',
+        type=int,
+        default=1,
+        help='How many days to consider for the detection of new flakes.')
+    parser.add_argument(
+        '--count_only',
+        dest='count_only',
+        action='store_true',
+        help='Display only number of new flakes.')
+    parser.set_defaults(count_only=False)
+    parser.add_argument(
+        '--create_issues',
+        dest='create_issues',
+        action='store_true',
+        help='Create issues for all new flakes.')
+    parser.set_defaults(create_issues=False)
+    parser.add_argument(
+        '--always_create_issues',
+        dest='always_create_issues',
+        action='store_true',
+        help='Always create issues for all new flakes. Otherwise,'
+        ' interactively prompt for every issue.')
+    parser.set_defaults(always_create_issues=False)
+    parser.add_argument(
+        '--token',
+        type=str,
+        default='',
+        help='GitHub token to use its API with a higher rate limit')
+    parser.add_argument(
+        '--format',
+        type=str,
+        choices=['human', 'csv'],
+        default='human',
+        help='Output format: are you a human or a machine?')
+    parser.add_argument(
+        '--loglevel',
+        type=str,
+        choices=['INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL'],
+        default='WARNING',
+        help='Logging level.')
+    return parser
+
+
+def process_date_args(args):
+    calibration_begin = (
+        datetime.date.today() - datetime.timedelta(days=args.calibration_days) -
+        datetime.timedelta(days=args.reporting_days))
+    calibration_end = calibration_begin + datetime.timedelta(
+        days=args.calibration_days)
+    reporting_begin = calibration_end
+    reporting_end = reporting_begin + datetime.timedelta(
+        days=args.reporting_days)
+    return {
+        'calibration': {
+            'begin': calibration_begin,
+            'end': calibration_end
+        },
+        'reporting': {
+            'begin': reporting_begin,
+            'end': reporting_end
+        }
+    }
+
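+# Worked example (hypothetical "today" of 2018-01-08, with the defaults of
+# --calibration_days=7 and --reporting_days=1):
+#   calibration_begin = 2018-01-08 - 7 days - 1 day = 2017-12-31
+#   calibration_end   = 2017-12-31 + 7 days         = 2018-01-07
+#   reporting window  = 2018-01-07 .. 2018-01-08
+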
+
+def main():
+    global TOKEN
+    args_parser = build_args_parser()
+    args = args_parser.parse_args()
+    if args.create_issues and not args.token:
+        raise ValueError(
+            'Missing --token argument, needed to create GitHub issues')
+    TOKEN = args.token
+
+    logging_level = getattr(logging, args.loglevel)
+    logging.basicConfig(format='%(asctime)s %(message)s', level=logging_level)
+    new_flakes = get_new_flakes(args)
+
+    dates = process_date_args(args)
+
+    dates_info_string = 'from {} until {} (calibrated from {} until {})'.format(
+        dates['reporting']['begin'].isoformat(),
+        dates['reporting']['end'].isoformat(),
+        dates['calibration']['begin'].isoformat(),
+        dates['calibration']['end'].isoformat())
+
+    if args.format == 'human':
+        if args.count_only:
+            print(len(new_flakes), dates_info_string)
+        elif new_flakes:
+            found_msg = 'Found {} new flakes {}'.format(
+                len(new_flakes), dates_info_string)
+            print(found_msg)
+            print('*' * len(found_msg))
+            print_table(new_flakes, 'human')
+            if args.create_issues:
+                create_issues(new_flakes, args.always_create_issues)
+        else:
+            print('No new flakes found', dates_info_string)
+    elif args.format == 'csv':
+        if args.count_only:
+            print('from_date,to_date,count')
+            print('{},{},{}'.format(dates['reporting']['begin'].isoformat(),
+                                    dates['reporting']['end'].isoformat(),
+                                    len(new_flakes)))
+        else:
+            print_table(new_flakes, 'csv')
+    else:
+        raise ValueError('Invalid argument for --format: {}'.format(
+            args.format))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/failures/sql/new_failures_24h.sql b/tools/failures/sql/new_failures_24h.sql
new file mode 100644
index 0000000..6ce0c5d
--- /dev/null
+++ b/tools/failures/sql/new_failures_24h.sql
@@ -0,0 +1,62 @@
+#standardSQL
+WITH calibration AS (
+  SELECT
+    RTRIM(LTRIM(REGEXP_REPLACE(filtered_test_name, r'(/\d+)|(bins/.+/)|(cmake/.+/.+/)', ''))) AS test_binary,
+    REGEXP_EXTRACT(test_name, r'GRPC_POLL_STRATEGY=(\w+)') AS poll_strategy,
+    job_name,
+    build_id
+  FROM (
+    SELECT
+      REGEXP_REPLACE(test_name, r'(/\d+)|(GRPC_POLL_STRATEGY=.+)', '') AS filtered_test_name,
+      test_name,
+      job_name,
+      build_id,
+      timestamp
+    FROM
+      `grpc-testing.jenkins_test_results.aggregate_results`
+    WHERE
+      timestamp > TIMESTAMP(DATETIME("{calibration_begin} 00:00:00", "America/Los_Angeles"))
+      AND timestamp <= TIMESTAMP(DATETIME("{calibration_end} 23:59:59", "America/Los_Angeles"))
+      AND NOT REGEXP_CONTAINS(job_name,
+        'portability')
+      AND result != 'PASSED'
+      AND result != 'SKIPPED' )),
+  reporting AS (
+  SELECT
+    RTRIM(LTRIM(REGEXP_REPLACE(filtered_test_name, r'(/\d+)|(bins/.+/)|(cmake/.+/.+/)', ''))) AS test_binary,
+    REGEXP_EXTRACT(test_name, r'GRPC_POLL_STRATEGY=(\w+)') AS poll_strategy,
+    job_name,
+    build_id,
+    timestamp
+  FROM (
+    SELECT
+      REGEXP_REPLACE(test_name, r'(/\d+)|(GRPC_POLL_STRATEGY=.+)', '') AS filtered_test_name,
+      test_name,
+      job_name,
+      build_id,
+      timestamp
+    FROM
+      `grpc-testing.jenkins_test_results.aggregate_results`
+    WHERE
+      timestamp > TIMESTAMP(DATETIME("{reporting_begin} 00:00:00", "America/Los_Angeles"))
+      AND timestamp <= TIMESTAMP(DATETIME("{reporting_end} 23:59:59", "America/Los_Angeles"))
+      AND NOT REGEXP_CONTAINS(job_name,
+        'portability')
+      AND result != 'PASSED'
+      AND result != 'SKIPPED' ))
+SELECT
+  reporting.test_binary,
+  reporting.poll_strategy,
+  reporting.job_name,
+  reporting.build_id,
+  STRING(reporting.timestamp, "America/Los_Angeles") as timestamp_MTV
+FROM
+  reporting
+LEFT JOIN
+  calibration
+ON
+  reporting.test_binary = calibration.test_binary
+WHERE
+  calibration.test_binary IS NULL
+ORDER BY
+  timestamp DESC;
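+
+-- The LEFT JOIN / IS NULL pattern above is an anti-join: it keeps only test
+-- binaries that failed during the reporting window but did not fail during
+-- the calibration window, i.e. newly failing tests.  The {calibration_begin},
+-- {calibration_end}, {reporting_begin} and {reporting_end} placeholders are
+-- substituted by detect_new_failures.py via str.format() with ISO dates
+-- (e.g. an illustrative 2018-01-07), so this file is a template rather than
+-- directly runnable SQL.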
diff --git a/tools/flakes/detect_flakes.py b/tools/flakes/detect_flakes.py
deleted file mode 100644
index c5c7f61..0000000
--- a/tools/flakes/detect_flakes.py
+++ /dev/null
@@ -1,105 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2015 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Detect new flakes introduced in the last 24h hours with respect to the
-previous six days"""
-
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-import datetime
-import os
-import sys
-import logging
-logging.basicConfig(format='%(asctime)s %(message)s')
-
-gcp_utils_dir = os.path.abspath(
-    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
-sys.path.append(gcp_utils_dir)
-
-import big_query_utils
-
-def print_table(table):
-    kokoro_base_url = 'https://kokoro.corp.google.com/job/'
-    for k, v in table.items():
-      job_name = v[0]
-      build_id = v[1]
-      ts = int(float(v[2]))
-      # TODO(dgq): timezone handling is wrong. We need to determine the timezone
-      # of the computer running this script.
-      human_ts = datetime.datetime.utcfromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S PDT')
-      job_path = '{}/{}'.format('/job/'.join(job_name.split('/')), build_id)
-      full_kokoro_url = kokoro_base_url + job_path
-      print("Test: {}, Timestamp: {}, url: {}\n".format(k, human_ts, full_kokoro_url))
-
-
-def get_flaky_tests(days_lower_bound, days_upper_bound, limit=None):
-  """ period is one of "WEEK", "DAY", etc.
-  (see https://cloud.google.com/bigquery/docs/reference/standard-sql/functions-and-operators#date_add). """
-
-  bq = big_query_utils.create_big_query()
-  query = """
-SELECT
-  REGEXP_REPLACE(test_name, r'/\d+', '') AS filtered_test_name,
-  job_name,
-  build_id,
-  timestamp
-FROM
-  [grpc-testing:jenkins_test_results.aggregate_results]
-WHERE
-    timestamp > DATE_ADD(CURRENT_DATE(), {days_lower_bound}, "DAY")
-    AND timestamp <= DATE_ADD(CURRENT_DATE(), {days_upper_bound}, "DAY")
-  AND NOT REGEXP_MATCH(job_name, '.*portability.*')
-  AND result != 'PASSED' AND result != 'SKIPPED'
-ORDER BY timestamp desc
-""".format(days_lower_bound=days_lower_bound, days_upper_bound=days_upper_bound)
-  if limit:
-    query += '\n LIMIT {}'.format(limit)
-  query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
-  page = bq.jobs().getQueryResults(
-      pageToken=None, **query_job['jobReference']).execute(num_retries=3)
-  rows = page.get('rows')
-  if rows:
-    return {row['f'][0]['v']:
-            (row['f'][1]['v'], row['f'][2]['v'], row['f'][3]['v'])
-            for row in rows}
-  else:
-    return {}
-
-
-def get_new_flakes():
-  last_week_sans_yesterday = get_flaky_tests(-14, -1)
-  last_24 = get_flaky_tests(0, +1)
-  last_week_sans_yesterday_names = set(last_week_sans_yesterday.keys())
-  last_24_names = set(last_24.keys())
-  logging.debug('|last_week_sans_yesterday| =', len(last_week_sans_yesterday_names))
-  logging.debug('|last_24_names| =', len(last_24_names))
-  new_flakes = last_24_names - last_week_sans_yesterday_names
-  logging.debug('|new_flakes| = ', len(new_flakes))
-  return {k: last_24[k] for k in new_flakes}
-
-
-def main():
-  new_flakes = get_new_flakes()
-  if new_flakes:
-    print("Found {} new flakes:".format(len(new_flakes)))
-    print_table(new_flakes)
-  else:
-    print("No new flakes found!")
-
-
-if __name__ == '__main__':
-  main()
diff --git a/tools/gce/linux_kokoro_performance_worker_init.sh b/tools/gce/linux_kokoro_performance_worker_init.sh
index ac3d393..460f639 100755
--- a/tools/gce/linux_kokoro_performance_worker_init.sh
+++ b/tools/gce/linux_kokoro_performance_worker_init.sh
@@ -114,6 +114,19 @@
 sudo apt-get install -y dotnet-dev-1.0.0-preview2.1-003155
 sudo apt-get install -y dotnet-dev-1.0.1
 
+# C# .NET Core 1.0.5 SDK
+curl -O https://download.microsoft.com/download/2/4/A/24A06858-E8AC-469B-8AE6-D0CEC9BA982A/dotnet-ubuntu.16.04-x64.1.0.5.tar.gz
+sudo mkdir -p /opt/dotnet
+sudo tar zxf dotnet-ubuntu.16.04-x64.1.0.5.tar.gz -C /opt/dotnet
+sudo ln -s /opt/dotnet/dotnet /usr/local/bin
+
+# C# .NET dependencies
+wget http://security.ubuntu.com/ubuntu/pool/main/i/icu/libicu52_52.1-8ubuntu0.2_amd64.deb
+sudo dpkg -i libicu52_52.1-8ubuntu0.2_amd64.deb
+wget http://security.ubuntu.com/ubuntu/pool/main/i/icu/libicu55_55.1-7ubuntu0.3_amd64.deb
+sudo dpkg -i libicu55_55.1-7ubuntu0.3_amd64.deb
+sudo apt-get update && sudo apt-get install -y libicu55
+
 # Ruby dependencies
 gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
 curl -sSL https://get.rvm.io | bash -s stable --ruby
diff --git a/tools/gcp/utils/big_query_utils.py b/tools/gcp/utils/big_query_utils.py
index 77a5f56..3e811ca 100755
--- a/tools/gcp/utils/big_query_utils.py
+++ b/tools/gcp/utils/big_query_utils.py
@@ -28,154 +28,174 @@
 
 
 def create_big_query():
-  """Authenticates with cloud platform and gets a BiqQuery service object
+    """Authenticates with cloud platform and gets a BiqQuery service object
   """
-  creds = GoogleCredentials.get_application_default()
-  return discovery.build('bigquery', 'v2', credentials=creds, cache_discovery=False)
+    creds = GoogleCredentials.get_application_default()
+    return discovery.build(
+        'bigquery', 'v2', credentials=creds, cache_discovery=False)
 
 
 def create_dataset(biq_query, project_id, dataset_id):
-  is_success = True
-  body = {
-      'datasetReference': {
-          'projectId': project_id,
-          'datasetId': dataset_id
-      }
-  }
+    is_success = True
+    body = {
+        'datasetReference': {
+            'projectId': project_id,
+            'datasetId': dataset_id
+        }
+    }
 
-  try:
-    dataset_req = biq_query.datasets().insert(projectId=project_id, body=body)
-    dataset_req.execute(num_retries=NUM_RETRIES)
-  except HttpError as http_error:
-    if http_error.resp.status == 409:
-      print 'Warning: The dataset %s already exists' % dataset_id
-    else:
-      # Note: For more debugging info, print "http_error.content"
-      print 'Error in creating dataset: %s. Err: %s' % (dataset_id, http_error)
-      is_success = False
-  return is_success
+    try:
+        dataset_req = biq_query.datasets().insert(
+            projectId=project_id, body=body)
+        dataset_req.execute(num_retries=NUM_RETRIES)
+    except HttpError as http_error:
+        if http_error.resp.status == 409:
+            print 'Warning: The dataset %s already exists' % dataset_id
+        else:
+            # Note: For more debugging info, print "http_error.content"
+            print 'Error in creating dataset: %s. Err: %s' % (dataset_id,
+                                                              http_error)
+            is_success = False
+    return is_success
 
 
 def create_table(big_query, project_id, dataset_id, table_id, table_schema,
                  description):
-  fields = [{'name': field_name,
-             'type': field_type,
-             'description': field_description
-             } for (field_name, field_type, field_description) in table_schema]
-  return create_table2(big_query, project_id, dataset_id, table_id,
-                       fields, description)
+    fields = [{
+        'name': field_name,
+        'type': field_type,
+        'description': field_description
+    } for (field_name, field_type, field_description) in table_schema]
+    return create_table2(big_query, project_id, dataset_id, table_id, fields,
+                         description)
 
 
-def create_partitioned_table(big_query, project_id, dataset_id, table_id, table_schema,
-                             description, partition_type='DAY', expiration_ms=_EXPIRATION_MS):
-  """Creates a partitioned table. By default, a date-paritioned table is created with
+def create_partitioned_table(big_query,
+                             project_id,
+                             dataset_id,
+                             table_id,
+                             table_schema,
+                             description,
+                             partition_type='DAY',
+                             expiration_ms=_EXPIRATION_MS):
+    """Creates a partitioned table. By default, a date-paritioned table is created with
   each partition lasting 30 days after it was last modified.
   """
-  fields = [{'name': field_name,
-             'type': field_type,
-             'description': field_description
-             } for (field_name, field_type, field_description) in table_schema]
-  return create_table2(big_query, project_id, dataset_id, table_id,
-                       fields, description, partition_type, expiration_ms)
+    fields = [{
+        'name': field_name,
+        'type': field_type,
+        'description': field_description
+    } for (field_name, field_type, field_description) in table_schema]
+    return create_table2(big_query, project_id, dataset_id, table_id, fields,
+                         description, partition_type, expiration_ms)
 
 
-def create_table2(big_query, project_id, dataset_id, table_id, fields_schema,
-                 description, partition_type=None, expiration_ms=None):
-  is_success = True
+def create_table2(big_query,
+                  project_id,
+                  dataset_id,
+                  table_id,
+                  fields_schema,
+                  description,
+                  partition_type=None,
+                  expiration_ms=None):
+    is_success = True
 
-  body = {
-      'description': description,
-      'schema': {
-          'fields': fields_schema
-      },
-      'tableReference': {
-          'datasetId': dataset_id,
-          'projectId': project_id,
-          'tableId': table_id
-      }
-  }
-
-  if partition_type and expiration_ms:
-    body["timePartitioning"] = {
-      "type": partition_type,
-      "expirationMs": expiration_ms
+    body = {
+        'description': description,
+        'schema': {
+            'fields': fields_schema
+        },
+        'tableReference': {
+            'datasetId': dataset_id,
+            'projectId': project_id,
+            'tableId': table_id
+        }
     }
 
-  try:
-    table_req = big_query.tables().insert(projectId=project_id,
-                                          datasetId=dataset_id,
-                                          body=body)
-    res = table_req.execute(num_retries=NUM_RETRIES)
-    print 'Successfully created %s "%s"' % (res['kind'], res['id'])
-  except HttpError as http_error:
-    if http_error.resp.status == 409:
-      print 'Warning: Table %s already exists' % table_id
-    else:
-      print 'Error in creating table: %s. Err: %s' % (table_id, http_error)
-      is_success = False
-  return is_success
+    if partition_type and expiration_ms:
+        body["timePartitioning"] = {
+            "type": partition_type,
+            "expirationMs": expiration_ms
+        }
+
+    try:
+        table_req = big_query.tables().insert(
+            projectId=project_id, datasetId=dataset_id, body=body)
+        res = table_req.execute(num_retries=NUM_RETRIES)
+        print 'Successfully created %s "%s"' % (res['kind'], res['id'])
+    except HttpError as http_error:
+        if http_error.resp.status == 409:
+            print 'Warning: Table %s already exists' % table_id
+        else:
+            print 'Error in creating table: %s. Err: %s' % (table_id,
+                                                            http_error)
+            is_success = False
+    return is_success
 
 
 def patch_table(big_query, project_id, dataset_id, table_id, fields_schema):
-  is_success = True
+    is_success = True
 
-  body = {
-      'schema': {
-          'fields': fields_schema
-      },
-      'tableReference': {
-          'datasetId': dataset_id,
-          'projectId': project_id,
-          'tableId': table_id
-      }
-  }
+    body = {
+        'schema': {
+            'fields': fields_schema
+        },
+        'tableReference': {
+            'datasetId': dataset_id,
+            'projectId': project_id,
+            'tableId': table_id
+        }
+    }
 
-  try:
-    table_req = big_query.tables().patch(projectId=project_id,
-                                         datasetId=dataset_id,
-                                         tableId=table_id,
-                                         body=body)
-    res = table_req.execute(num_retries=NUM_RETRIES)
-    print 'Successfully patched %s "%s"' % (res['kind'], res['id'])
-  except HttpError as http_error:
-    print 'Error in creating table: %s. Err: %s' % (table_id, http_error)
-    is_success = False
-  return is_success
+    try:
+        table_req = big_query.tables().patch(
+            projectId=project_id,
+            datasetId=dataset_id,
+            tableId=table_id,
+            body=body)
+        res = table_req.execute(num_retries=NUM_RETRIES)
+        print 'Successfully patched %s "%s"' % (res['kind'], res['id'])
+    except HttpError as http_error:
+        print 'Error in patching table: %s. Err: %s' % (table_id, http_error)
+        is_success = False
+    return is_success
 
 
 def insert_rows(big_query, project_id, dataset_id, table_id, rows_list):
-  is_success = True
-  body = {'rows': rows_list}
-  try:
-    insert_req = big_query.tabledata().insertAll(projectId=project_id,
-                                                 datasetId=dataset_id,
-                                                 tableId=table_id,
-                                                 body=body)
-    res = insert_req.execute(num_retries=NUM_RETRIES)
-    if res.get('insertErrors', None):
-      print 'Error inserting rows! Response: %s' % res
-      is_success = False
-  except HttpError as http_error:
-    print 'Error inserting rows to the table %s' % table_id
-    is_success = False
+    is_success = True
+    body = {'rows': rows_list}
+    try:
+        insert_req = big_query.tabledata().insertAll(
+            projectId=project_id,
+            datasetId=dataset_id,
+            tableId=table_id,
+            body=body)
+        res = insert_req.execute(num_retries=NUM_RETRIES)
+        if res.get('insertErrors', None):
+            print 'Error inserting rows! Response: %s' % res
+            is_success = False
+    except HttpError as http_error:
+        print 'Error inserting rows to the table %s' % table_id
+        is_success = False
 
-  return is_success
+    return is_success
 
 
 def sync_query_job(big_query, project_id, query, timeout=5000):
-  query_data = {'query': query, 'timeoutMs': timeout}
-  query_job = None
-  try:
-    query_job = big_query.jobs().query(
-        projectId=project_id,
-        body=query_data).execute(num_retries=NUM_RETRIES)
-  except HttpError as http_error:
-    print 'Query execute job failed with error: %s' % http_error
-    print http_error.content
-  return query_job
+    query_data = {'query': query, 'timeoutMs': timeout}
+    query_job = None
+    try:
+        query_job = big_query.jobs().query(
+            projectId=project_id,
+            body=query_data).execute(num_retries=NUM_RETRIES)
+    except HttpError as http_error:
+        print 'Query execute job failed with error: %s' % http_error
+        print http_error.content
+    return query_job
 
-  # List of (column name, column type, description) tuples
+
+# List of (column name, column type, description) tuples
 def make_row(unique_row_id, row_values_dict):
-  """row_values_dict is a dictionary of column name and column value.
+    """row_values_dict is a dictionary of column name and column value.
   """
-  return {'insertId': unique_row_id, 'json': row_values_dict}
+    return {'insertId': unique_row_id, 'json': row_values_dict}
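+
+# A minimal usage sketch (project/dataset/table names are illustrative only,
+# not taken from this repo):
+#   bq = create_big_query()
+#   row = make_row('some-unique-id', {'test': 'foo_test', 'result': 'FAILED'})
+#   insert_rows(bq, 'my-project', 'my_dataset', 'my_table', [row])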
diff --git a/tools/github/pr_latency.py b/tools/github/pr_latency.py
index 5d63583..34870a5 100644
--- a/tools/github/pr_latency.py
+++ b/tools/github/pr_latency.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Measure the time between PR creation and completion of all tests.
 
 You'll need a github API token to avoid being rate-limited. See
@@ -46,118 +45,159 @@
 
 
 def gh(url):
-  request = urllib2.Request(url)
-  if TOKEN:
-    request.add_header('Authorization', 'token {}'.format(TOKEN))
-  response = urllib2.urlopen(request)
-  return response.read()
+    request = urllib2.Request(url)
+    if TOKEN:
+        request.add_header('Authorization', 'token {}'.format(TOKEN))
+    response = urllib2.urlopen(request)
+    return response.read()
 
 
 def print_csv_header():
-  print('pr,base_time,test_time,latency_seconds,successes,failures,errors')
+    print('pr,base_time,test_time,latency_seconds,successes,failures,errors')
 
 
-def output(pr, base_time, test_time, diff_time, successes, failures, errors, mode='human'):
-  if mode == 'human':
-    print("PR #{} base time: {} UTC, Tests completed at: {} UTC. Latency: {}."
-          "\n\tSuccesses: {}, Failures: {}, Errors: {}".format(
-              pr, base_time, test_time, diff_time, successes, failures, errors))
-  elif mode == 'csv':
-    print(','.join([str(pr), str(base_time),
-                    str(test_time), str(int((test_time-base_time).total_seconds())),
-                    str(successes), str(failures), str(errors)]))
+def output(pr,
+           base_time,
+           test_time,
+           diff_time,
+           successes,
+           failures,
+           errors,
+           mode='human'):
+    if mode == 'human':
+        print(
+            "PR #{} base time: {} UTC, Tests completed at: {} UTC. Latency: {}."
+            "\n\tSuccesses: {}, Failures: {}, Errors: {}".format(
+                pr, base_time, test_time, diff_time, successes, failures,
+                errors))
+    elif mode == 'csv':
+        print(','.join([
+            str(pr),
+            str(base_time),
+            str(test_time),
+            str(int((test_time - base_time).total_seconds())),
+            str(successes),
+            str(failures),
+            str(errors)
+        ]))
 
 
 def parse_timestamp(datetime_str):
-  return datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%SZ')
+    return datetime.strptime(datetime_str, '%Y-%m-%dT%H:%M:%SZ')
 
 
 def to_posix_timestamp(dt):
-  return str((dt - datetime(1970, 1, 1)).total_seconds())
+    return str((dt - datetime(1970, 1, 1)).total_seconds())
 
 
 def get_pr_data():
-  latest_prs = json.loads(gh(PRS))
-  res =  [{'number': pr['number'],
-           'created_at': parse_timestamp(pr['created_at']),
-           'updated_at': parse_timestamp(pr['updated_at']),
-           'statuses_url': pr['statuses_url']}
-          for pr in latest_prs]
-  return res
+    latest_prs = json.loads(gh(PRS))
+    res = [{
+        'number': pr['number'],
+        'created_at': parse_timestamp(pr['created_at']),
+        'updated_at': parse_timestamp(pr['updated_at']),
+        'statuses_url': pr['statuses_url']
+    } for pr in latest_prs]
+    return res
 
 
 def get_commits_data(pr_number):
-  commits = json.loads(gh(COMMITS.format(pr_number=pr_number)))
-  return {'num_commits': len(commits),
-          'most_recent_date': parse_timestamp(commits[-1]['commit']['author']['date'])}
+    commits = json.loads(gh(COMMITS.format(pr_number=pr_number)))
+    return {
+        'num_commits': len(commits),
+        'most_recent_date':
+        parse_timestamp(commits[-1]['commit']['author']['date'])
+    }
 
 
 def get_status_data(statuses_url, system):
-  status_url = statuses_url.replace('statuses', 'status')
-  statuses = json.loads(gh(status_url + '?per_page=100'))
-  successes = 0
-  failures = 0
-  errors = 0
-  latest_datetime = None
-  if not statuses: return None
-  if system == 'kokoro': string_in_target_url = 'kokoro'
-  elif system == 'jenkins': string_in_target_url = 'grpc-testing'
-  for status in statuses['statuses']:
-    if not status['target_url'] or string_in_target_url not in status['target_url']: continue  # Ignore jenkins
-    if status['state'] == 'pending': return None
-    elif status['state'] == 'success': successes += 1
-    elif status['state'] == 'failure': failures += 1
-    elif status['state'] == 'error': errors += 1
-    if not latest_datetime:
-      latest_datetime = parse_timestamp(status['updated_at'])
-    else:
-      latest_datetime = max(latest_datetime, parse_timestamp(status['updated_at']))
-  # First status is the most recent one.
-  if any([successes, failures, errors]) and sum([successes, failures, errors]) > 15:
-    return {'latest_datetime': latest_datetime,
+    status_url = statuses_url.replace('statuses', 'status')
+    statuses = json.loads(gh(status_url + '?per_page=100'))
+    successes = 0
+    failures = 0
+    errors = 0
+    latest_datetime = None
+    if not statuses: return None
+    if system == 'kokoro': string_in_target_url = 'kokoro'
+    elif system == 'jenkins': string_in_target_url = 'grpc-testing'
+    for status in statuses['statuses']:
+        if not status['target_url'] or string_in_target_url not in status['target_url']:
+            continue  # Ignore jenkins
+        if status['state'] == 'pending': return None
+        elif status['state'] == 'success': successes += 1
+        elif status['state'] == 'failure': failures += 1
+        elif status['state'] == 'error': errors += 1
+        if not latest_datetime:
+            latest_datetime = parse_timestamp(status['updated_at'])
+        else:
+            latest_datetime = max(latest_datetime,
+                                  parse_timestamp(status['updated_at']))
+    # First status is the most recent one.
+    if any([successes, failures, errors
+           ]) and sum([successes, failures, errors]) > 15:
+        return {
+            'latest_datetime': latest_datetime,
             'successes': successes,
             'failures': failures,
-            'errors': errors}
-  else: return None
+            'errors': errors
+        }
+    else:
+        return None
 
 
 def build_args_parser():
-  import argparse
-  parser = argparse.ArgumentParser()
-  parser.add_argument('--format', type=str, choices=['human', 'csv'],
-                      default='human',
-                      help='Output format: are you a human or a machine?')
-  parser.add_argument('--system', type=str, choices=['jenkins', 'kokoro'],
-                      required=True, help='Consider only the given CI system')
-  parser.add_argument('--token', type=str, default='',
-                      help='GitHub token to use its API with a higher rate limit')
-  return parser
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--format',
+        type=str,
+        choices=['human', 'csv'],
+        default='human',
+        help='Output format: are you a human or a machine?')
+    parser.add_argument(
+        '--system',
+        type=str,
+        choices=['jenkins', 'kokoro'],
+        required=True,
+        help='Consider only the given CI system')
+    parser.add_argument(
+        '--token',
+        type=str,
+        default='',
+        help='GitHub token to use its API with a higher rate limit')
+    return parser
 
 
 def main():
-  import sys
-  global TOKEN
-  args_parser = build_args_parser()
-  args = args_parser.parse_args()
-  TOKEN = args.token
-  if args.format == 'csv': print_csv_header()
-  for pr_data in get_pr_data():
-    commit_data = get_commits_data(pr_data['number'])
-    # PR with a single commit -> use the PRs creation time.
-    # else -> use the latest commit's date.
-    base_timestamp = pr_data['updated_at']
-    if commit_data['num_commits'] > 1:
-      base_timestamp = commit_data['most_recent_date']
-    else:
-      base_timestamp = pr_data['created_at']
-    last_status = get_status_data(pr_data['statuses_url'], args.system)
-    if last_status:
-      diff = last_status['latest_datetime'] - base_timestamp
-      if diff < timedelta(hours=5):
-        output(pr_data['number'], base_timestamp, last_status['latest_datetime'],
-               diff, last_status['successes'], last_status['failures'],
-               last_status['errors'], mode=args.format)
+    import sys
+    global TOKEN
+    args_parser = build_args_parser()
+    args = args_parser.parse_args()
+    TOKEN = args.token
+    if args.format == 'csv': print_csv_header()
+    for pr_data in get_pr_data():
+        commit_data = get_commits_data(pr_data['number'])
+        # PR with a single commit -> use the PR's creation time.
+        # else -> use the latest commit's date.
+        base_timestamp = pr_data['updated_at']
+        if commit_data['num_commits'] > 1:
+            base_timestamp = commit_data['most_recent_date']
+        else:
+            base_timestamp = pr_data['created_at']
+        last_status = get_status_data(pr_data['statuses_url'], args.system)
+        if last_status:
+            diff = last_status['latest_datetime'] - base_timestamp
+            if diff < timedelta(hours=5):
+                output(
+                    pr_data['number'],
+                    base_timestamp,
+                    last_status['latest_datetime'],
+                    diff,
+                    last_status['successes'],
+                    last_status['failures'],
+                    last_status['errors'],
+                    mode=args.format)
 
 
 if __name__ == '__main__':
-  main()
+    main()
diff --git a/tools/internal_ci/helper_scripts/prepare_build_linux_perf_multilang_rc b/tools/internal_ci/helper_scripts/prepare_build_linux_perf_multilang_rc
new file mode 100644
index 0000000..f1031ae
--- /dev/null
+++ b/tools/internal_ci/helper_scripts/prepare_build_linux_perf_multilang_rc
@@ -0,0 +1,40 @@
+#!/bin/bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Source this rc script to prepare the environment for linux perf builds
+
+# Need to increase open files limit and size for perf test
+ulimit -n 32768
+ulimit -c unlimited
+
+# Download non-core gRPC repos
+git clone --recursive https://github.com/grpc/grpc-go ./../grpc-go
+git clone --recursive https://github.com/grpc/grpc-java ./../grpc-java
+git clone --recursive https://github.com/grpc/grpc-node ./../grpc-node
+
+sudo pip install tabulate
+
+# Set up Ruby
+export PATH="$HOME/.rbenv/bin:$PATH"
+eval "$(rbenv init -)"
+gem list bundler
+gem install bundler --no-ri --no-rdoc
+
+# Allow SSH to Kokoro performance workers without explicit key verification
+gsutil cp gs://grpc-testing-secrets/grpc_kokoro_performance_ssh_keys/id_rsa ~/.ssh
+echo -e 'Host grpc-kokoro-performance*\n\tStrictHostKeyChecking no' >> ~/.ssh/config
+chmod 600 ~/.ssh/id_rsa ~/.ssh/config
+
+git submodule update --init
diff --git a/tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh b/tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh
new file mode 100644
index 0000000..a767218
--- /dev/null
+++ b/tools/internal_ci/linux/grpc_bazel_on_foundry_dbg.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# A temporary solution to give Kokoro credentials. 
+# The file name 4321_grpc-testing-service needs to match auth_credential in 
+# the build config.
+mkdir -p ${KOKORO_KEYSTORE_DIR}
+cp ${KOKORO_GFILE_DIR}/GrpcTesting-d0eeee2db331.json ${KOKORO_KEYSTORE_DIR}/4321_grpc-testing-service
+
+mkdir -p /tmpfs/tmp/bazel-canary
+ln -f "${KOKORO_GFILE_DIR}/bazel-canary" /tmpfs/tmp/bazel-canary/bazel
+chmod 755 "${KOKORO_GFILE_DIR}/bazel-canary"
+export PATH="/tmpfs/tmp/bazel-canary:${PATH}"
+# This should show /tmpfs/tmp/bazel-canary/bazel
+which bazel
+chmod +x "${KOKORO_GFILE_DIR}/bazel_wrapper.py"
+
+# change to grpc repo root
+cd $(dirname $0)/../../..
+
+source tools/internal_ci/helper_scripts/prepare_build_linux_rc
+
+"${KOKORO_GFILE_DIR}/bazel_wrapper.py" \
+  --host_jvm_args=-Dbazel.DigestFunction=SHA1 \
+  test --jobs="50" \
+  --test_timeout="300,450,1200,3600" \
+  --test_output=errors  \
+  --verbose_failures=true  \
+  --keep_going  \
+  --remote_accept_cached=true  \
+  --spawn_strategy=remote  \
+  --remote_local_fallback=false  \
+  --remote_timeout=3600  \
+  --strategy=Javac=remote  \
+  --strategy=Closure=remote  \
+  --genrule_strategy=remote  \
+  --experimental_strict_action_env=true \
+  --experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/asci-toolchain/nosla-debian8-clang-fl@sha256:aa20628a902f06a11a015caa94b0432eb60690de2d2525bd046b9eea046f5d8a" }' \
+  --crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/debian8_clang/0.2.0/bazel_0.7.0:toolchain \
+  --define GRPC_PORT_ISOLATED_RUNTIME=1 \
+  -c dbg \
+  -- //test/...
diff --git a/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh b/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh
new file mode 100644
index 0000000..defe664
--- /dev/null
+++ b/tools/internal_ci/linux/grpc_bazel_on_foundry_opt.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -ex
+
+# A temporary solution to give Kokoro credentials. 
+# The file name 4321_grpc-testing-service needs to match auth_credential in 
+# the build config.
+mkdir -p ${KOKORO_KEYSTORE_DIR}
+cp ${KOKORO_GFILE_DIR}/GrpcTesting-d0eeee2db331.json ${KOKORO_KEYSTORE_DIR}/4321_grpc-testing-service
+
+mkdir -p /tmpfs/tmp/bazel-canary
+ln -f "${KOKORO_GFILE_DIR}/bazel-canary" /tmpfs/tmp/bazel-canary/bazel
+chmod 755 "${KOKORO_GFILE_DIR}/bazel-canary"
+export PATH="/tmpfs/tmp/bazel-canary:${PATH}"
+# This should show /tmpfs/tmp/bazel-canary/bazel
+which bazel
+chmod +x "${KOKORO_GFILE_DIR}/bazel_wrapper.py"
+
+# change to grpc repo root
+cd $(dirname $0)/../../..
+
+source tools/internal_ci/helper_scripts/prepare_build_linux_rc
+
+"${KOKORO_GFILE_DIR}/bazel_wrapper.py" \
+  --host_jvm_args=-Dbazel.DigestFunction=SHA1 \
+  test --jobs="50" \
+  --test_timeout="300,450,1200,3600" \
+  --test_output=errors  \
+  --verbose_failures=true  \
+  --keep_going  \
+  --remote_accept_cached=true  \
+  --spawn_strategy=remote  \
+  --remote_local_fallback=false  \
+  --remote_timeout=3600  \
+  --strategy=Javac=remote  \
+  --strategy=Closure=remote  \
+  --genrule_strategy=remote  \
+  --experimental_strict_action_env=true \
+  --experimental_remote_platform_override='properties:{name:"container-image" value:"docker://gcr.io/asci-toolchain/nosla-debian8-clang-fl@sha256:aa20628a902f06a11a015caa94b0432eb60690de2d2525bd046b9eea046f5d8a" }' \
+  --crosstool_top=@com_github_bazelbuild_bazeltoolchains//configs/debian8_clang/0.2.0/bazel_0.7.0:toolchain \
+  --define GRPC_PORT_ISOLATED_RUNTIME=1 \
+  -c opt \
+  -- //test/...
diff --git a/tools/internal_ci/linux/grpc_full_performance_master.cfg b/tools/internal_ci/linux/grpc_full_performance_master.cfg
new file mode 100644
index 0000000..8852130
--- /dev/null
+++ b/tools/internal_ci/linux/grpc_full_performance_master.cfg
@@ -0,0 +1,25 @@
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Config file for the internal CI (in protobuf text format)
+
+# Location of the continuous shell script in repository.
+build_file: "grpc/tools/internal_ci/linux/grpc_full_performance_master.sh"
+timeout_mins: 600
+action {
+  define_artifacts {
+    regex: "**/*sponge_log.xml"
+    regex: "**/perf_reports/**"
+  }
+}
diff --git a/tools/internal_ci/linux/grpc_full_performance_master.sh b/tools/internal_ci/linux/grpc_full_performance_master.sh
new file mode 100755
index 0000000..1846839
--- /dev/null
+++ b/tools/internal_ci/linux/grpc_full_performance_master.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+set -ex
+
+# Accumulate failures across the test runs below and report them at the end.
+EXIT_CODE=0
+# Enter the gRPC repo root
+cd $(dirname $0)/../../..
+
+source tools/internal_ci/helper_scripts/prepare_build_linux_perf_multilang_rc
+
+# run 8core client vs 8core server
+tools/run_tests/run_performance_tests.py \
+    -l c++ csharp ruby java python go php7 php7_protobuf_c \
+    --netperf \
+    --category scalable \
+    --remote_worker_host grpc-kokoro-performance-server-8core grpc-kokoro-performance-client-8core grpc-kokoro-performance-client2-8core \
+    -u kbuilder \
+    --bq_result_table performance_test.kokoro_performance_experiment \
+    --xml_report reports/8core/sponge_log.xml \
+    || EXIT_CODE=1
+
+# prevent pushing leftover build files to remote hosts in the next step.
+git clean -fdxq -e reports
+
+# scalability with 32cores (and upload to a different BQ table)
+tools/run_tests/run_performance_tests.py \
+    -l c++ java csharp go \
+    --netperf \
+    --category scalable \
+    --remote_worker_host grpc-kokoro-performance-server-32core grpc-kokoro-performance-client-32core grpc-kokoro-performance-client2-32core \
+    -u kbuilder \
+    --bq_result_table performance_test.kokoro_performance_experiment_32core \
+    --xml_report reports/32core/sponge_log.xml \
+    || EXIT_CODE=1
+
+# prevent pushing leftover build files to remote hosts in the next step.
+git clean -fdxq -e reports
+
+# selected scenarios on Windows
+tools/run_tests/run_performance_tests.py \
+    -l csharp \
+    --category scalable \
+    --remote_worker_host grpc-kokoro-performance-windows1 grpc-kokoro-performance-windows2 \
+    --bq_result_table performance_test.kokoro_performance_experiment_windows \
+    --xml_report reports/windows/sponge_log.xml \
+    || EXIT_CODE=1
+
+exit $EXIT_CODE
diff --git a/tools/interop_matrix/client_matrix.py b/tools/interop_matrix/client_matrix.py
index ee24ae7..71d3a79 100644
--- a/tools/interop_matrix/client_matrix.py
+++ b/tools/interop_matrix/client_matrix.py
@@ -15,17 +15,34 @@
 
 # Dictionaries used for client matrix testing.
 
+
 def get_github_repo(lang):
-  return {
-      'go': 'git@github.com:grpc/grpc-go.git',
-      'java': 'git@github.com:grpc/grpc-java.git',
-      'node': 'git@github.com:grpc/grpc-node.git',
-      # all other languages use the grpc.git repo.
-  }.get(lang, 'git@github.com:grpc/grpc.git')
+    return {
+        'go': 'git@github.com:grpc/grpc-go.git',
+        'java': 'git@github.com:grpc/grpc-java.git',
+        'node': 'git@github.com:grpc/grpc-node.git',
+        # all other languages use the grpc.git repo.
+    }.get(lang, 'git@github.com:grpc/grpc.git')
+
+
+def get_release_tags(lang):
+    return map(lambda r: get_release_tag_name(r), LANG_RELEASE_MATRIX[lang])
+
+
+def get_release_tag_name(release_info):
+    assert len(release_info.keys()) == 1
+    return release_info.keys()[0]
+
+
+def should_build_docker_interop_image_from_release_tag(lang):
+    if lang in ['go', 'java', 'node']:
+        return False
+    return True
+
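+# Each entry of LANG_RELEASE_MATRIX below is a single-key dict mapping a
+# release tag to optional extra metadata, e.g. (sketch based on the ruby
+# 'v1.0.1' entry below):
+#   {'v1.0.1': {'patch': ['tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile']}}
+# get_release_tag_name() extracts the tag name and get_release_tags() returns
+# the list of tags for a language.
+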
 
 # Dictionary of runtimes per language
 LANG_RUNTIME_MATRIX = {
-    'cxx': ['cxx'],             # This is actually debian8.
+    'cxx': ['cxx'],  # This is actually debian8.
     'go': ['go1.7', 'go1.8'],
     'java': ['java_oracle8'],
     'python': ['python'],
@@ -39,98 +56,197 @@
 # a release tag pointing to the latest build of the branch.
 LANG_RELEASE_MATRIX = {
     'cxx': [
-        'v1.0.1',
-        'v1.1.4',
-        'v1.2.5',
-        'v1.3.9',
-        'v1.4.2',
-        'v1.6.6',
-        'v1.7.2',
+        {
+            'v1.0.1': None
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
     'go': [
-        'v1.0.5',
-        'v1.2.1',
-        'v1.3.0',
-        'v1.4.2',
-        'v1.5.2',
-        'v1.6.0',
-        'v1.7.0',
-        'v1.7.1',
-        'v1.7.2',
-        'v1.7.3',
-        'v1.8.0',
+        {
+            'v1.0.5': None
+        },
+        {
+            'v1.2.1': None
+        },
+        {
+            'v1.3.0': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.5.2': None
+        },
+        {
+            'v1.6.0': None
+        },
+        {
+            'v1.7.4': None
+        },
+        {
+            'v1.8.2': None
+        },
     ],
     'java': [
-        'v1.0.3',
-        'v1.1.2',
-        'v1.2.0',
-        'v1.3.1',
-        'v1.4.0',
-        'v1.5.0',
-        'v1.6.1',
-        'v1.7.0',
-        'v1.8.0',
+        {
+            'v1.0.3': None
+        },
+        {
+            'v1.1.2': None
+        },
+        {
+            'v1.2.0': None
+        },
+        {
+            'v1.3.1': None
+        },
+        {
+            'v1.4.0': None
+        },
+        {
+            'v1.5.0': None
+        },
+        {
+            'v1.6.1': None
+        },
+        {
+            'v1.7.0': None
+        },
+        {
+            'v1.8.0': None
+        },
     ],
     'python': [
-        'v1.0.x',
-        'v1.1.4',
-        'v1.2.5',
-        'v1.3.9',
-        'v1.4.2',
-        'v1.6.6',
-    ],
-    'python': [
-        'v1.0.x', 
-        'v1.1.4',
-        'v1.2.5',
-        'v1.3.9',
-        'v1.4.2',
-        'v1.6.6',
-        'v1.7.2',    
-    ],
-    'python': [
-        'v1.0.x',
-        'v1.1.4',
-        'v1.2.5',
-        'v1.3.9',
-        'v1.4.2',
-        'v1.6.6',
-        'v1.7.2',    
+        {
+            'v1.0.x': None
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
     'node': [
-        'v1.0.1',
-        'v1.1.4',
-        'v1.2.5',
-        'v1.3.9',
-        'v1.4.2',
-        'v1.6.6',
-        #'v1.7.1',  Failing tests.
+        {
+            'v1.0.1': None
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        #{'v1.7.1': None}, Failing tests
     ],
     'ruby': [
-        # Ruby v1.0.x doesn't have the fix #8914, therefore not supported.
-        'v1.1.4',
-        'v1.2.5',
-        'v1.3.9',
-        'v1.4.2',
-        'v1.6.6',
-        'v1.7.2',
+        {
+            'v1.0.1': {
+                'patch': [
+                    'tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile',
+                    'tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh',
+                ]
+            }
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
     'php': [
-        'v1.0.1',
-        'v1.1.4',
-        'v1.2.5',
-        'v1.3.9',
-        'v1.4.2',
-        'v1.6.6',
-        'v1.7.2',
+        {
+            'v1.0.1': None
+        },
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
-   'csharp': [
-        #'v1.0.1',
-        'v1.1.4',
-        'v1.2.5',
-        'v1.3.9',
-        'v1.4.2',
-        'v1.6.6',
-        'v1.7.2',
+    'csharp': [
+        #{'v1.0.1': None},
+        {
+            'v1.1.4': None
+        },
+        {
+            'v1.2.5': None
+        },
+        {
+            'v1.3.9': None
+        },
+        {
+            'v1.4.2': None
+        },
+        {
+            'v1.6.6': None
+        },
+        {
+            'v1.7.2': None
+        },
     ],
 }
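
For orientation (not part of the diff): each entry in the restructured
`LANG_RELEASE_MATRIX` above is a single-key dict mapping a release tag to either
`None` or a dict of extra build info such as a `'patch'` file list. The sketch
below mirrors how `get_release_tag_name`, `get_release_tags`, and the patch
lookup in `create_matrix_images.py` read that shape; the sample data is
illustrative and copied from the ruby entries above.

```
# Illustrative sketch of the new release-entry shape; not part of the change.
SAMPLE_RELEASE_MATRIX = {
    'ruby': [
        {
            'v1.0.1': {
                'patch': [
                    'tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile',
                ]
            }
        },
        {
            'v1.1.4': None
        },
    ],
}


def get_release_tag_name(release_info):
    # Each entry holds exactly one tag. The checked-in script uses
    # release_info.keys()[0], which assumes Python 2; list() keeps this
    # sketch working on Python 3 as well.
    assert len(release_info.keys()) == 1
    return list(release_info.keys())[0]


def get_release_tags(lang):
    return [get_release_tag_name(info) for info in SAMPLE_RELEASE_MATRIX[lang]]


def files_to_patch(lang, release):
    # Same lookup shape as maybe_apply_patches_on_git_tag() in
    # create_matrix_images.py: return the 'patch' list if the tag has one.
    for release_info in SAMPLE_RELEASE_MATRIX[lang]:
        if get_release_tag_name(release_info) == release:
            info = release_info[release]
            return info.get('patch', []) if info else []
    return []


print(get_release_tags('ruby'))           # ['v1.0.1', 'v1.1.4']
print(files_to_patch('ruby', 'v1.0.1'))   # the Dockerfile path above
print(files_to_patch('ruby', 'v1.1.4'))   # []
```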
diff --git a/tools/interop_matrix/create_matrix_images.py b/tools/interop_matrix/create_matrix_images.py
index 493a7d5..ef9f6a5 100755
--- a/tools/interop_matrix/create_matrix_images.py
+++ b/tools/interop_matrix/create_matrix_images.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Build and upload docker images to Google Container Registry per matrix."""
 
 from __future__ import print_function
@@ -29,8 +28,8 @@
 # Language Runtime Matrix
 import client_matrix
 
-python_util_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../run_tests/python_utils'))
+python_util_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../run_tests/python_utils'))
 sys.path.append(python_util_dir)
 import dockerjob
 import jobset
@@ -38,236 +37,305 @@
 _IMAGE_BUILDER = 'tools/run_tests/dockerize/build_interop_image.sh'
 _LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
 # All gRPC release tags, flattened, deduped and sorted.
-_RELEASES = sorted(list(set(
-    i for l in client_matrix.LANG_RELEASE_MATRIX.values() for i in l)))
+_RELEASES = sorted(
+    list(
+        set(
+            client_matrix.get_release_tag_name(info)
+            for lang in client_matrix.LANG_RELEASE_MATRIX.values()
+            for info in lang)))
 
 # Destination directory inside docker image to keep extra info from build time.
 _BUILD_INFO = '/var/local/build_info'
 
 argp = argparse.ArgumentParser(description='Run interop tests.')
-argp.add_argument('--gcr_path',
-                  default='gcr.io/grpc-testing',
-                  help='Path of docker images in Google Container Registry')
+argp.add_argument(
+    '--gcr_path',
+    default='gcr.io/grpc-testing',
+    help='Path of docker images in Google Container Registry')
 
-argp.add_argument('--release',
-                  default='master',
-                  choices=['all', 'master'] + _RELEASES,
-                  help='github commit tag to checkout.  When building all '
-                  'releases defined in client_matrix.py, use "all". Valid only '
-                  'with --git_checkout.')
+argp.add_argument(
+    '--release',
+    default='master',
+    choices=['all', 'master'] + _RELEASES,
+    help='github commit tag to checkout.  When building all '
+    'releases defined in client_matrix.py, use "all". Valid only '
+    'with --git_checkout.')
 
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(_LANGUAGES),
-                  nargs='+',
-                  default=['all'],
-                  help='Test languages to build docker images for.')
+argp.add_argument(
+    '-l',
+    '--language',
+    choices=['all'] + sorted(_LANGUAGES),
+    nargs='+',
+    default=['all'],
+    help='Test languages to build docker images for.')
 
-argp.add_argument('--git_checkout',
-                  action='store_true',
-                  help='Use a separate git clone tree for building grpc stack. '
-                  'Required when using --release flag.  By default, current'
-                  'tree and the sibling will be used for building grpc stack.')
+argp.add_argument(
+    '--git_checkout',
+    action='store_true',
+    help='Use a separate git clone tree for building grpc stack. '
+    'Required when using --release flag.  By default, the current '
+    'tree and the sibling will be used for building grpc stack.')
 
-argp.add_argument('--git_checkout_root',
-                  default='/export/hda3/tmp/grpc_matrix',
-                  help='Directory under which grpc-go/java/main repo will be '
-                  'cloned.  Valid only with --git_checkout.')
+argp.add_argument(
+    '--git_checkout_root',
+    default='/export/hda3/tmp/grpc_matrix',
+    help='Directory under which grpc-go/java/main repo will be '
+    'cloned.  Valid only with --git_checkout.')
 
-argp.add_argument('--keep',
-                  action='store_true',
-                  help='keep the created local images after uploading to GCR')
+argp.add_argument(
+    '--keep',
+    action='store_true',
+    help='keep the created local images after uploading to GCR')
 
-argp.add_argument('--reuse_git_root',
-                  default=False,
-                  action='store_const',
-                  const=True,                  
-                  help='reuse the repo dir. If False, the existing git root '
-                  'directory will removed before a clean checkout, because '
-                  'reusing the repo can cause git checkout error if you switch '
-                  'between releases.')
-
+argp.add_argument(
+    '--reuse_git_root',
+    default=False,
+    action='store_const',
+    const=True,
+    help='reuse the repo dir. If False, the existing git root '
+    'directory will be removed before a clean checkout, because '
+    'reusing the repo can cause git checkout error if you switch '
+    'between releases.')
 
 args = argp.parse_args()
 
+
 def add_files_to_image(image, with_files, label=None):
-  """Add files to a docker image.
+    """Add files to a docker image.
 
   image: docker image name, i.e. grpc_interop_java:26328ad8
   with_files: additional files to include in the docker image.
   label: label string to attach to the image.
   """
-  tag_idx = image.find(':')
-  if tag_idx == -1:
-    jobset.message('FAILED', 'invalid docker image %s' % image, do_newline=True)
-    sys.exit(1)
-  orig_tag = '%s_' % image
-  subprocess.check_output(['docker', 'tag', image, orig_tag])
+    tag_idx = image.find(':')
+    if tag_idx == -1:
+        jobset.message(
+            'FAILED', 'invalid docker image %s' % image, do_newline=True)
+        sys.exit(1)
+    orig_tag = '%s_' % image
+    subprocess.check_output(['docker', 'tag', image, orig_tag])
 
-  lines = ['FROM ' + orig_tag]
-  if label:
-    lines.append('LABEL %s' % label)
+    lines = ['FROM ' + orig_tag]
+    if label:
+        lines.append('LABEL %s' % label)
 
-  temp_dir = tempfile.mkdtemp()
-  atexit.register(lambda: subprocess.call(['rm', '-rf', temp_dir]))
+    temp_dir = tempfile.mkdtemp()
+    atexit.register(lambda: subprocess.call(['rm', '-rf', temp_dir]))
 
-  # Copy with_files inside the tmp directory, which will be the docker build
-  # context.
-  for f in with_files:
-    shutil.copy(f, temp_dir)
-    lines.append('COPY %s %s/' % (os.path.basename(f), _BUILD_INFO))
+    # Copy with_files inside the tmp directory, which will be the docker build
+    # context.
+    for f in with_files:
+        shutil.copy(f, temp_dir)
+        lines.append('COPY %s %s/' % (os.path.basename(f), _BUILD_INFO))
 
-  # Create a Dockerfile.
-  with open(os.path.join(temp_dir, 'Dockerfile'), 'w') as f:
-    f.write('\n'.join(lines))
+    # Create a Dockerfile.
+    with open(os.path.join(temp_dir, 'Dockerfile'), 'w') as f:
+        f.write('\n'.join(lines))
 
-  jobset.message('START', 'Repackaging %s' % image, do_newline=True)
-  build_cmd = ['docker', 'build', '--rm', '--tag', image, temp_dir]
-  subprocess.check_output(build_cmd)
-  dockerjob.remove_image(orig_tag, skip_nonexistent=True)
+    jobset.message('START', 'Repackaging %s' % image, do_newline=True)
+    build_cmd = ['docker', 'build', '--rm', '--tag', image, temp_dir]
+    subprocess.check_output(build_cmd)
+    dockerjob.remove_image(orig_tag, skip_nonexistent=True)
+
 
 def build_image_jobspec(runtime, env, gcr_tag, stack_base):
-  """Build interop docker image for a language with runtime.
+    """Build interop docker image for a language with runtime.
 
   runtime: a <lang><version> string, for example go1.8.
   env:     dictionary of env vars to pass to the build script.
   gcr_tag: the tag for the docker image (i.e. v1.3.0).
   stack_base: the local gRPC repo path.
   """
-  basename = 'grpc_interop_%s' % runtime
-  tag = '%s/%s:%s' % (args.gcr_path, basename, gcr_tag)
-  build_env = {
-      'INTEROP_IMAGE': tag,
-      'BASE_NAME': basename,
-      'TTY_FLAG': '-t'
-  }
-  build_env.update(env)
-  build_job = jobset.JobSpec(
-          cmdline=[_IMAGE_BUILDER],
-          environ=build_env,
-          shortname='build_docker_%s' % runtime,
-          timeout_seconds=30*60)
-  build_job.tag = tag
-  return build_job
+    basename = 'grpc_interop_%s' % runtime
+    tag = '%s/%s:%s' % (args.gcr_path, basename, gcr_tag)
+    build_env = {'INTEROP_IMAGE': tag, 'BASE_NAME': basename, 'TTY_FLAG': '-t'}
+    build_env.update(env)
+    image_builder_path = _IMAGE_BUILDER
+    if client_matrix.should_build_docker_interop_image_from_release_tag(lang):
+        image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER)
+    build_job = jobset.JobSpec(
+        cmdline=[image_builder_path],
+        environ=build_env,
+        shortname='build_docker_%s' % runtime,
+        timeout_seconds=30 * 60)
+    build_job.tag = tag
+    return build_job
+
 
 def build_all_images_for_lang(lang):
-  """Build all docker images for a language across releases and runtimes."""
-  if not args.git_checkout:
-    if args.release != 'master':
-      print('WARNING: --release is set but will be ignored\n')
-    releases = ['master']
-  else:
-    if args.release == 'all':
-      releases = client_matrix.LANG_RELEASE_MATRIX[lang]
+    """Build all docker images for a language across releases and runtimes."""
+    if not args.git_checkout:
+        if args.release != 'master':
+            print('WARNING: --release is set but will be ignored\n')
+        releases = ['master']
     else:
-      # Build a particular release.
-      if args.release not in ['master'] + client_matrix.LANG_RELEASE_MATRIX[lang]:
-        jobset.message('SKIPPED',
-                       '%s for %s is not defined' % (args.release, lang),
-                       do_newline=True)
-        return []
-      releases = [args.release]
+        if args.release == 'all':
+            releases = client_matrix.get_release_tags(lang)
+        else:
+            # Build a particular release.
+            if args.release not in ['master'
+                                   ] + client_matrix.get_release_tags(lang):
+                jobset.message(
+                    'SKIPPED',
+                    '%s for %s is not defined' % (args.release, lang),
+                    do_newline=True)
+                return []
+            releases = [args.release]
 
-  images = []
-  for release in releases:
-    images += build_all_images_for_release(lang, release)
-  jobset.message('SUCCESS',
-                 'All docker images built for %s at %s.' % (lang, releases),
-                 do_newline=True)
-  return images
+    images = []
+    for release in releases:
+        images += build_all_images_for_release(lang, release)
+    jobset.message(
+        'SUCCESS',
+        'All docker images built for %s at %s.' % (lang, releases),
+        do_newline=True)
+    return images
+
 
 def build_all_images_for_release(lang, release):
-  """Build all docker images for a release across all runtimes."""
-  docker_images = []
-  build_jobs = []
+    """Build all docker images for a release across all runtimes."""
+    docker_images = []
+    build_jobs = []
 
-  env = {}
-  # If we not using current tree or the sibling for grpc stack, do checkout.
-  stack_base = ''
-  if args.git_checkout:
-    stack_base = checkout_grpc_stack(lang, release)
-    var ={'go': 'GRPC_GO_ROOT', 'java': 'GRPC_JAVA_ROOT', 'node': 'GRPC_NODE_ROOT'}.get(lang, 'GRPC_ROOT')
-    env[var] = stack_base
+    env = {}
+    # If not using the current tree or the sibling for the grpc stack, do a checkout.
+    stack_base = ''
+    if args.git_checkout:
+        stack_base = checkout_grpc_stack(lang, release)
+        var = {
+            'go': 'GRPC_GO_ROOT',
+            'java': 'GRPC_JAVA_ROOT',
+            'node': 'GRPC_NODE_ROOT'
+        }.get(lang, 'GRPC_ROOT')
+        env[var] = stack_base
 
-  for runtime in client_matrix.LANG_RUNTIME_MATRIX[lang]:
-    job = build_image_jobspec(runtime, env, release, stack_base)
-    docker_images.append(job.tag)
-    build_jobs.append(job)
+    for runtime in client_matrix.LANG_RUNTIME_MATRIX[lang]:
+        job = build_image_jobspec(runtime, env, release, stack_base)
+        docker_images.append(job.tag)
+        build_jobs.append(job)
 
-  jobset.message('START', 'Building interop docker images.', do_newline=True)
-  print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+    jobset.message('START', 'Building interop docker images.', do_newline=True)
+    print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
 
-  num_failures, _ = jobset.run(
-      build_jobs, newline_on_success=True, maxjobs=multiprocessing.cpu_count())
-  if num_failures:
-    jobset.message('FAILED', 'Failed to build interop docker images.',
-                   do_newline=True)
-    docker_images_cleanup.extend(docker_images)
-    sys.exit(1)
+    num_failures, _ = jobset.run(
+        build_jobs,
+        newline_on_success=True,
+        maxjobs=multiprocessing.cpu_count())
+    if num_failures:
+        jobset.message(
+            'FAILED', 'Failed to build interop docker images.', do_newline=True)
+        docker_images_cleanup.extend(docker_images)
+        sys.exit(1)
 
-  jobset.message('SUCCESS',
-                 'All docker images built for %s at %s.' % (lang, release),
-                 do_newline=True)
+    jobset.message(
+        'SUCCESS',
+        'All docker images built for %s at %s.' % (lang, release),
+        do_newline=True)
 
-  if release != 'master':
-    commit_log = os.path.join(stack_base, 'commit_log')
-    if os.path.exists(commit_log):
-      for image in docker_images:
-        add_files_to_image(image, [commit_log], 'release=%s' % release)
-  return docker_images
+    if release != 'master':
+        commit_log = os.path.join(stack_base, 'commit_log')
+        if os.path.exists(commit_log):
+            for image in docker_images:
+                add_files_to_image(image, [commit_log], 'release=%s' % release)
+    return docker_images
+
 
 def cleanup():
-  if not args.keep:
-    for image in docker_images_cleanup:
-      dockerjob.remove_image(image, skip_nonexistent=True)
+    if not args.keep:
+        for image in docker_images_cleanup:
+            dockerjob.remove_image(image, skip_nonexistent=True)
+
 
 docker_images_cleanup = []
 atexit.register(cleanup)
 
+
+def maybe_apply_patches_on_git_tag(stack_base, lang, release):
+    files_to_patch = []
+    for release_info in client_matrix.LANG_RELEASE_MATRIX[lang]:
+        if client_matrix.get_release_tag_name(release_info) == release:
+            if release_info[release] is not None:
+                files_to_patch = release_info[release].get('patch')
+                break
+    if not files_to_patch:
+        return
+    patch_file_relative_path = 'patches/%s_%s/git_repo.patch' % (lang, release)
+    patch_file = os.path.abspath(
+        os.path.join(os.path.dirname(__file__), patch_file_relative_path))
+    if not os.path.exists(patch_file):
+        jobset.message('FAILED',
+                       'expected patch file |%s| to exist' % patch_file)
+        sys.exit(1)
+    subprocess.check_output(
+        ['git', 'apply', patch_file], cwd=stack_base, stderr=subprocess.STDOUT)
+    for repo_relative_path in files_to_patch:
+        subprocess.check_output(
+            ['git', 'add', repo_relative_path],
+            cwd=stack_base,
+            stderr=subprocess.STDOUT)
+    subprocess.check_output(
+        [
+            'git', 'commit', '-m',
+            ('Hack performed on top of %s git '
+             'tag in order to build and run the %s '
+             'interop tests on that tag.' % (lang, release))
+        ],
+        cwd=stack_base,
+        stderr=subprocess.STDOUT)
+
+
 def checkout_grpc_stack(lang, release):
-  """Invokes 'git check' for the lang/release and returns directory created."""
-  assert args.git_checkout and args.git_checkout_root
+    """Invokes 'git check' for the lang/release and returns directory created."""
+    assert args.git_checkout and args.git_checkout_root
 
-  if not os.path.exists(args.git_checkout_root):
-    os.makedirs(args.git_checkout_root)
+    if not os.path.exists(args.git_checkout_root):
+        os.makedirs(args.git_checkout_root)
 
-  repo = client_matrix.get_github_repo(lang)
-  # Get the subdir name part of repo
-  # For example, 'git@github.com:grpc/grpc-go.git' should use 'grpc-go'.
-  repo_dir = os.path.splitext(os.path.basename(repo))[0]
-  stack_base = os.path.join(args.git_checkout_root, repo_dir)
+    repo = client_matrix.get_github_repo(lang)
+    # Get the subdir name part of repo
+    # For example, 'git@github.com:grpc/grpc-go.git' should use 'grpc-go'.
+    repo_dir = os.path.splitext(os.path.basename(repo))[0]
+    stack_base = os.path.join(args.git_checkout_root, repo_dir)
 
-  # Clean up leftover repo dir if necessary.
-  if not args.reuse_git_root and os.path.exists(stack_base):
-    jobset.message('START', 'Removing git checkout root.', do_newline=True)
-    shutil.rmtree(stack_base)
+    # Clean up leftover repo dir if necessary.
+    if not args.reuse_git_root and os.path.exists(stack_base):
+        jobset.message('START', 'Removing git checkout root.', do_newline=True)
+        shutil.rmtree(stack_base)
 
-  if not os.path.exists(stack_base):
-    subprocess.check_call(['git', 'clone', '--recursive', repo],
-                          cwd=os.path.dirname(stack_base))
+    if not os.path.exists(stack_base):
+        subprocess.check_call(
+            ['git', 'clone', '--recursive', repo],
+            cwd=os.path.dirname(stack_base))
 
-  # git checkout.
-  jobset.message('START', 'git checkout %s from %s' % (release, stack_base),
-                 do_newline=True)
-  # We should NEVER do checkout on current tree !!!
-  assert not os.path.dirname(__file__).startswith(stack_base)
-  output = subprocess.check_output(
-      ['git', 'checkout', release], cwd=stack_base, stderr=subprocess.STDOUT)
-  commit_log = subprocess.check_output(['git', 'log', '-1'], cwd=stack_base)
-  jobset.message('SUCCESS', 'git checkout', 
-                 '%s: %s' % (str(output), commit_log), 
-                 do_newline=True)
+    # git checkout.
+    jobset.message(
+        'START',
+        'git checkout %s from %s' % (release, stack_base),
+        do_newline=True)
+    # We should NEVER do a checkout on the current tree!!!
+    assert not os.path.dirname(__file__).startswith(stack_base)
+    output = subprocess.check_output(
+        ['git', 'checkout', release], cwd=stack_base, stderr=subprocess.STDOUT)
+    maybe_apply_patches_on_git_tag(stack_base, lang, release)
+    commit_log = subprocess.check_output(['git', 'log', '-1'], cwd=stack_base)
+    jobset.message(
+        'SUCCESS',
+        'git checkout',
+        '%s: %s' % (str(output), commit_log),
+        do_newline=True)
 
-  # Write git log to commit_log so it can be packaged with the docker image.
-  with open(os.path.join(stack_base, 'commit_log'), 'w') as f:
-    f.write(commit_log)
-  return stack_base
+    # Write git log to commit_log so it can be packaged with the docker image.
+    with open(os.path.join(stack_base, 'commit_log'), 'w') as f:
+        f.write(commit_log)
+    return stack_base
+
 
 languages = args.language if args.language != ['all'] else _LANGUAGES
 for lang in languages:
-  docker_images = build_all_images_for_lang(lang)
-  for image in docker_images:
-    jobset.message('START', 'Uploading %s' % image, do_newline=True)
-    # docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
-    assert image.startswith(args.gcr_path) and image.find(':') != -1
+    docker_images = build_all_images_for_lang(lang)
+    for image in docker_images:
+        jobset.message('START', 'Uploading %s' % image, do_newline=True)
+        # docker image name must be in the format <gcr_path>/<image>:<gcr_tag>
+        assert image.startswith(args.gcr_path) and image.find(':') != -1
 
-    subprocess.call(['gcloud', 'docker', '--', 'push', image])
+        subprocess.call(['gcloud', 'docker', '--', 'push', image])
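
As a quick cross-check of the conventions above (a sketch, not part of the
diff): `maybe_apply_patches_on_git_tag` looks for a patch at
`patches/<lang>_<release>/git_repo.patch` next to this script, and
`build_image_jobspec` tags images as `<gcr_path>/grpc_interop_<runtime>:<gcr_tag>`.
The snippet below just reproduces those two string conventions with sample inputs.

```
import os


def expected_patch_file(script_dir, lang, release):
    # Same layout maybe_apply_patches_on_git_tag() expects:
    # <script_dir>/patches/<lang>_<release>/git_repo.patch
    relative = 'patches/%s_%s/git_repo.patch' % (lang, release)
    return os.path.abspath(os.path.join(script_dir, relative))


def image_tag(gcr_path, runtime, gcr_tag):
    # Same naming build_image_jobspec() uses for the docker image.
    return '%s/grpc_interop_%s:%s' % (gcr_path, runtime, gcr_tag)


print(expected_patch_file('tools/interop_matrix', 'ruby', 'v1.0.1'))
# -> .../tools/interop_matrix/patches/ruby_v1.0.1/git_repo.patch
print(image_tag('gcr.io/grpc-testing', 'ruby', 'v1.0.1'))
# -> gcr.io/grpc-testing/grpc_interop_ruby:v1.0.1
```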
diff --git a/tools/interop_matrix/patches/README.md b/tools/interop_matrix/patches/README.md
new file mode 100644
index 0000000..0c0893f
--- /dev/null
+++ b/tools/interop_matrix/patches/README.md
@@ -0,0 +1,38 @@
+# Patches to grpc repo tags for the backwards compatibility interop tests
+
+This directory has patch files that can be applied to different tags
+of the grpc git repo in order to run the interop tests for a specific
+language based on that tag.
+
+For example, the ruby interop tests do not run on the v1.0.1 tag out of the
+box, but we still want to test compatibility of the 1.0.1 ruby release with
+other versions. To do that, we can apply a patch from this directory to the
+v1.0.1 tag that makes the changes needed to run the ruby interop tests from
+that tag, and then use the patched tree to build the docker image for the
+ruby v1.0.1 interop tests.
+
+## How to add a new patch to this directory
+
+Patch files in this directory are meant to be applied to a git tag
+with a `git apply` command.
+
+1. Under the `patches` directory, create a new subdirectory
+titled `<language>_<git_tag>` for the git tag being modified.
+
+2. `git checkout <git_tag>`
+
+3. Make necessary modifications to the git repo at that tag.
+
+4. Generate the patch file, switch back to your working branch, and copy the
+patch into the new subdirectory:
+
+```
+git diff > ~/git_repo.patch
+git checkout <current working branch>
+cp ~/git_repo.patch tools/interop_matrix/patches/<language>_<git_tag>/
+```
+
+5. Edit `LANG_RELEASE_MATRIX` in `client_matrix.py` for your language/tag and
+add a `'patch': [<files>, ...]` entry to its dictionary (see the example entry
+at the end of this document).
+
+After doing this, the interop image creation script will apply that patch to
+the tag with `git apply` before building the image and uploading it to the
+test image repo.
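+
+For reference, the ruby v1.0.1 entry in `client_matrix.py` looks roughly like
+this (see that file for the authoritative version):
+
+```
+'ruby': [
+    {
+        'v1.0.1': {
+            'patch': [
+                'tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile',
+                'tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh',
+            ]
+        }
+    },
+    # Releases without a patch map the tag to None, e.g. {'v1.1.4': None}.
+]
+```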
diff --git a/tools/interop_matrix/patches/ruby_v1.0.1/git_repo.patch b/tools/interop_matrix/patches/ruby_v1.0.1/git_repo.patch
new file mode 100644
index 0000000..0cd92d6
--- /dev/null
+++ b/tools/interop_matrix/patches/ruby_v1.0.1/git_repo.patch
@@ -0,0 +1,34 @@
+diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
+index 88b5130..7ae9f7d 100644
+--- a/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
++++ b/tools/dockerfile/interoptest/grpc_interop_ruby/Dockerfile
+@@ -70,12 +70,12 @@ RUN apt-get update && apt-get install -y time && apt-get clean
+ RUN gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3
+ RUN \curl -sSL https://get.rvm.io | bash -s stable
+ 
+-# Install Ruby 2.1
+-RUN /bin/bash -l -c "rvm install ruby-2.1"
+-RUN /bin/bash -l -c "rvm use --default ruby-2.1"
++# Install Ruby 2.1.8
++RUN /bin/bash -l -c "rvm install ruby-2.1.8"
++RUN /bin/bash -l -c "rvm use --default ruby-2.1.8"
+ RUN /bin/bash -l -c "echo 'gem: --no-ri --no-rdoc' > ~/.gemrc"
+ RUN /bin/bash -l -c "echo 'export PATH=/usr/local/rvm/bin:$PATH' >> ~/.bashrc"
+-RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1' >> ~/.bashrc"
++RUN /bin/bash -l -c "echo 'rvm --default use ruby-2.1.8' >> ~/.bashrc"
+ RUN /bin/bash -l -c "gem install bundler --no-ri --no-rdoc"
+ 
+ # Prepare ccache
+diff --git a/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh b/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh
+index 97b3860..cec046d 100755
+--- a/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh
++++ b/tools/dockerfile/interoptest/grpc_interop_ruby/build_interop.sh
+@@ -38,7 +38,7 @@ git clone --recursive /var/local/jenkins/grpc /var/local/git/grpc
+ cp -r /var/local/jenkins/service_account $HOME || true
+ 
+ cd /var/local/git/grpc
+-rvm --default use ruby-2.1
++rvm --default use ruby-2.1.8
+ 
+ # build Ruby interop client and server
+ (cd src/ruby && gem update bundler && bundle && rake compile)
diff --git a/tools/interop_matrix/run_interop_matrix_tests.py b/tools/interop_matrix/run_interop_matrix_tests.py
index 4265bc5..3391ef5 100755
--- a/tools/interop_matrix/run_interop_matrix_tests.py
+++ b/tools/interop_matrix/run_interop_matrix_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run tests using docker images in Google Container Registry per matrix."""
 
 from __future__ import print_function
@@ -30,8 +29,8 @@
 # Language Runtime Matrix
 import client_matrix
 
-python_util_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../run_tests/python_utils'))
+python_util_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../run_tests/python_utils'))
 sys.path.append(python_util_dir)
 import dockerjob
 import jobset
@@ -40,46 +39,56 @@
 
 _LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys()
 # All gRPC release tags, flattened, deduped and sorted.
-_RELEASES = sorted(list(set(
-    i for l in client_matrix.LANG_RELEASE_MATRIX.values() for i in l)))
+_RELEASES = sorted(
+    list(
+        set(
+            client_matrix.get_release_tag_name(info)
+            for lang in client_matrix.LANG_RELEASE_MATRIX.values()
+            for info in lang)))
 _TEST_TIMEOUT = 30
 
 argp = argparse.ArgumentParser(description='Run interop tests.')
 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('--gcr_path',
-                  default='gcr.io/grpc-testing',
-                  help='Path of docker images in Google Container Registry')
-argp.add_argument('--release',
-                  default='all',
-                  choices=['all', 'master'] + _RELEASES,
-                  help='Release tags to test.  When testing all '
-                  'releases defined in client_matrix.py, use "all".')
+argp.add_argument(
+    '--gcr_path',
+    default='gcr.io/grpc-testing',
+    help='Path of docker images in Google Container Registry')
+argp.add_argument(
+    '--release',
+    default='all',
+    choices=['all', 'master'] + _RELEASES,
+    help='Release tags to test.  When testing all '
+    'releases defined in client_matrix.py, use "all".')
 
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(_LANGUAGES),
-                  nargs='+',
-                  default=['all'],
-                  help='Languages to test')
+argp.add_argument(
+    '-l',
+    '--language',
+    choices=['all'] + sorted(_LANGUAGES),
+    nargs='+',
+    default=['all'],
+    help='Languages to test')
 
-argp.add_argument('--keep',
-                  action='store_true',
-                  help='keep the created local images after finishing the tests.')
+argp.add_argument(
+    '--keep',
+    action='store_true',
+    help='keep the created local images after finishing the tests.')
 
-argp.add_argument('--report_file',
-                  default='report.xml',
-                  help='The result file to create.')
+argp.add_argument(
+    '--report_file', default='report.xml', help='The result file to create.')
 
-argp.add_argument('--allow_flakes',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help=('Allow flaky tests to show as passing (re-runs failed '
-                        'tests up to five times)'))
-argp.add_argument('--bq_result_table',
-                  default='',
-                  type=str,
-                  nargs='?',
-                  help='Upload test results to a specified BQ table.')
+argp.add_argument(
+    '--allow_flakes',
+    default=False,
+    action='store_const',
+    const=True,
+    help=('Allow flaky tests to show as passing (re-runs failed '
+          'tests up to five times)'))
+argp.add_argument(
+    '--bq_result_table',
+    default='',
+    type=str,
+    nargs='?',
+    help='Upload test results to a specified BQ table.')
 
 args = argp.parse_args()
 
@@ -87,138 +96,154 @@
 
 
 def find_all_images_for_lang(lang):
-  """Find docker images for a language across releases and runtimes.
+    """Find docker images for a language across releases and runtimes.
 
   Returns dictionary of list of (<tag>, <image-full-path>) keyed by runtime.
   """
-  # Find all defined releases.
-  if args.release == 'all':
-    releases = ['master'] + client_matrix.LANG_RELEASE_MATRIX[lang]
-  else:
-    # Look for a particular release.
-    if args.release not in ['master'] + client_matrix.LANG_RELEASE_MATRIX[lang]:
-      jobset.message('SKIPPED',
-                     '%s for %s is not defined' % (args.release, lang),
-                     do_newline=True)
-      return {}
-    releases = [args.release]
+    # Find all defined releases.
+    if args.release == 'all':
+        releases = ['master'] + client_matrix.get_release_tags(lang)
+    else:
+        # Look for a particular release.
+        if args.release not in ['master'
+                               ] + client_matrix.get_release_tags(lang):
+            jobset.message(
+                'SKIPPED',
+                '%s for %s is not defined' % (args.release, lang),
+                do_newline=True)
+            return {}
+        releases = [args.release]
 
-  # Images tuples keyed by runtime.
-  images = {}
-  for runtime in client_matrix.LANG_RUNTIME_MATRIX[lang]:
-    image_path = '%s/grpc_interop_%s' % (args.gcr_path, runtime)
-    output = subprocess.check_output(['gcloud', 'beta', 'container', 'images',
-                                      'list-tags', '--format=json', image_path])
-    docker_image_list = json.loads(output)
-    # All images should have a single tag or no tag.
-    # TODO(adelez): Remove tagless images.
-    tags = [i['tags'][0] for i in docker_image_list if i['tags']]
-    jobset.message('START', 'Found images for %s: %s' % (image_path, tags),
-                   do_newline=True)
-    skipped = len(docker_image_list) - len(tags)
-    jobset.message('SKIPPED', 'Skipped images (no-tag/unknown-tag): %d' % skipped,
-                   do_newline=True)
-    # Filter tags based on the releases.
-    images[runtime] = [(tag,'%s:%s' % (image_path,tag)) for tag in tags if
-                       tag in releases]
-  return images
+    # Images tuples keyed by runtime.
+    images = {}
+    for runtime in client_matrix.LANG_RUNTIME_MATRIX[lang]:
+        image_path = '%s/grpc_interop_%s' % (args.gcr_path, runtime)
+        output = subprocess.check_output([
+            'gcloud', 'beta', 'container', 'images', 'list-tags',
+            '--format=json', image_path
+        ])
+        docker_image_list = json.loads(output)
+        # All images should have a single tag or no tag.
+        # TODO(adelez): Remove tagless images.
+        tags = [i['tags'][0] for i in docker_image_list if i['tags']]
+        jobset.message(
+            'START',
+            'Found images for %s: %s' % (image_path, tags),
+            do_newline=True)
+        skipped = len(docker_image_list) - len(tags)
+        jobset.message(
+            'SKIPPED',
+            'Skipped images (no-tag/unknown-tag): %d' % skipped,
+            do_newline=True)
+        # Filter tags based on the releases.
+        images[runtime] = [(tag, '%s:%s' % (image_path, tag))
+                           for tag in tags
+                           if tag in releases]
+    return images
+
 
 # caches test cases (list of JobSpec) loaded from file.  Keyed by lang and runtime.
 def find_test_cases(lang, runtime, release, suite_name):
-  """Returns the list of test cases from testcase files per lang/release."""
-  file_tmpl = os.path.join(os.path.dirname(__file__), 'testcases/%s__%s')
-  testcase_release = release
-  filename_prefix = lang
-  if lang == 'csharp':
-    filename_prefix = runtime
-  if not os.path.exists(file_tmpl % (filename_prefix, release)):
-    testcase_release = 'master'
-  testcases = file_tmpl % (filename_prefix, testcase_release)
+    """Returns the list of test cases from testcase files per lang/release."""
+    file_tmpl = os.path.join(os.path.dirname(__file__), 'testcases/%s__%s')
+    testcase_release = release
+    filename_prefix = lang
+    if lang == 'csharp':
+        filename_prefix = runtime
+    if not os.path.exists(file_tmpl % (filename_prefix, release)):
+        testcase_release = 'master'
+    testcases = file_tmpl % (filename_prefix, testcase_release)
 
-  job_spec_list=[]
-  try:
-    with open(testcases) as f:
-      # Only line start with 'docker run' are test cases.
-      for line in f.readlines():
-        if line.startswith('docker run'):
-          m = re.search('--test_case=(.*)"', line)
-          shortname = m.group(1) if m else 'unknown_test'
-          m = re.search('--server_host_override=(.*).sandbox.googleapis.com', 
+    job_spec_list = []
+    try:
+        with open(testcases) as f:
+            # Only lines that start with 'docker run' are test cases.
+            for line in f.readlines():
+                if line.startswith('docker run'):
+                    m = re.search('--test_case=(.*)"', line)
+                    shortname = m.group(1) if m else 'unknown_test'
+                    m = re.search(
+                        '--server_host_override=(.*).sandbox.googleapis.com',
                         line)
-          server = m.group(1) if m else 'unknown_server'
-          spec = jobset.JobSpec(cmdline=line,
-                                shortname='%s:%s:%s:%s' % (suite_name, lang, 
-                                                           server, shortname),
-                                timeout_seconds=_TEST_TIMEOUT,
-                                shell=True,
-                                flake_retries=5 if args.allow_flakes else 0)
-          job_spec_list.append(spec)
-      jobset.message('START',
-                     'Loaded %s tests from %s' % (len(job_spec_list), testcases),
-                     do_newline=True)
-  except IOError as err:
-    jobset.message('FAILED', err, do_newline=True)
-  return job_spec_list
+                    server = m.group(1) if m else 'unknown_server'
+                    spec = jobset.JobSpec(
+                        cmdline=line,
+                        shortname='%s:%s:%s:%s' % (suite_name, lang, server,
+                                                   shortname),
+                        timeout_seconds=_TEST_TIMEOUT,
+                        shell=True,
+                        flake_retries=5 if args.allow_flakes else 0)
+                    job_spec_list.append(spec)
+            jobset.message(
+                'START',
+                'Loaded %s tests from %s' % (len(job_spec_list), testcases),
+                do_newline=True)
+    except IOError as err:
+        jobset.message('FAILED', err, do_newline=True)
+    return job_spec_list
+
 
 _xml_report_tree = report_utils.new_junit_xml_tree()
+
+
 def run_tests_for_lang(lang, runtime, images):
-  """Find and run all test cases for a language.
+    """Find and run all test cases for a language.
 
   images is a list of (<release-tag>, <image-full-path>) tuple.
   """
-  total_num_failures = 0
-  for image_tuple in images:
-    release, image = image_tuple
-    jobset.message('START', 'Testing %s' % image, do_newline=True)
-    # Download the docker image before running each test case.
-    subprocess.check_call(['gcloud', 'docker', '--', 'pull', image])
-    suite_name = '%s__%s_%s' % (lang, runtime, release)
-    job_spec_list = find_test_cases(lang, runtime, release, suite_name)
-    
-    if not job_spec_list:  
-      jobset.message('FAILED', 'No test cases were found.', do_newline=True)
-      return 1
+    total_num_failures = 0
+    for image_tuple in images:
+        release, image = image_tuple
+        jobset.message('START', 'Testing %s' % image, do_newline=True)
+        # Download the docker image before running each test case.
+        subprocess.check_call(['gcloud', 'docker', '--', 'pull', image])
+        suite_name = '%s__%s_%s' % (lang, runtime, release)
+        job_spec_list = find_test_cases(lang, runtime, release, suite_name)
 
-    num_failures, resultset = jobset.run(job_spec_list,
-                                         newline_on_success=True,
-                                         add_env={'docker_image':image},
-                                         maxjobs=args.jobs)
-    if args.bq_result_table and resultset:
-      upload_test_results.upload_interop_results_to_bq(
-          resultset, args.bq_result_table, args)
-    if num_failures:
-      jobset.message('FAILED', 'Some tests failed', do_newline=True)
-      total_num_failures += num_failures
-    else:
-      jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+        if not job_spec_list:
+            jobset.message(
+                'FAILED', 'No test cases were found.', do_newline=True)
+            return 1
 
-    report_utils.append_junit_xml_results(
-        _xml_report_tree,
-        resultset,
-        'grpc_interop_matrix',
-        suite_name,
-        str(uuid.uuid4()))
+        num_failures, resultset = jobset.run(
+            job_spec_list,
+            newline_on_success=True,
+            add_env={'docker_image': image},
+            maxjobs=args.jobs)
+        if args.bq_result_table and resultset:
+            upload_test_results.upload_interop_results_to_bq(
+                resultset, args.bq_result_table, args)
+        if num_failures:
+            jobset.message('FAILED', 'Some tests failed', do_newline=True)
+            total_num_failures += num_failures
+        else:
+            jobset.message('SUCCESS', 'All tests passed', do_newline=True)
 
-    if not args.keep:
-      cleanup(image)
-  
-  return total_num_failures
+        report_utils.append_junit_xml_results(_xml_report_tree, resultset,
+                                              'grpc_interop_matrix', suite_name,
+                                              str(uuid.uuid4()))
+
+        if not args.keep:
+            cleanup(image)
+
+    return total_num_failures
 
 
 def cleanup(image):
-  jobset.message('START', 'Cleanup docker image %s' % image, do_newline=True)
-  dockerjob.remove_image(image, skip_nonexistent=True)
+    jobset.message('START', 'Cleanup docker image %s' % image, do_newline=True)
+    dockerjob.remove_image(image, skip_nonexistent=True)
 
 
 languages = args.language if args.language != ['all'] else _LANGUAGES
 total_num_failures = 0
 for lang in languages:
-  docker_images = find_all_images_for_lang(lang)
-  for runtime in sorted(docker_images.keys()):
-    total_num_failures += run_tests_for_lang(lang, runtime, docker_images[runtime])
+    docker_images = find_all_images_for_lang(lang)
+    for runtime in sorted(docker_images.keys()):
+        total_num_failures += run_tests_for_lang(lang, runtime,
+                                                 docker_images[runtime])
 
 report_utils.create_xml_report_file(_xml_report_tree, args.report_file)
 
 if total_num_failures:
-  sys.exit(1)
+    sys.exit(1)
 sys.exit(0)
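
To make the testcase-file parsing in `find_test_cases` above concrete, here is
a small sketch (not part of the diff) that applies the same two regular
expressions to a line in the format of the `ruby__v1.0.1` testcases file added
below; the sample line is abridged from that file.

```
import re

# Abridged line in the format of tools/interop_matrix/testcases/ruby__v1.0.1.
line = ('docker run -i --rm=true $docker_image bash -c "... '
        '--server_host_override=grpc-test.sandbox.googleapis.com '
        '--server_port=443 --use_tls=true --test_case=large_unary"')

if line.startswith('docker run'):
    # Same patterns find_test_cases() uses to derive the job shortname.
    m = re.search('--test_case=(.*)"', line)
    shortname = m.group(1) if m else 'unknown_test'
    m = re.search('--server_host_override=(.*).sandbox.googleapis.com', line)
    server = m.group(1) if m else 'unknown_server'
    print('%s %s' % (shortname, server))  # large_unary grpc-test
```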
diff --git a/tools/interop_matrix/testcases/ruby__v1.0.1 b/tools/interop_matrix/testcases/ruby__v1.0.1
new file mode 100755
index 0000000..effbef1
--- /dev/null
+++ b/tools/interop_matrix/testcases/ruby__v1.0.1
@@ -0,0 +1,20 @@
+#!/bin/bash
+echo "Testing ${docker_image:=grpc_interop_ruby:6bd1f0eb-51a4-4ad8-861c-1cbd7a929f33}"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=large_unary"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_unary"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=ping_pong"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_stream"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=client_streaming"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=server_streaming"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_begin"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_first_response"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=timeout_on_sleeping_server"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=large_unary"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_unary"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=ping_pong"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=empty_stream"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=client_streaming"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=server_streaming"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_begin"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=cancel_after_first_response"
+docker run -i --rm=true -w /var/local/git/grpc --net=host $docker_image bash -c "source /usr/local/rvm/scripts/rvm && ruby src/ruby/pb/test/client.rb --server_host=216.239.32.254 --server_host_override=grpc-test4.sandbox.googleapis.com --server_port=443 --use_tls=true --test_case=timeout_on_sleeping_server"
diff --git a/tools/line_count/collect-history.py b/tools/line_count/collect-history.py
index 3f030fb..c8e33c9 100755
--- a/tools/line_count/collect-history.py
+++ b/tools/line_count/collect-history.py
@@ -19,20 +19,24 @@
 # this script is only of historical interest: it's the script that was used to
 # bootstrap the dataset
 
+
 def daterange(start, end):
-  for n in range(int((end - start).days)):
-    yield start + datetime.timedelta(n)
+    for n in range(int((end - start).days)):
+        yield start + datetime.timedelta(n)
+
 
 start_date = datetime.date(2017, 3, 26)
 end_date = datetime.date(2017, 3, 29)
 
 for dt in daterange(start_date, end_date):
-  dmy = dt.strftime('%Y-%m-%d')
-  sha1 = subprocess.check_output(['git', 'rev-list', '-n', '1',
-                                  '--before=%s' % dmy,
-                                  'master']).strip()
-  subprocess.check_call(['git', 'checkout', sha1])
-  subprocess.check_call(['git', 'submodule', 'update'])
-  subprocess.check_call(['git', 'clean', '-f', '-x', '-d'])
-  subprocess.check_call(['cloc', '--vcs=git', '--by-file', '--yaml', '--out=../count/%s.yaml' % dmy, '.'])
-
+    dmy = dt.strftime('%Y-%m-%d')
+    sha1 = subprocess.check_output(
+        ['git', 'rev-list', '-n', '1',
+         '--before=%s' % dmy, 'master']).strip()
+    subprocess.check_call(['git', 'checkout', sha1])
+    subprocess.check_call(['git', 'submodule', 'update'])
+    subprocess.check_call(['git', 'clean', '-f', '-x', '-d'])
+    subprocess.check_call([
+        'cloc', '--vcs=git', '--by-file', '--yaml',
+        '--out=../count/%s.yaml' % dmy, '.'
+    ])
diff --git a/tools/line_count/summarize-history.py b/tools/line_count/summarize-history.py
index d2ef7ec..4a08599 100755
--- a/tools/line_count/summarize-history.py
+++ b/tools/line_count/summarize-history.py
@@ -13,22 +13,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import subprocess
 import datetime
 
 # this script is only of historical interest: it's the script that was used to
 # bootstrap the dataset
 
+
 def daterange(start, end):
-  for n in range(int((end - start).days)):
-    yield start + datetime.timedelta(n)
+    for n in range(int((end - start).days)):
+        yield start + datetime.timedelta(n)
+
 
 start_date = datetime.date(2017, 3, 26)
 end_date = datetime.date(2017, 3, 29)
 
 for dt in daterange(start_date, end_date):
-  dmy = dt.strftime('%Y-%m-%d')
-  print dmy
-  subprocess.check_call(['tools/line_count/yaml2csv.py', '-i', '../count/%s.yaml' % dmy, '-d', dmy, '-o', '../count/%s.csv' % dmy])
-
+    dmy = dt.strftime('%Y-%m-%d')
+    print dmy
+    subprocess.check_call([
+        'tools/line_count/yaml2csv.py', '-i',
+        '../count/%s.yaml' % dmy, '-d', dmy, '-o',
+        '../count/%s.csv' % dmy
+    ])
diff --git a/tools/line_count/yaml2csv.py b/tools/line_count/yaml2csv.py
index 2a38a12..dd2e92b 100755
--- a/tools/line_count/yaml2csv.py
+++ b/tools/line_count/yaml2csv.py
@@ -13,7 +13,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import yaml
 import argparse
 import datetime
@@ -21,18 +20,22 @@
 
 argp = argparse.ArgumentParser(description='Convert cloc yaml to bigquery csv')
 argp.add_argument('-i', '--input', type=str)
-argp.add_argument('-d', '--date', type=str, default=datetime.date.today().strftime('%Y-%m-%d'))
+argp.add_argument(
+    '-d',
+    '--date',
+    type=str,
+    default=datetime.date.today().strftime('%Y-%m-%d'))
 argp.add_argument('-o', '--output', type=str, default='out.csv')
 args = argp.parse_args()
 
 data = yaml.load(open(args.input).read())
 with open(args.output, 'w') as outf:
-  writer = csv.DictWriter(outf, ['date', 'name', 'language', 'code', 'comment', 'blank'])
-  for key, value in data.iteritems():
-    if key == 'header': continue
-    if key == 'SUM': continue
-    if key.startswith('third_party/'): continue
-    row = {'name': key, 'date': args.date}
-    row.update(value)
-    writer.writerow(row)
-
+    writer = csv.DictWriter(
+        outf, ['date', 'name', 'language', 'code', 'comment', 'blank'])
+    for key, value in data.iteritems():
+        if key == 'header': continue
+        if key == 'SUM': continue
+        if key.startswith('third_party/'): continue
+        row = {'name': key, 'date': args.date}
+        row.update(value)
+        writer.writerow(row)
diff --git a/tools/mkowners/mkowners.py b/tools/mkowners/mkowners.py
index e0ad998..2ea7265 100755
--- a/tools/mkowners/mkowners.py
+++ b/tools/mkowners/mkowners.py
@@ -24,10 +24,8 @@
 # Find the root of the git tree
 #
 
-git_root = (subprocess
-            .check_output(['git', 'rev-parse', '--show-toplevel'])
-            .decode('utf-8')
-            .strip())
+git_root = (subprocess.check_output(['git', 'rev-parse', '--show-toplevel'])
+            .decode('utf-8').strip())
 
 #
 # Parse command line arguments
@@ -36,19 +34,23 @@
 default_out = os.path.join(git_root, '.github', 'CODEOWNERS')
 
 argp = argparse.ArgumentParser('Generate .github/CODEOWNERS file')
-argp.add_argument('--out', '-o',
-                  type=str,
-                  default=default_out,
-                  help='Output file (default %s)' % default_out)
+argp.add_argument(
+    '--out',
+    '-o',
+    type=str,
+    default=default_out,
+    help='Output file (default %s)' % default_out)
 args = argp.parse_args()
 
 #
 # Walk git tree to locate all OWNERS files
 #
 
-owners_files = [os.path.join(root, 'OWNERS')
-                for root, dirs, files in os.walk(git_root)
-                if 'OWNERS' in files]
+owners_files = [
+    os.path.join(root, 'OWNERS')
+    for root, dirs, files in os.walk(git_root)
+    if 'OWNERS' in files
+]
 
 #
 # Parse owners files
@@ -57,39 +59,40 @@
 Owners = collections.namedtuple('Owners', 'parent directives dir')
 Directive = collections.namedtuple('Directive', 'who globs')
 
-def parse_owners(filename):
-  with open(filename) as f:
-    src = f.read().splitlines()
-  parent = True
-  directives = []
-  for line in src:
-    line = line.strip()
-    # line := directive | comment
-    if not line: continue
-    if line[0] == '#': continue
-    # it's a directive
-    directive = None
-    if line == 'set noparent':
-      parent = False
-    elif line == '*':
-      directive = Directive(who='*', globs=[])
-    elif ' ' in line:
-      (who, globs) = line.split(' ', 1)
-      globs_list = [glob
-                    for glob in globs.split(' ')
-                    if glob]
-      directive = Directive(who=who, globs=globs_list)
-    else:
-      directive = Directive(who=line, globs=[])
-    if directive:
-      directives.append(directive)
-  return Owners(parent=parent,
-                directives=directives,
-                dir=os.path.relpath(os.path.dirname(filename), git_root))
 
-owners_data = sorted([parse_owners(filename)
-                      for filename in owners_files],
-                     key=operator.attrgetter('dir'))
+def parse_owners(filename):
+    with open(filename) as f:
+        src = f.read().splitlines()
+    parent = True
+    directives = []
+    for line in src:
+        line = line.strip()
+        # line := directive | comment
+        if not line: continue
+        if line[0] == '#': continue
+        # it's a directive
+        directive = None
+        if line == 'set noparent':
+            parent = False
+        elif line == '*':
+            directive = Directive(who='*', globs=[])
+        elif ' ' in line:
+            (who, globs) = line.split(' ', 1)
+            globs_list = [glob for glob in globs.split(' ') if glob]
+            directive = Directive(who=who, globs=globs_list)
+        else:
+            directive = Directive(who=line, globs=[])
+        if directive:
+            directives.append(directive)
+    return Owners(
+        parent=parent,
+        directives=directives,
+        dir=os.path.relpath(os.path.dirname(filename), git_root))
+
+
+owners_data = sorted(
+    [parse_owners(filename) for filename in owners_files],
+    key=operator.attrgetter('dir'))
 
 #
 # Modify owners so that parented OWNERS files point to the actual
@@ -98,24 +101,24 @@
 
 new_owners_data = []
 for owners in owners_data:
-  if owners.parent == True:
-    best_parent = None
-    best_parent_score = None
-    for possible_parent in owners_data:
-      if possible_parent is owners: continue
-      rel = os.path.relpath(owners.dir, possible_parent.dir)
-      # '..' ==> we had to walk up from possible_parent to get to owners
-      #      ==> not a parent
-      if '..' in rel: continue
-      depth = len(rel.split(os.sep))
-      if not best_parent or depth < best_parent_score:
-        best_parent = possible_parent
-        best_parent_score = depth
-    if best_parent:
-      owners = owners._replace(parent = best_parent.dir)
-    else:
-      owners = owners._replace(parent = None)
-  new_owners_data.append(owners)
+    if owners.parent == True:
+        best_parent = None
+        best_parent_score = None
+        for possible_parent in owners_data:
+            if possible_parent is owners: continue
+            rel = os.path.relpath(owners.dir, possible_parent.dir)
+            # '..' ==> we had to walk up from possible_parent to get to owners
+            #      ==> not a parent
+            if '..' in rel: continue
+            depth = len(rel.split(os.sep))
+            if not best_parent or depth < best_parent_score:
+                best_parent = possible_parent
+                best_parent_score = depth
+        if best_parent:
+            owners = owners._replace(parent=best_parent.dir)
+        else:
+            owners = owners._replace(parent=None)
+    new_owners_data.append(owners)
 owners_data = new_owners_data
 
 #
@@ -123,106 +126,114 @@
 # a CODEOWNERS file for GitHub
 #
 
+
 def full_dir(rules_dir, sub_path):
-  return os.path.join(rules_dir, sub_path) if rules_dir != '.' else sub_path
+    return os.path.join(rules_dir, sub_path) if rules_dir != '.' else sub_path
+
 
 # glob using git
 gg_cache = {}
+
+
 def git_glob(glob):
-  global gg_cache
-  if glob in gg_cache: return gg_cache[glob]
-  r = set(subprocess
-      .check_output(['git', 'ls-files', os.path.join(git_root, glob)])
-      .decode('utf-8')
-      .strip()
-      .splitlines())
-  gg_cache[glob] = r
-  return r
+    global gg_cache
+    if glob in gg_cache: return gg_cache[glob]
+    r = set(
+        subprocess.check_output(
+            ['git', 'ls-files', os.path.join(git_root, glob)]).decode('utf-8')
+        .strip().splitlines())
+    gg_cache[glob] = r
+    return r
+
 
 def expand_directives(root, directives):
-  globs = collections.OrderedDict()
-  # build a table of glob --> owners
-  for directive in directives:
-    for glob in directive.globs or ['**']:
-      if glob not in globs:
-        globs[glob] = []
-      if directive.who not in globs[glob]:
-        globs[glob].append(directive.who)
-  # expand owners for intersecting globs
-  sorted_globs = sorted(globs.keys(),
-                        key=lambda g: len(git_glob(full_dir(root, g))),
-                        reverse=True)
-  out_globs = collections.OrderedDict()
-  for glob_add in sorted_globs:
-    who_add = globs[glob_add]
-    pre_items = [i for i in out_globs.items()]
-    out_globs[glob_add] = who_add.copy()
-    for glob_have, who_have in pre_items:
-      files_add = git_glob(full_dir(root, glob_add))
-      files_have = git_glob(full_dir(root, glob_have))
-      intersect = files_have.intersection(files_add)
-      if intersect:
-        for f in sorted(files_add): # sorted to ensure merge stability
-          if f not in intersect:
-            out_globs[os.path.relpath(f, start=root)] = who_add
-        for who in who_have:
-          if who not in out_globs[glob_add]:
-            out_globs[glob_add].append(who)
-  return out_globs
+    globs = collections.OrderedDict()
+    # build a table of glob --> owners
+    for directive in directives:
+        for glob in directive.globs or ['**']:
+            if glob not in globs:
+                globs[glob] = []
+            if directive.who not in globs[glob]:
+                globs[glob].append(directive.who)
+    # expand owners for intersecting globs
+    sorted_globs = sorted(
+        globs.keys(),
+        key=lambda g: len(git_glob(full_dir(root, g))),
+        reverse=True)
+    out_globs = collections.OrderedDict()
+    for glob_add in sorted_globs:
+        who_add = globs[glob_add]
+        pre_items = [i for i in out_globs.items()]
+        out_globs[glob_add] = who_add.copy()
+        for glob_have, who_have in pre_items:
+            files_add = git_glob(full_dir(root, glob_add))
+            files_have = git_glob(full_dir(root, glob_have))
+            intersect = files_have.intersection(files_add)
+            if intersect:
+                for f in sorted(files_add):  # sorted to ensure merge stability
+                    if f not in intersect:
+                        out_globs[os.path.relpath(f, start=root)] = who_add
+                for who in who_have:
+                    if who not in out_globs[glob_add]:
+                        out_globs[glob_add].append(who)
+    return out_globs
+
 
 def add_parent_to_globs(parent, globs, globs_dir):
-  if not parent: return
-  for owners in owners_data:
-    if owners.dir == parent:
-      owners_globs = expand_directives(owners.dir, owners.directives)
-      for oglob, oglob_who in owners_globs.items():
-        for gglob, gglob_who in globs.items():
-          files_parent = git_glob(full_dir(owners.dir, oglob))
-          files_child = git_glob(full_dir(globs_dir, gglob))
-          intersect = files_parent.intersection(files_child)
-          gglob_who_orig = gglob_who.copy()
-          if intersect:
-            for f in sorted(files_child): # sorted to ensure merge stability
-              if f not in intersect:
-                who = gglob_who_orig.copy()
-                globs[os.path.relpath(f, start=globs_dir)] = who
-            for who in oglob_who:
-              if who not in gglob_who:
-                gglob_who.append(who)
-      add_parent_to_globs(owners.parent, globs, globs_dir)
-      return
-  assert(False)
+    if not parent: return
+    for owners in owners_data:
+        if owners.dir == parent:
+            owners_globs = expand_directives(owners.dir, owners.directives)
+            for oglob, oglob_who in owners_globs.items():
+                for gglob, gglob_who in globs.items():
+                    files_parent = git_glob(full_dir(owners.dir, oglob))
+                    files_child = git_glob(full_dir(globs_dir, gglob))
+                    intersect = files_parent.intersection(files_child)
+                    gglob_who_orig = gglob_who.copy()
+                    if intersect:
+                        # sorted to ensure merge stability
+                        for f in sorted(files_child):
+                            if f not in intersect:
+                                who = gglob_who_orig.copy()
+                                globs[os.path.relpath(f, start=globs_dir)] = who
+                        for who in oglob_who:
+                            if who not in gglob_who:
+                                gglob_who.append(who)
+            add_parent_to_globs(owners.parent, globs, globs_dir)
+            return
+    assert (False)
+
 
 todo = owners_data.copy()
 done = set()
 with open(args.out, 'w') as out:
-  out.write('# Auto-generated by the tools/mkowners/mkowners.py tool\n')
-  out.write('# Uses OWNERS files in different modules throughout the\n')
-  out.write('# repository as the source of truth for module ownership.\n')
-  written_globs = []
-  while todo:
-    head, *todo = todo
-    if head.parent and not head.parent in done:
-      todo.append(head)
-      continue
-    globs = expand_directives(head.dir, head.directives)
-    add_parent_to_globs(head.parent, globs, head.dir)
-    for glob, owners in globs.items():
-      skip = False
-      for glob1, owners1, dir1 in reversed(written_globs):
-        files = git_glob(full_dir(head.dir, glob))
-        files1 = git_glob(full_dir(dir1, glob1))
-        intersect = files.intersection(files1)
-        if files == intersect:
-          if sorted(owners) == sorted(owners1):
-            skip = True # nothing new in this rule
-            break
-        elif intersect:
-          # continuing would cause a semantic change since some files are
-          # affected differently by this rule and CODEOWNERS is order dependent
-          break
-      if not skip:
-        out.write('/%s %s\n' % (
-            full_dir(head.dir, glob), ' '.join(owners)))
-        written_globs.append((glob, owners, head.dir))
-    done.add(head.dir)
+    out.write('# Auto-generated by the tools/mkowners/mkowners.py tool\n')
+    out.write('# Uses OWNERS files in different modules throughout the\n')
+    out.write('# repository as the source of truth for module ownership.\n')
+    written_globs = []
+    while todo:
+        head, *todo = todo
+        if head.parent and not head.parent in done:
+            todo.append(head)
+            continue
+        globs = expand_directives(head.dir, head.directives)
+        add_parent_to_globs(head.parent, globs, head.dir)
+        for glob, owners in globs.items():
+            skip = False
+            for glob1, owners1, dir1 in reversed(written_globs):
+                files = git_glob(full_dir(head.dir, glob))
+                files1 = git_glob(full_dir(dir1, glob1))
+                intersect = files.intersection(files1)
+                if files == intersect:
+                    if sorted(owners) == sorted(owners1):
+                        skip = True  # nothing new in this rule
+                        break
+                elif intersect:
+                    # continuing would cause a semantic change since some files are
+                    # affected differently by this rule and CODEOWNERS is order dependent
+                    break
+            if not skip:
+                out.write('/%s %s\n' % (full_dir(head.dir, glob),
+                                        ' '.join(owners)))
+                written_globs.append((glob, owners, head.dir))
+        done.add(head.dir)
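
A minimal standalone sketch (illustrative only, not taken from the tree) of the nearest-ancestor rule the parent-resolution loop in mkowners.py applies above: a candidate directory counts as a parent only if os.path.relpath never has to walk up through '..', and the shallowest such ancestor wins.

import os


def nearest_parent(target_dir, owners_dirs):
    """Return the closest enclosing directory of target_dir among owners_dirs."""
    best, best_depth = None, None
    for candidate in owners_dirs:
        if candidate == target_dir:
            continue
        rel = os.path.relpath(target_dir, candidate)
        # '..' means we had to walk up from candidate, so it is not an ancestor.
        if '..' in rel:
            continue
        depth = len(rel.split(os.sep))
        if best is None or depth < best_depth:
            best, best_depth = candidate, depth
    return best


# e.g. with OWNERS files at '.', 'src', 'src/core' and 'tools':
dirs = ['.', 'src', 'src/core', 'tools']
assert nearest_parent('src/core/lib', dirs) == 'src/core'
assert nearest_parent('include', dirs) == '.'
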
diff --git a/tools/profiling/bloat/bloat_diff.py b/tools/profiling/bloat/bloat_diff.py
index 9b40685..91611c2 100755
--- a/tools/profiling/bloat/bloat_diff.py
+++ b/tools/profiling/bloat/bloat_diff.py
@@ -23,12 +23,11 @@
 import sys
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
 import comment_on_pr
 
-argp = argparse.ArgumentParser(
-    description='Perform diff on microbenchmarks')
+argp = argparse.ArgumentParser(description='Perform diff on microbenchmarks')
 
 argp.add_argument(
     '-d',
@@ -36,64 +35,59 @@
     type=str,
     help='Commit or branch to compare the current one to')
 
-argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count())
+argp.add_argument('-j', '--jobs', type=int, default=multiprocessing.cpu_count())
 
 args = argp.parse_args()
 
 LIBS = [
-  'libgrpc.so',
-  'libgrpc++.so',
+    'libgrpc.so',
+    'libgrpc++.so',
 ]
 
+
 def build(where):
-  subprocess.check_call('make -j%d' % args.jobs,
-                        shell=True, cwd='.')
-  shutil.rmtree('bloat_diff_%s' % where, ignore_errors=True)
-  os.rename('libs', 'bloat_diff_%s' % where)
+    subprocess.check_call('make -j%d' % args.jobs, shell=True, cwd='.')
+    shutil.rmtree('bloat_diff_%s' % where, ignore_errors=True)
+    os.rename('libs', 'bloat_diff_%s' % where)
+
 
 build('new')
 
 if args.diff_base:
     old = 'old'
     where_am_i = subprocess.check_output(
-      ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+        ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
     subprocess.check_call(['git', 'checkout', args.diff_base])
     subprocess.check_call(['git', 'submodule', 'update'])
     try:
-      try:
-        build('old')
-      except subprocess.CalledProcessError, e:
-        subprocess.check_call(['make', 'clean'])
-        build('old')
+        try:
+            build('old')
+        except subprocess.CalledProcessError, e:
+            subprocess.check_call(['make', 'clean'])
+            build('old')
     finally:
-      subprocess.check_call(['git', 'checkout', where_am_i])
-      subprocess.check_call(['git', 'submodule', 'update'])
+        subprocess.check_call(['git', 'checkout', where_am_i])
+        subprocess.check_call(['git', 'submodule', 'update'])
 
-subprocess.check_call('make -j%d' % args.jobs,
-                      shell=True, cwd='third_party/bloaty')
+subprocess.check_call(
+    'make -j%d' % args.jobs, shell=True, cwd='third_party/bloaty')
 
 text = ''
 for lib in LIBS:
-  text += '****************************************************************\n\n'
-  text += lib + '\n\n'
-  old_version = glob.glob('bloat_diff_old/opt/%s' % lib)
-  new_version = glob.glob('bloat_diff_new/opt/%s' % lib)
-  assert len(new_version) == 1
-  cmd = 'third_party/bloaty/bloaty -d compileunits,symbols'
-  if old_version:
-    assert len(old_version) == 1
-    text += subprocess.check_output('%s %s -- %s' %
-                                    (cmd, new_version[0], old_version[0]),
-                                    shell=True)
-  else:
-    text += subprocess.check_output('%s %s' %
-                                    (cmd, new_version[0]),
-                                    shell=True)
-  text += '\n\n'
+    text += '****************************************************************\n\n'
+    text += lib + '\n\n'
+    old_version = glob.glob('bloat_diff_old/opt/%s' % lib)
+    new_version = glob.glob('bloat_diff_new/opt/%s' % lib)
+    assert len(new_version) == 1
+    cmd = 'third_party/bloaty/bloaty -d compileunits,symbols'
+    if old_version:
+        assert len(old_version) == 1
+        text += subprocess.check_output(
+            '%s %s -- %s' % (cmd, new_version[0], old_version[0]), shell=True)
+    else:
+        text += subprocess.check_output(
+            '%s %s' % (cmd, new_version[0]), shell=True)
+    text += '\n\n'
 
 print text
 comment_on_pr.comment_on_pr('```\n%s\n```' % text)
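
For reference, a rough sketch (helper name is illustrative) of the bloaty invocation bloat_diff.py shells out to above; the '--' separates the freshly built library from the baseline it is diffed against.

import subprocess

BLOATY = 'third_party/bloaty/bloaty'


def size_diff(new_lib, old_lib=None):
    """Return bloaty's size report for new_lib, diffed against old_lib if given."""
    cmd = [BLOATY, '-d', 'compileunits,symbols', new_lib]
    if old_lib:
        cmd += ['--', old_lib]
    return subprocess.check_output(cmd).decode('utf-8')


# e.g. size_diff('bloat_diff_new/opt/libgrpc.so', 'bloat_diff_old/opt/libgrpc.so')
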
diff --git a/tools/profiling/latency_profile/profile_analyzer.py b/tools/profiling/latency_profile/profile_analyzer.py
index 8a19afb..e3d3357 100755
--- a/tools/profiling/latency_profile/profile_analyzer.py
+++ b/tools/profiling/latency_profile/profile_analyzer.py
@@ -23,7 +23,6 @@
 import tabulate
 import time
 
-
 SELF_TIME = object()
 TIME_FROM_SCOPE_START = object()
 TIME_TO_SCOPE_END = object()
@@ -31,124 +30,129 @@
 TIME_TO_STACK_END = object()
 TIME_FROM_LAST_IMPORTANT = object()
 
-
-argp = argparse.ArgumentParser(description='Process output of basic_prof builds')
+argp = argparse.ArgumentParser(
+    description='Process output of basic_prof builds')
 argp.add_argument('--source', default='latency_trace.txt', type=str)
 argp.add_argument('--fmt', choices=tabulate.tabulate_formats, default='simple')
 argp.add_argument('--out', default='-', type=str)
 args = argp.parse_args()
 
+
 class LineItem(object):
 
-  def __init__(self, line, indent):
-    self.tag = line['tag']
-    self.indent = indent
-    self.start_time = line['t']
-    self.end_time = None
-    self.important = line['imp']
-    self.filename = line['file']
-    self.fileline = line['line']
-    self.times = {}
+    def __init__(self, line, indent):
+        self.tag = line['tag']
+        self.indent = indent
+        self.start_time = line['t']
+        self.end_time = None
+        self.important = line['imp']
+        self.filename = line['file']
+        self.fileline = line['line']
+        self.times = {}
 
 
 class ScopeBuilder(object):
 
-  def __init__(self, call_stack_builder, line):
-    self.call_stack_builder = call_stack_builder
-    self.indent = len(call_stack_builder.stk)
-    self.top_line = LineItem(line, self.indent)
-    call_stack_builder.lines.append(self.top_line)
-    self.first_child_pos = len(call_stack_builder.lines)
+    def __init__(self, call_stack_builder, line):
+        self.call_stack_builder = call_stack_builder
+        self.indent = len(call_stack_builder.stk)
+        self.top_line = LineItem(line, self.indent)
+        call_stack_builder.lines.append(self.top_line)
+        self.first_child_pos = len(call_stack_builder.lines)
 
-  def mark(self, line):
-    line_item = LineItem(line, self.indent + 1)
-    line_item.end_time = line_item.start_time
-    self.call_stack_builder.lines.append(line_item)
+    def mark(self, line):
+        line_item = LineItem(line, self.indent + 1)
+        line_item.end_time = line_item.start_time
+        self.call_stack_builder.lines.append(line_item)
 
-  def finish(self, line):
-    assert line['tag'] == self.top_line.tag, (
-        'expected %s, got %s; thread=%s; t0=%f t1=%f' %
-        (self.top_line.tag, line['tag'], line['thd'], self.top_line.start_time,
-         line['t']))
-    final_time_stamp = line['t']
-    assert self.top_line.end_time is None
-    self.top_line.end_time = final_time_stamp
-    self.top_line.important = self.top_line.important or line['imp']
-    assert SELF_TIME not in self.top_line.times
-    self.top_line.times[SELF_TIME] = final_time_stamp - self.top_line.start_time
-    for line in self.call_stack_builder.lines[self.first_child_pos:]:
-      if TIME_FROM_SCOPE_START not in line.times:
-        line.times[TIME_FROM_SCOPE_START] = line.start_time - self.top_line.start_time
-        line.times[TIME_TO_SCOPE_END] = final_time_stamp - line.end_time
+    def finish(self, line):
+        assert line['tag'] == self.top_line.tag, (
+            'expected %s, got %s; thread=%s; t0=%f t1=%f' %
+            (self.top_line.tag, line['tag'], line['thd'],
+             self.top_line.start_time, line['t']))
+        final_time_stamp = line['t']
+        assert self.top_line.end_time is None
+        self.top_line.end_time = final_time_stamp
+        self.top_line.important = self.top_line.important or line['imp']
+        assert SELF_TIME not in self.top_line.times
+        self.top_line.times[
+            SELF_TIME] = final_time_stamp - self.top_line.start_time
+        for line in self.call_stack_builder.lines[self.first_child_pos:]:
+            if TIME_FROM_SCOPE_START not in line.times:
+                line.times[
+                    TIME_FROM_SCOPE_START] = line.start_time - self.top_line.start_time
+                line.times[TIME_TO_SCOPE_END] = final_time_stamp - line.end_time
 
 
 class CallStackBuilder(object):
 
-  def __init__(self):
-    self.stk = []
-    self.signature = hashlib.md5()
-    self.lines = []
+    def __init__(self):
+        self.stk = []
+        self.signature = hashlib.md5()
+        self.lines = []
 
-  def finish(self):
-    start_time = self.lines[0].start_time
-    end_time = self.lines[0].end_time
-    self.signature = self.signature.hexdigest()
-    last_important = start_time
-    for line in self.lines:
-      line.times[TIME_FROM_STACK_START] = line.start_time - start_time
-      line.times[TIME_TO_STACK_END] = end_time - line.end_time
-      line.times[TIME_FROM_LAST_IMPORTANT] = line.start_time - last_important
-      if line.important:
-        last_important = line.end_time
-    last_important = end_time
+    def finish(self):
+        start_time = self.lines[0].start_time
+        end_time = self.lines[0].end_time
+        self.signature = self.signature.hexdigest()
+        last_important = start_time
+        for line in self.lines:
+            line.times[TIME_FROM_STACK_START] = line.start_time - start_time
+            line.times[TIME_TO_STACK_END] = end_time - line.end_time
+            line.times[
+                TIME_FROM_LAST_IMPORTANT] = line.start_time - last_important
+            if line.important:
+                last_important = line.end_time
+        last_important = end_time
 
-  def add(self, line):
-    line_type = line['type']
-    self.signature.update(line_type)
-    self.signature.update(line['tag'])
-    if line_type == '{':
-      self.stk.append(ScopeBuilder(self, line))
-      return False
-    elif line_type == '}':
-      assert self.stk, (
-          'expected non-empty stack for closing %s; thread=%s; t=%f' %
-          (line['tag'], line['thd'], line['t']))
-      self.stk.pop().finish(line)
-      if not self.stk:
-        self.finish()
-        return True
-      return False
-    elif line_type == '.' or line_type == '!':
-      self.stk[-1].mark(line)
-      return False
-    else:
-      raise Exception('Unknown line type: \'%s\'' % line_type)
+    def add(self, line):
+        line_type = line['type']
+        self.signature.update(line_type)
+        self.signature.update(line['tag'])
+        if line_type == '{':
+            self.stk.append(ScopeBuilder(self, line))
+            return False
+        elif line_type == '}':
+            assert self.stk, (
+                'expected non-empty stack for closing %s; thread=%s; t=%f' %
+                (line['tag'], line['thd'], line['t']))
+            self.stk.pop().finish(line)
+            if not self.stk:
+                self.finish()
+                return True
+            return False
+        elif line_type == '.' or line_type == '!':
+            self.stk[-1].mark(line)
+            return False
+        else:
+            raise Exception('Unknown line type: \'%s\'' % line_type)
 
 
 class CallStack(object):
 
-  def __init__(self, initial_call_stack_builder):
-    self.count = 1
-    self.signature = initial_call_stack_builder.signature
-    self.lines = initial_call_stack_builder.lines
-    for line in self.lines:
-      for key, val in line.times.items():
-        line.times[key] = [val]
+    def __init__(self, initial_call_stack_builder):
+        self.count = 1
+        self.signature = initial_call_stack_builder.signature
+        self.lines = initial_call_stack_builder.lines
+        for line in self.lines:
+            for key, val in line.times.items():
+                line.times[key] = [val]
 
-  def add(self, call_stack_builder):
-    assert self.signature == call_stack_builder.signature
-    self.count += 1
-    assert len(self.lines) == len(call_stack_builder.lines)
-    for lsum, line in itertools.izip(self.lines, call_stack_builder.lines):
-      assert lsum.tag == line.tag
-      assert lsum.times.keys() == line.times.keys()
-      for k, lst in lsum.times.iteritems():
-        lst.append(line.times[k])
+    def add(self, call_stack_builder):
+        assert self.signature == call_stack_builder.signature
+        self.count += 1
+        assert len(self.lines) == len(call_stack_builder.lines)
+        for lsum, line in itertools.izip(self.lines, call_stack_builder.lines):
+            assert lsum.tag == line.tag
+            assert lsum.times.keys() == line.times.keys()
+            for k, lst in lsum.times.iteritems():
+                lst.append(line.times[k])
 
-  def finish(self):
-    for line in self.lines:
-      for lst in line.times.itervalues():
-        lst.sort()
+    def finish(self):
+        for line in self.lines:
+            for lst in line.times.itervalues():
+                lst.sort()
+
 
 builder = collections.defaultdict(CallStackBuilder)
 call_stacks = collections.defaultdict(CallStack)
@@ -156,26 +160,28 @@
 lines = 0
 start = time.time()
 with open(args.source) as f:
-  for line in f:
-    lines += 1
-    inf = json.loads(line)
-    thd = inf['thd']
-    cs = builder[thd]
-    if cs.add(inf):
-      if cs.signature in call_stacks:
-        call_stacks[cs.signature].add(cs)
-      else:
-        call_stacks[cs.signature] = CallStack(cs)
-      del builder[thd]
+    for line in f:
+        lines += 1
+        inf = json.loads(line)
+        thd = inf['thd']
+        cs = builder[thd]
+        if cs.add(inf):
+            if cs.signature in call_stacks:
+                call_stacks[cs.signature].add(cs)
+            else:
+                call_stacks[cs.signature] = CallStack(cs)
+            del builder[thd]
 time_taken = time.time() - start
 
-call_stacks = sorted(call_stacks.values(), key=lambda cs: cs.count, reverse=True)
+call_stacks = sorted(
+    call_stacks.values(), key=lambda cs: cs.count, reverse=True)
 total_stacks = 0
 for cs in call_stacks:
-  total_stacks += cs.count
-  cs.finish()
+    total_stacks += cs.count
+    cs.finish()
 
-def percentile(N, percent, key=lambda x:x):
+
+def percentile(N, percent, key=lambda x: x):
     """
     Find the percentile of a list of values.
 
@@ -187,80 +193,83 @@
     """
     if not N:
         return None
-    k = (len(N)-1) * percent
+    k = (len(N) - 1) * percent
     f = math.floor(k)
     c = math.ceil(k)
     if f == c:
         return key(N[int(k)])
-    d0 = key(N[int(f)]) * (c-k)
-    d1 = key(N[int(c)]) * (k-f)
-    return d0+d1
+    d0 = key(N[int(f)]) * (c - k)
+    d1 = key(N[int(c)]) * (k - f)
+    return d0 + d1
+
 
 def tidy_tag(tag):
-  if tag[0:10] == 'GRPC_PTAG_':
-    return tag[10:]
-  return tag
+    if tag[0:10] == 'GRPC_PTAG_':
+        return tag[10:]
+    return tag
+
 
 def time_string(values):
-  num_values = len(values)
-  return '%.1f/%.1f/%.1f' % (
-      1e6 * percentile(values, 0.5),
-      1e6 * percentile(values, 0.9),
-      1e6 * percentile(values, 0.99))
+    num_values = len(values)
+    return '%.1f/%.1f/%.1f' % (1e6 * percentile(values, 0.5),
+                               1e6 * percentile(values, 0.9),
+                               1e6 * percentile(values, 0.99))
+
 
 def time_format(idx):
-  def ent(line, idx=idx):
-    if idx in line.times:
-      return time_string(line.times[idx])
-    return ''
-  return ent
 
-BANNER = {
-    'simple': 'Count: %(count)d',
-    'html': '<h1>Count: %(count)d</h1>'
-}
+    def ent(line, idx=idx):
+        if idx in line.times:
+            return time_string(line.times[idx])
+        return ''
+
+    return ent
+
+
+BANNER = {'simple': 'Count: %(count)d', 'html': '<h1>Count: %(count)d</h1>'}
 
 FORMAT = [
-  ('TAG', lambda line: '..'*line.indent + tidy_tag(line.tag)),
-  ('LOC', lambda line: '%s:%d' % (line.filename[line.filename.rfind('/')+1:], line.fileline)),
-  ('IMP', lambda line: '*' if line.important else ''),
-  ('FROM_IMP', time_format(TIME_FROM_LAST_IMPORTANT)),
-  ('FROM_STACK_START', time_format(TIME_FROM_STACK_START)),
-  ('SELF', time_format(SELF_TIME)),
-  ('TO_STACK_END', time_format(TIME_TO_STACK_END)),
-  ('FROM_SCOPE_START', time_format(TIME_FROM_SCOPE_START)),
-  ('SELF', time_format(SELF_TIME)),
-  ('TO_SCOPE_END', time_format(TIME_TO_SCOPE_END)),
+    ('TAG', lambda line: '..' * line.indent + tidy_tag(line.tag)),
+    ('LOC',
+     lambda line: '%s:%d' % (line.filename[line.filename.rfind('/') + 1:], line.fileline)
+    ),
+    ('IMP', lambda line: '*' if line.important else ''),
+    ('FROM_IMP', time_format(TIME_FROM_LAST_IMPORTANT)),
+    ('FROM_STACK_START', time_format(TIME_FROM_STACK_START)),
+    ('SELF', time_format(SELF_TIME)),
+    ('TO_STACK_END', time_format(TIME_TO_STACK_END)),
+    ('FROM_SCOPE_START', time_format(TIME_FROM_SCOPE_START)),
+    ('SELF', time_format(SELF_TIME)),
+    ('TO_SCOPE_END', time_format(TIME_TO_SCOPE_END)),
 ]
 
 out = sys.stdout
 if args.out != '-':
-  out = open(args.out, 'w')
+    out = open(args.out, 'w')
 
 if args.fmt == 'html':
-  print >>out, '<html>'
-  print >>out, '<head>'
-  print >>out, '<title>Profile Report</title>'
-  print >>out, '</head>'
+    print >> out, '<html>'
+    print >> out, '<head>'
+    print >> out, '<title>Profile Report</title>'
+    print >> out, '</head>'
 
 accounted_for = 0
 for cs in call_stacks:
-  if args.fmt in BANNER:
-    print >>out, BANNER[args.fmt] % {
-        'count': cs.count,
-    }
-  header, _ = zip(*FORMAT)
-  table = []
-  for line in cs.lines:
-    fields = []
-    for _, fn in FORMAT:
-      fields.append(fn(line))
-    table.append(fields)
-  print >>out, tabulate.tabulate(table, header, tablefmt=args.fmt)
-  accounted_for += cs.count
-  if accounted_for > .99 * total_stacks:
-    break
+    if args.fmt in BANNER:
+        print >> out, BANNER[args.fmt] % {
+            'count': cs.count,
+        }
+    header, _ = zip(*FORMAT)
+    table = []
+    for line in cs.lines:
+        fields = []
+        for _, fn in FORMAT:
+            fields.append(fn(line))
+        table.append(fields)
+    print >> out, tabulate.tabulate(table, header, tablefmt=args.fmt)
+    accounted_for += cs.count
+    if accounted_for > .99 * total_stacks:
+        break
 
 if args.fmt == 'html':
-  print '</html>'
-
+    print '</html>'
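
For reference, a self-contained version of the interpolating percentile used by time_string() above, with a worked example (the sample values are made up): for a sorted list N and fraction p, the rank is k = (len(N) - 1) * p and the result interpolates linearly between the two nearest samples.

import math


def percentile(sorted_values, p):
    if not sorted_values:
        return None
    k = (len(sorted_values) - 1) * p
    f, c = math.floor(k), math.ceil(k)
    if f == c:
        return sorted_values[int(k)]
    return sorted_values[int(f)] * (c - k) + sorted_values[int(c)] * (k - f)


# Median and p90 of five latency samples (microseconds):
samples = sorted([120.0, 80.0, 95.0, 240.0, 110.0])   # [80, 95, 110, 120, 240]
assert percentile(samples, 0.5) == 110.0               # k = 2.0 lands on a sample
assert abs(percentile(samples, 0.9) - 192.0) < 1e-9    # k = 3.6 -> 0.4*120 + 0.6*240
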
diff --git a/tools/profiling/microbenchmarks/bm2bq.py b/tools/profiling/microbenchmarks/bm2bq.py
index 9f9b672..e084e28 100755
--- a/tools/profiling/microbenchmarks/bm2bq.py
+++ b/tools/profiling/microbenchmarks/bm2bq.py
@@ -28,37 +28,38 @@
 columns = []
 
 for row in json.loads(
-    subprocess.check_output([
-      'bq','--format=json','show','microbenchmarks.microbenchmarks']))['schema']['fields']:
-  columns.append((row['name'], row['type'].lower()))
+        subprocess.check_output(
+            ['bq', '--format=json', 'show',
+             'microbenchmarks.microbenchmarks']))['schema']['fields']:
+    columns.append((row['name'], row['type'].lower()))
 
 SANITIZE = {
-  'integer': int,
-  'float': float,
-  'boolean': bool,
-  'string': str,
-  'timestamp': str,
+    'integer': int,
+    'float': float,
+    'boolean': bool,
+    'string': str,
+    'timestamp': str,
 }
 
 if sys.argv[1] == '--schema':
-  print ',\n'.join('%s:%s' % (k, t.upper()) for k, t in columns)
-  sys.exit(0)
+    print ',\n'.join('%s:%s' % (k, t.upper()) for k, t in columns)
+    sys.exit(0)
 
 with open(sys.argv[1]) as f:
-  js = json.loads(f.read())
+    js = json.loads(f.read())
 
 if len(sys.argv) > 2:
-  with open(sys.argv[2]) as f:
-    js2 = json.loads(f.read())
+    with open(sys.argv[2]) as f:
+        js2 = json.loads(f.read())
 else:
-  js2 = None
+    js2 = None
 
-writer = csv.DictWriter(sys.stdout, [c for c,t in columns])
+writer = csv.DictWriter(sys.stdout, [c for c, t in columns])
 
 for row in bm_json.expand_json(js, js2):
-  sane_row = {}
-  for name, sql_type in columns:
-    if name in row:
-      if row[name] == '': continue
-      sane_row[name] = SANITIZE[sql_type](row[name])
-  writer.writerow(sane_row)
+    sane_row = {}
+    for name, sql_type in columns:
+        if name in row:
+            if row[name] == '': continue
+            sane_row[name] = SANITIZE[sql_type](row[name])
+    writer.writerow(sane_row)
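
A small sketch of the per-column sanitization bm2bq.py performs before writing CSV rows; the column names and values below are illustrative, not the real microbenchmarks.microbenchmarks schema.

SANITIZE = {
    'integer': int,
    'float': float,
    'boolean': bool,
    'string': str,
    'timestamp': str,
}


def sanitize_row(row, columns):
    """Keep only schema columns, dropping empty strings and coercing types."""
    sane = {}
    for name, sql_type in columns:
        if name in row and row[name] != '':
            sane[name] = SANITIZE[sql_type](row[name])
    return sane


columns = [('cpp_name', 'string'), ('iterations', 'integer'), ('cpu_time', 'float')]
row = {'cpp_name': 'BM_Example', 'iterations': '1000', 'cpu_time': '12.5', 'extra': ''}
assert sanitize_row(row, columns) == {
    'cpp_name': 'BM_Example', 'iterations': 1000, 'cpu_time': 12.5}
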
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_build.py b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
index ce62c09..4197ba3 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_build.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_build.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """ Python utility to build opt and counters benchmarks """
 
 import bm_constants
@@ -26,55 +25,57 @@
 
 
 def _args():
-  argp = argparse.ArgumentParser(description='Builds microbenchmarks')
-  argp.add_argument(
-    '-b',
-    '--benchmarks',
-    nargs='+',
-    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    help='Which benchmarks to build')
-  argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count(),
-    help='How many CPUs to dedicate to this task')
-  argp.add_argument(
-    '-n',
-    '--name',
-    type=str,
-    help='Unique name of this build. To be used as a handle to pass to the other bm* scripts'
-  )
-  argp.add_argument('--counters', dest='counters', action='store_true')
-  argp.add_argument('--no-counters', dest='counters', action='store_false')
-  argp.set_defaults(counters=True)
-  args = argp.parse_args()
-  assert args.name
-  return args
+    argp = argparse.ArgumentParser(description='Builds microbenchmarks')
+    argp.add_argument(
+        '-b',
+        '--benchmarks',
+        nargs='+',
+        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        help='Which benchmarks to build')
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        type=int,
+        default=multiprocessing.cpu_count(),
+        help='How many CPUs to dedicate to this task')
+    argp.add_argument(
+        '-n',
+        '--name',
+        type=str,
+        help=
+        'Unique name of this build. To be used as a handle to pass to the other bm* scripts'
+    )
+    argp.add_argument('--counters', dest='counters', action='store_true')
+    argp.add_argument('--no-counters', dest='counters', action='store_false')
+    argp.set_defaults(counters=True)
+    args = argp.parse_args()
+    assert args.name
+    return args
 
 
 def _make_cmd(cfg, benchmarks, jobs):
-  return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
+    return ['make'] + benchmarks + ['CONFIG=%s' % cfg, '-j', '%d' % jobs]
 
 
 def build(name, benchmarks, jobs, counters):
-  shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
-  subprocess.check_call(['git', 'submodule', 'update'])
-  try:
-    subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
-    if counters:
-      subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
-  except subprocess.CalledProcessError, e:
-    subprocess.check_call(['make', 'clean'])
-    subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
-    if counters:
-      subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
-  os.rename(
-    'bins',
-    'bm_diff_%s' % name,)
+    shutil.rmtree('bm_diff_%s' % name, ignore_errors=True)
+    subprocess.check_call(['git', 'submodule', 'update'])
+    try:
+        subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
+        if counters:
+            subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
+    except subprocess.CalledProcessError, e:
+        subprocess.check_call(['make', 'clean'])
+        subprocess.check_call(_make_cmd('opt', benchmarks, jobs))
+        if counters:
+            subprocess.check_call(_make_cmd('counters', benchmarks, jobs))
+    os.rename(
+        'bins',
+        'bm_diff_%s' % name,
+    )
 
 
 if __name__ == '__main__':
-  args = _args()
-  build(args.name, args.benchmarks, args.jobs, args.counters)
+    args = _args()
+    build(args.name, args.benchmarks, args.jobs, args.counters)
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
index 0ec17fa..5719e42 100644
--- a/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_constants.py
@@ -13,19 +13,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """ Configurable constants for the bm_*.py family """
 
 _AVAILABLE_BENCHMARK_TESTS = [
-  'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
-  'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
-  'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
-  'bm_metadata', 'bm_fullstack_trickle'
+    'bm_fullstack_unary_ping_pong', 'bm_fullstack_streaming_ping_pong',
+    'bm_fullstack_streaming_pump', 'bm_closure', 'bm_cq', 'bm_call_create',
+    'bm_error', 'bm_chttp2_hpack', 'bm_chttp2_transport', 'bm_pollset',
+    'bm_metadata', 'bm_fullstack_trickle'
 ]
 
-_INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median', 'locks_per_iteration',
-        'allocs_per_iteration', 'writes_per_iteration',
-        'atm_cas_per_iteration', 'atm_add_per_iteration',
-        'nows_per_iteration', 'cli_transport_stalls_per_iteration', 
-        'cli_stream_stalls_per_iteration', 'svr_transport_stalls_per_iteration',
-        'svr_stream_stalls_per_iteration', 'http2_pings_sent_per_iteration')
+_INTERESTING = ('cpu_time', 'real_time', 'call_initial_size-median',
+                'locks_per_iteration', 'allocs_per_iteration',
+                'writes_per_iteration', 'atm_cas_per_iteration',
+                'atm_add_per_iteration', 'nows_per_iteration',
+                'cli_transport_stalls_per_iteration',
+                'cli_stream_stalls_per_iteration',
+                'svr_transport_stalls_per_iteration',
+                'svr_stream_stalls_per_iteration',
+                'http2_pings_sent_per_iteration')
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
index a41d0f0..f975a8b 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_diff.py
@@ -34,190 +34,196 @@
 
 
 def _median(ary):
-  assert (len(ary))
-  ary = sorted(ary)
-  n = len(ary)
-  if n % 2 == 0:
-    return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
-  else:
-    return ary[n / 2]
+    assert (len(ary))
+    ary = sorted(ary)
+    n = len(ary)
+    if n % 2 == 0:
+        return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
+    else:
+        return ary[n / 2]
 
 
 def _args():
-  argp = argparse.ArgumentParser(
-    description='Perform diff on microbenchmarks')
-  argp.add_argument(
-    '-t',
-    '--track',
-    choices=sorted(bm_constants._INTERESTING),
-    nargs='+',
-    default=sorted(bm_constants._INTERESTING),
-    help='Which metrics to track')
-  argp.add_argument(
-    '-b',
-    '--benchmarks',
-    nargs='+',
-    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    help='Which benchmarks to run')
-  argp.add_argument(
-    '-l',
-    '--loops',
-    type=int,
-    default=20,
-    help='Number of times to loops the benchmarks. Must match what was passed to bm_run.py'
-  )
-  argp.add_argument(
-    '-r',
-    '--regex',
-    type=str,
-    default="",
-    help='Regex to filter benchmarks run')
-  argp.add_argument('--counters', dest='counters', action='store_true')
-  argp.add_argument('--no-counters', dest='counters', action='store_false')
-  argp.set_defaults(counters=True)
-  argp.add_argument('-n', '--new', type=str, help='New benchmark name')
-  argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
-  argp.add_argument(
-    '-v', '--verbose', type=bool, help='Print details of before/after')
-  args = argp.parse_args()
-  global verbose
-  if args.verbose: verbose = True
-  assert args.new
-  assert args.old
-  return args
+    argp = argparse.ArgumentParser(
+        description='Perform diff on microbenchmarks')
+    argp.add_argument(
+        '-t',
+        '--track',
+        choices=sorted(bm_constants._INTERESTING),
+        nargs='+',
+        default=sorted(bm_constants._INTERESTING),
+        help='Which metrics to track')
+    argp.add_argument(
+        '-b',
+        '--benchmarks',
+        nargs='+',
+        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        help='Which benchmarks to run')
+    argp.add_argument(
+        '-l',
+        '--loops',
+        type=int,
+        default=20,
+        help=
+        'Number of times to loop the benchmarks. Must match what was passed to bm_run.py'
+    )
+    argp.add_argument(
+        '-r',
+        '--regex',
+        type=str,
+        default="",
+        help='Regex to filter benchmarks run')
+    argp.add_argument('--counters', dest='counters', action='store_true')
+    argp.add_argument('--no-counters', dest='counters', action='store_false')
+    argp.set_defaults(counters=True)
+    argp.add_argument('-n', '--new', type=str, help='New benchmark name')
+    argp.add_argument('-o', '--old', type=str, help='Old benchmark name')
+    argp.add_argument(
+        '-v', '--verbose', type=bool, help='Print details of before/after')
+    args = argp.parse_args()
+    global verbose
+    if args.verbose: verbose = True
+    assert args.new
+    assert args.old
+    return args
 
 
 def _maybe_print(str):
-  if verbose: print str
+    if verbose: print str
 
 
 class Benchmark:
 
-  def __init__(self):
-    self.samples = {
-      True: collections.defaultdict(list),
-      False: collections.defaultdict(list)
-    }
-    self.final = {}
+    def __init__(self):
+        self.samples = {
+            True: collections.defaultdict(list),
+            False: collections.defaultdict(list)
+        }
+        self.final = {}
 
-  def add_sample(self, track, data, new):
-    for f in track:
-      if f in data:
-        self.samples[new][f].append(float(data[f]))
+    def add_sample(self, track, data, new):
+        for f in track:
+            if f in data:
+                self.samples[new][f].append(float(data[f]))
 
-  def process(self, track, new_name, old_name):
-    for f in sorted(track):
-      new = self.samples[True][f]
-      old = self.samples[False][f]
-      if not new or not old: continue
-      mdn_diff = abs(_median(new) - _median(old))
-      _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
-             (f, new_name, new, old_name, old, mdn_diff))
-      s = bm_speedup.speedup(new, old, 1e-5)
-      if abs(s) > 3:
-        if mdn_diff > 0.5 or 'trickle' in f:
-          self.final[f] = '%+d%%' % s
-    return self.final.keys()
+    def process(self, track, new_name, old_name):
+        for f in sorted(track):
+            new = self.samples[True][f]
+            old = self.samples[False][f]
+            if not new or not old: continue
+            mdn_diff = abs(_median(new) - _median(old))
+            _maybe_print('%s: %s=%r %s=%r mdn_diff=%r' %
+                         (f, new_name, new, old_name, old, mdn_diff))
+            s = bm_speedup.speedup(new, old, 1e-5)
+            if abs(s) > 3:
+                if mdn_diff > 0.5 or 'trickle' in f:
+                    self.final[f] = '%+d%%' % s
+        return self.final.keys()
 
-  def skip(self):
-    return not self.final
+    def skip(self):
+        return not self.final
 
-  def row(self, flds):
-    return [self.final[f] if f in self.final else '' for f in flds]
+    def row(self, flds):
+        return [self.final[f] if f in self.final else '' for f in flds]
 
 
 def _read_json(filename, badjson_files, nonexistant_files):
-  stripped = ".".join(filename.split(".")[:-2])
-  try:
-    with open(filename) as f:
-      r = f.read();
-      return json.loads(r)
-  except IOError, e:
-    if stripped in nonexistant_files:
-      nonexistant_files[stripped] += 1
-    else:
-      nonexistant_files[stripped] = 1
-    return None
-  except ValueError, e:
-    print r
-    if stripped in badjson_files:
-      badjson_files[stripped] += 1
-    else:
-      badjson_files[stripped] = 1
-    return None
+    stripped = ".".join(filename.split(".")[:-2])
+    try:
+        with open(filename) as f:
+            r = f.read()
+            return json.loads(r)
+    except IOError, e:
+        if stripped in nonexistant_files:
+            nonexistant_files[stripped] += 1
+        else:
+            nonexistant_files[stripped] = 1
+        return None
+    except ValueError, e:
+        print r
+        if stripped in badjson_files:
+            badjson_files[stripped] += 1
+        else:
+            badjson_files[stripped] = 1
+        return None
+
 
 def fmt_dict(d):
-  return ''.join(["    " + k + ": " + str(d[k]) + "\n" for k in d])
+    return ''.join(["    " + k + ": " + str(d[k]) + "\n" for k in d])
+
 
 def diff(bms, loops, regex, track, old, new, counters):
-  benchmarks = collections.defaultdict(Benchmark)
+    benchmarks = collections.defaultdict(Benchmark)
 
-  badjson_files = {}
-  nonexistant_files = {}
-  for bm in bms:
-    for loop in range(0, loops):
-      for line in subprocess.check_output(
-        ['bm_diff_%s/opt/%s' % (old, bm),
-         '--benchmark_list_tests', 
-         '--benchmark_filter=%s' % regex]).splitlines():
-        stripped_line = line.strip().replace("/", "_").replace(
-          "<", "_").replace(">", "_").replace(", ", "_")
-        js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
-                    (bm, stripped_line, new, loop),
-                    badjson_files, nonexistant_files)
-        js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
-                    (bm, stripped_line, old, loop),
-                    badjson_files, nonexistant_files)
-        if counters:
-          js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
-                      (bm, stripped_line, new, loop),
-                      badjson_files, nonexistant_files)
-          js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
-                      (bm, stripped_line, old, loop),
-                      badjson_files, nonexistant_files)
+    badjson_files = {}
+    nonexistant_files = {}
+    for bm in bms:
+        for loop in range(0, loops):
+            for line in subprocess.check_output([
+                    'bm_diff_%s/opt/%s' % (old, bm), '--benchmark_list_tests',
+                    '--benchmark_filter=%s' % regex
+            ]).splitlines():
+                stripped_line = line.strip().replace("/", "_").replace(
+                    "<", "_").replace(">", "_").replace(", ", "_")
+                js_new_opt = _read_json('%s.%s.opt.%s.%d.json' %
+                                        (bm, stripped_line, new, loop),
+                                        badjson_files, nonexistant_files)
+                js_old_opt = _read_json('%s.%s.opt.%s.%d.json' %
+                                        (bm, stripped_line, old, loop),
+                                        badjson_files, nonexistant_files)
+                if counters:
+                    js_new_ctr = _read_json('%s.%s.counters.%s.%d.json' %
+                                            (bm, stripped_line, new, loop),
+                                            badjson_files, nonexistant_files)
+                    js_old_ctr = _read_json('%s.%s.counters.%s.%d.json' %
+                                            (bm, stripped_line, old, loop),
+                                            badjson_files, nonexistant_files)
+                else:
+                    js_new_ctr = None
+                    js_old_ctr = None
+
+                for row in bm_json.expand_json(js_new_ctr, js_new_opt):
+                    name = row['cpp_name']
+                    if name.endswith('_mean') or name.endswith('_stddev'):
+                        continue
+                    benchmarks[name].add_sample(track, row, True)
+                for row in bm_json.expand_json(js_old_ctr, js_old_opt):
+                    name = row['cpp_name']
+                    if name.endswith('_mean') or name.endswith('_stddev'):
+                        continue
+                    benchmarks[name].add_sample(track, row, False)
+
+    really_interesting = set()
+    for name, bm in benchmarks.items():
+        _maybe_print(name)
+        really_interesting.update(bm.process(track, new, old))
+    fields = [f for f in track if f in really_interesting]
+
+    headers = ['Benchmark'] + fields
+    rows = []
+    for name in sorted(benchmarks.keys()):
+        if benchmarks[name].skip(): continue
+        rows.append([name] + benchmarks[name].row(fields))
+    note = None
+    if len(badjson_files):
+        note = 'Corrupt JSON data (indicates timeout or crash): \n%s' % fmt_dict(
+            badjson_files)
+    if len(nonexistant_files):
+        if note:
+            note += '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(
+                nonexistant_files)
         else:
-          js_new_ctr = None
-          js_old_ctr = None
-
-        for row in bm_json.expand_json(js_new_ctr, js_new_opt):
-          name = row['cpp_name']
-          if name.endswith('_mean') or name.endswith('_stddev'):
-            continue
-          benchmarks[name].add_sample(track, row, True)
-        for row in bm_json.expand_json(js_old_ctr, js_old_opt):
-          name = row['cpp_name']
-          if name.endswith('_mean') or name.endswith('_stddev'):
-            continue
-          benchmarks[name].add_sample(track, row, False)
-
-  really_interesting = set()
-  for name, bm in benchmarks.items():
-    _maybe_print(name)
-    really_interesting.update(bm.process(track, new, old))
-  fields = [f for f in track if f in really_interesting]
-
-  headers = ['Benchmark'] + fields
-  rows = []
-  for name in sorted(benchmarks.keys()):
-    if benchmarks[name].skip(): continue
-    rows.append([name] + benchmarks[name].row(fields))
-  note = None
-  if len(badjson_files):
-    note = 'Corrupt JSON data (indicates timeout or crash): \n%s' % fmt_dict(badjson_files)
-  if len(nonexistant_files):
-    if note:
-      note += '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(nonexistant_files)
+            note = '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(
+                nonexistant_files)
+    if rows:
+        return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
     else:
-      note = '\n\nMissing files (indicates new benchmark): \n%s' % fmt_dict(nonexistant_files)
-  if rows:
-    return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f'), note
-  else:
-    return None, note
+        return None, note
 
 
 if __name__ == '__main__':
-  args = _args()
-  diff, note = diff(args.benchmarks, args.loops, args.regex, args.track, args.old,
-            args.new, args.counters)
-  print('%s\n%s' % (note, diff if diff else "No performance differences"))
+    args = _args()
+    diff, note = diff(args.benchmarks, args.loops, args.regex, args.track,
+                      args.old, args.new, args.counters)
+    print('%s\n%s' % (note, diff if diff else "No performance differences"))
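
A standalone sketch of the median helper and the flagging rule applied in Benchmark.process() above; the speedup percentage itself comes from bm_speedup.speedup() and is treated here as an input.

def median(values):
    values = sorted(values)
    n = len(values)
    if n % 2 == 0:
        mid = (n - 1) // 2
        return (values[mid] + values[mid + 1]) / 2.0
    return values[n // 2]


def flag(field, new_samples, old_samples, speedup_pct):
    """Report a field only when the change is both significant and sizeable."""
    mdn_diff = abs(median(new_samples) - median(old_samples))
    if abs(speedup_pct) > 3 and (mdn_diff > 0.5 or 'trickle' in field):
        return '%+d%%' % speedup_pct
    return None


assert median([3, 1, 2]) == 2
assert median([1, 2, 3, 4]) == 2.5
assert flag('cpu_time', [10, 11, 12], [14, 15, 16], -25) == '-25%'
assert flag('cpu_time', [10.1, 10.2], [10.0, 10.3], 4) is None  # median barely moved
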
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_main.py b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
index 74b7174..96c63ba 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_main.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_main.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """ Runs the entire bm_*.py pipeline, and possible comments on the PR """
 
 import bm_constants
@@ -29,129 +28,133 @@
 import subprocess
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
 import comment_on_pr
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
-    'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
+        'python_utils'))
 import jobset
 
 
 def _args():
-  argp = argparse.ArgumentParser(
-    description='Perform diff on microbenchmarks')
-  argp.add_argument(
-    '-t',
-    '--track',
-    choices=sorted(bm_constants._INTERESTING),
-    nargs='+',
-    default=sorted(bm_constants._INTERESTING),
-    help='Which metrics to track')
-  argp.add_argument(
-    '-b',
-    '--benchmarks',
-    nargs='+',
-    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    help='Which benchmarks to run')
-  argp.add_argument(
-    '-d',
-    '--diff_base',
-    type=str,
-    help='Commit or branch to compare the current one to')
-  argp.add_argument(
-    '-o',
-    '--old',
-    default='old',
-    type=str,
-    help='Name of baseline run to compare to. Ususally just called "old"')
-  argp.add_argument(
-    '-r',
-    '--regex',
-    type=str,
-    default="",
-    help='Regex to filter benchmarks run')
-  argp.add_argument(
-    '-l',
-    '--loops',
-    type=int,
-    default=10,
-    help='Number of times to loops the benchmarks. More loops cuts down on noise'
-  )
-  argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count(),
-    help='Number of CPUs to use')
-  argp.add_argument(
-    '--pr_comment_name',
-    type=str,
-    default="microbenchmarks",
-    help='Name that Jenkins will use to commen on the PR')
-  argp.add_argument('--counters', dest='counters', action='store_true')
-  argp.add_argument('--no-counters', dest='counters', action='store_false')
-  argp.set_defaults(counters=True)
-  args = argp.parse_args()
-  assert args.diff_base or args.old, "One of diff_base or old must be set!"
-  if args.loops < 3:
-    print "WARNING: This run will likely be noisy. Increase loops."
-  return args
+    argp = argparse.ArgumentParser(
+        description='Perform diff on microbenchmarks')
+    argp.add_argument(
+        '-t',
+        '--track',
+        choices=sorted(bm_constants._INTERESTING),
+        nargs='+',
+        default=sorted(bm_constants._INTERESTING),
+        help='Which metrics to track')
+    argp.add_argument(
+        '-b',
+        '--benchmarks',
+        nargs='+',
+        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        help='Which benchmarks to run')
+    argp.add_argument(
+        '-d',
+        '--diff_base',
+        type=str,
+        help='Commit or branch to compare the current one to')
+    argp.add_argument(
+        '-o',
+        '--old',
+        default='old',
+        type=str,
+        help='Name of baseline run to compare to. Usually just called "old"')
+    argp.add_argument(
+        '-r',
+        '--regex',
+        type=str,
+        default="",
+        help='Regex to filter benchmarks run')
+    argp.add_argument(
+        '-l',
+        '--loops',
+        type=int,
+        default=10,
+        help=
+        'Number of times to loop the benchmarks. More loops cut down on noise'
+    )
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        type=int,
+        default=multiprocessing.cpu_count(),
+        help='Number of CPUs to use')
+    argp.add_argument(
+        '--pr_comment_name',
+        type=str,
+        default="microbenchmarks",
+        help='Name that Jenkins will use to comment on the PR')
+    argp.add_argument('--counters', dest='counters', action='store_true')
+    argp.add_argument('--no-counters', dest='counters', action='store_false')
+    argp.set_defaults(counters=True)
+    args = argp.parse_args()
+    assert args.diff_base or args.old, "One of diff_base or old must be set!"
+    if args.loops < 3:
+        print "WARNING: This run will likely be noisy. Increase loops."
+    return args
 
 
 def eintr_be_gone(fn):
-  """Run fn until it doesn't stop because of EINTR"""
+    """Run fn until it doesn't stop because of EINTR"""
 
-  def inner(*args):
-    while True:
-      try:
-        return fn(*args)
-      except IOError, e:
-        if e.errno != errno.EINTR:
-          raise
+    def inner(*args):
+        while True:
+            try:
+                return fn(*args)
+            except IOError, e:
+                if e.errno != errno.EINTR:
+                    raise
 
-  return inner
+    return inner
 
 
 def main(args):
 
-  bm_build.build('new', args.benchmarks, args.jobs, args.counters)
+    bm_build.build('new', args.benchmarks, args.jobs, args.counters)
 
-  old = args.old
-  if args.diff_base:
-    old = 'old'
-    where_am_i = subprocess.check_output(
-      ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
-    subprocess.check_call(['git', 'checkout', args.diff_base])
-    try:
-      bm_build.build(old, args.benchmarks, args.jobs, args.counters)
-    finally:
-      subprocess.check_call(['git', 'checkout', where_am_i])
-      subprocess.check_call(['git', 'submodule', 'update'])
+    old = args.old
+    if args.diff_base:
+        old = 'old'
+        where_am_i = subprocess.check_output(
+            ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+        subprocess.check_call(['git', 'checkout', args.diff_base])
+        try:
+            bm_build.build(old, args.benchmarks, args.jobs, args.counters)
+        finally:
+            subprocess.check_call(['git', 'checkout', where_am_i])
+            subprocess.check_call(['git', 'submodule', 'update'])
 
-  jobs_list = []
-  jobs_list += bm_run.create_jobs('new', args.benchmarks, args.loops, args.regex, args.counters)
-  jobs_list += bm_run.create_jobs(old, args.benchmarks, args.loops, args.regex, args.counters)
+    jobs_list = []
+    jobs_list += bm_run.create_jobs('new', args.benchmarks, args.loops,
+                                    args.regex, args.counters)
+    jobs_list += bm_run.create_jobs(old, args.benchmarks, args.loops,
+                                    args.regex, args.counters)
 
-  # shuffle all jobs to eliminate noise from GCE CPU drift
-  random.shuffle(jobs_list, random.SystemRandom().random)
-  jobset.run(jobs_list, maxjobs=args.jobs)
+    # shuffle all jobs to eliminate noise from GCE CPU drift
+    random.shuffle(jobs_list, random.SystemRandom().random)
+    jobset.run(jobs_list, maxjobs=args.jobs)
 
-  diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex, args.track, old,
-                'new', args.counters)
-  if diff:
-    text = '[%s] Performance differences noted:\n%s' % (args.pr_comment_name, diff)
-  else:
-    text = '[%s] No significant performance differences' % args.pr_comment_name
-  if note:
-    text = note + '\n\n' + text
-  print('%s' % text)
-  comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+    diff, note = bm_diff.diff(args.benchmarks, args.loops, args.regex,
+                              args.track, old, 'new', args.counters)
+    if diff:
+        text = '[%s] Performance differences noted:\n%s' % (
+            args.pr_comment_name, diff)
+    else:
+        text = '[%s] No significant performance differences' % args.pr_comment_name
+    if note:
+        text = note + '\n\n' + text
+    print('%s' % text)
+    comment_on_pr.comment_on_pr('```\n%s\n```' % text)
 
 
 if __name__ == '__main__':
-  args = _args()
-  main(args)
+    args = _args()
+    main(args)
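
The eintr_be_gone() helper above is a retry wrapper for calls that can be interrupted by signals. A minimal usage sketch follows; read_all() and the file path are hypothetical, for illustration only, and are not part of the tool:

@eintr_be_gone
def read_all(f):
    # if the underlying read() is interrupted by a signal (EINTR), the wrapper
    # retries it transparently instead of surfacing the IOError
    return f.read()

data = read_all(open('bm_diff_new/opt/some_output.json'))
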
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_run.py b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
index 81db5a2..dfb9b17 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_run.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_run.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """ Python utility to run opt and counters benchmarks and save json output """
 
 import bm_constants
@@ -27,93 +26,99 @@
 import os
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
-    'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', '..', 'run_tests',
+        'python_utils'))
 import jobset
 
 
 def _args():
-  argp = argparse.ArgumentParser(description='Runs microbenchmarks')
-  argp.add_argument(
-    '-b',
-    '--benchmarks',
-    nargs='+',
-    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-    help='Benchmarks to run')
-  argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count(),
-    help='Number of CPUs to use')
-  argp.add_argument(
-    '-n',
-    '--name',
-    type=str,
-    help='Unique name of the build to run. Needs to match the handle passed to bm_build.py'
-  )
-  argp.add_argument(
-    '-r',
-    '--regex',
-    type=str,
-    default="",
-    help='Regex to filter benchmarks run')
-  argp.add_argument(
-    '-l',
-    '--loops',
-    type=int,
-    default=20,
-    help='Number of times to loops the benchmarks. More loops cuts down on noise'
-  )
-  argp.add_argument('--counters', dest='counters', action='store_true')
-  argp.add_argument('--no-counters', dest='counters', action='store_false')
-  argp.set_defaults(counters=True)
-  args = argp.parse_args()
-  assert args.name
-  if args.loops < 3:
-    print "WARNING: This run will likely be noisy. Increase loops to at least 3."
-  return args
+    argp = argparse.ArgumentParser(description='Runs microbenchmarks')
+    argp.add_argument(
+        '-b',
+        '--benchmarks',
+        nargs='+',
+        choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+        help='Benchmarks to run')
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        type=int,
+        default=multiprocessing.cpu_count(),
+        help='Number of CPUs to use')
+    argp.add_argument(
+        '-n',
+        '--name',
+        type=str,
+        help=
+        'Unique name of the build to run. Needs to match the handle passed to bm_build.py'
+    )
+    argp.add_argument(
+        '-r',
+        '--regex',
+        type=str,
+        default="",
+        help='Regex to filter benchmarks run')
+    argp.add_argument(
+        '-l',
+        '--loops',
+        type=int,
+        default=20,
+        help=
+        'Number of times to loop the benchmarks. More loops cut down on noise'
+    )
+    argp.add_argument('--counters', dest='counters', action='store_true')
+    argp.add_argument('--no-counters', dest='counters', action='store_false')
+    argp.set_defaults(counters=True)
+    args = argp.parse_args()
+    assert args.name
+    if args.loops < 3:
+        print "WARNING: This run will likely be noisy. Increase loops to at least 3."
+    return args
 
 
 def _collect_bm_data(bm, cfg, name, regex, idx, loops):
-  jobs_list = []
-  for line in subprocess.check_output(
-    ['bm_diff_%s/%s/%s' % (name, cfg, bm),
-     '--benchmark_list_tests', '--benchmark_filter=%s' % regex]).splitlines():
-    stripped_line = line.strip().replace("/", "_").replace(
-      "<", "_").replace(">", "_").replace(", ", "_")
-    cmd = [
-      'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_filter=^%s$' %
-      line, '--benchmark_out=%s.%s.%s.%s.%d.json' %
-      (bm, stripped_line, cfg, name, idx), '--benchmark_out_format=json',
-    ]
-    jobs_list.append(
-      jobset.JobSpec(
-        cmd,
-        shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
-                         loops),
-        verbose_success=True,
-        cpu_cost=2,
-        timeout_seconds=60 * 60)) # one hour
-  return jobs_list
+    jobs_list = []
+    for line in subprocess.check_output([
+            'bm_diff_%s/%s/%s' % (name, cfg, bm), '--benchmark_list_tests',
+            '--benchmark_filter=%s' % regex
+    ]).splitlines():
+        stripped_line = line.strip().replace("/",
+                                             "_").replace("<", "_").replace(
+                                                 ">", "_").replace(", ", "_")
+        cmd = [
+            'bm_diff_%s/%s/%s' % (name, cfg, bm),
+            '--benchmark_filter=^%s$' % line,
+            '--benchmark_out=%s.%s.%s.%s.%d.json' % (bm, stripped_line, cfg,
+                                                     name, idx),
+            '--benchmark_out_format=json',
+        ]
+        jobs_list.append(
+            jobset.JobSpec(
+                cmd,
+                shortname='%s %s %s %s %d/%d' % (bm, line, cfg, name, idx + 1,
+                                                 loops),
+                verbose_success=True,
+                cpu_cost=2,
+                timeout_seconds=60 * 60))  # one hour
+    return jobs_list
 
 
 def create_jobs(name, benchmarks, loops, regex, counters):
-  jobs_list = []
-  for loop in range(0, loops):
-    for bm in benchmarks:
-      jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
-      if counters:
-        jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
-                        loops)
-  random.shuffle(jobs_list, random.SystemRandom().random)
-  return jobs_list
+    jobs_list = []
+    for loop in range(0, loops):
+        for bm in benchmarks:
+            jobs_list += _collect_bm_data(bm, 'opt', name, regex, loop, loops)
+            if counters:
+                jobs_list += _collect_bm_data(bm, 'counters', name, regex, loop,
+                                              loops)
+    random.shuffle(jobs_list, random.SystemRandom().random)
+    return jobs_list
 
 
 if __name__ == '__main__':
-  args = _args()
-  jobs_list = create_jobs(args.name, args.benchmarks, args.loops, 
-                          args.regex, args.counters)
-  jobset.run(jobs_list, maxjobs=args.jobs)
+    args = _args()
+    jobs_list = create_jobs(args.name, args.benchmarks, args.loops, args.regex,
+                            args.counters)
+    jobset.run(jobs_list, maxjobs=args.jobs)
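
For reference, _collect_bm_data() above derives the per-run JSON file name from the benchmark's full name by replacing characters that are unsafe in file names. A standalone sketch of that naming scheme, using an illustrative benchmark name and build handle (not taken from this diff):

line = 'BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/8/8'  # example name
stripped_line = line.strip().replace("/", "_").replace("<", "_").replace(
    ">", "_").replace(", ", "_")
# -> 'BM_UnaryPingPong_TCP_NoOpMutator_NoOpMutator__8_8'
fname = '%s.%s.%s.%s.%d.json' % ('bm_fullstack_unary_ping_pong', stripped_line,
                                 'opt', 'new', 0)
# -> bm_fullstack_unary_ping_pong.BM_UnaryPingPong_TCP_NoOpMutator_NoOpMutator__8_8.opt.new.0.json
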
diff --git a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
index 63e691a..2a77040 100755
--- a/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
+++ b/tools/profiling/microbenchmarks/bm_diff/bm_speedup.py
@@ -19,40 +19,41 @@
 
 _DEFAULT_THRESHOLD = 1e-10
 
+
 def scale(a, mul):
-  return [x * mul for x in a]
+    return [x * mul for x in a]
 
 
 def cmp(a, b):
-  return stats.ttest_ind(a, b)
+    return stats.ttest_ind(a, b)
 
 
-def speedup(new, old, threshold = _DEFAULT_THRESHOLD):
-  if (len(set(new))) == 1 and new == old: return 0
-  s0, p0 = cmp(new, old)
-  if math.isnan(p0): return 0
-  if s0 == 0: return 0
-  if p0 > threshold: return 0
-  if s0 < 0:
-    pct = 1
-    while pct < 100:
-      sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
-      if sp > 0: break
-      if pp > threshold: break
-      pct += 1
-    return -(pct - 1)
-  else:
-    pct = 1
-    while pct < 10000:
-      sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
-      if sp < 0: break
-      if pp > threshold: break
-      pct += 1
-    return pct - 1
+def speedup(new, old, threshold=_DEFAULT_THRESHOLD):
+    if (len(set(new))) == 1 and new == old: return 0
+    s0, p0 = cmp(new, old)
+    if math.isnan(p0): return 0
+    if s0 == 0: return 0
+    if p0 > threshold: return 0
+    if s0 < 0:
+        pct = 1
+        while pct < 100:
+            sp, pp = cmp(new, scale(old, 1 - pct / 100.0))
+            if sp > 0: break
+            if pp > threshold: break
+            pct += 1
+        return -(pct - 1)
+    else:
+        pct = 1
+        while pct < 10000:
+            sp, pp = cmp(new, scale(old, 1 + pct / 100.0))
+            if sp < 0: break
+            if pp > threshold: break
+            pct += 1
+        return pct - 1
 
 
 if __name__ == "__main__":
-  new = [0.0, 0.0, 0.0, 0.0] 
-  old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
-  print speedup(new, old, 1e-5)
-  print speedup(old, new, 1e-5)
+    new = [0.0, 0.0, 0.0, 0.0]
+    old = [2.96608e-06, 3.35076e-06, 3.45384e-06, 3.34407e-06]
+    print speedup(new, old, 1e-5)
+    print speedup(old, new, 1e-5)
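
Reading speedup()'s return value above: a positive result means the 'new' sample is statistically higher than 'old' by roughly that percentage, a negative result means it is lower, and 0 means no significant difference at the chosen threshold. A small sketch with made-up samples:

from bm_speedup import speedup

old = [1000.0, 1010.0, 990.0, 1005.0]  # hypothetical per-loop QPS numbers
new = [1100.0, 1090.0, 1110.0, 1095.0]
print speedup(new, old, 1e-5)  # positive: 'new' is significantly above 'old'
print speedup(old, new, 1e-5)  # negative: 'old' is significantly below 'new'
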
diff --git a/tools/profiling/microbenchmarks/bm_json.py b/tools/profiling/microbenchmarks/bm_json.py
index eb450ee..497d7ca 100644
--- a/tools/profiling/microbenchmarks/bm_json.py
+++ b/tools/profiling/microbenchmarks/bm_json.py
@@ -15,187 +15,197 @@
 import os
 
 _BM_SPECS = {
-  'BM_UnaryPingPong': {
-    'tpl': ['fixture', 'client_mutator', 'server_mutator'],
-    'dyn': ['request_size', 'response_size'],
-  },
-  'BM_PumpStreamClientToServer': {
-    'tpl': ['fixture'],
-    'dyn': ['request_size'],
-  },
-  'BM_PumpStreamServerToClient': {
-    'tpl': ['fixture'],
-    'dyn': ['request_size'],
-  },
-  'BM_StreamingPingPong': {
-    'tpl': ['fixture', 'client_mutator', 'server_mutator'],
-    'dyn': ['request_size', 'request_count'],
-  },
-  'BM_StreamingPingPongMsgs': {
-    'tpl': ['fixture', 'client_mutator', 'server_mutator'],
-    'dyn': ['request_size'],
-  },
-  'BM_PumpStreamServerToClient_Trickle': {
-    'tpl': [],
-    'dyn': ['request_size', 'bandwidth_kilobits'],
-  },
-  'BM_PumpUnbalancedUnary_Trickle': {
-    'tpl': [],
-    'dyn': ['cli_req_size', 'svr_req_size', 'bandwidth_kilobits'],
-  },
-  'BM_ErrorStringOnNewError': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_ErrorStringRepeatedly': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_ErrorGetStatus': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_ErrorGetStatusCode': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_ErrorHttpError': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_HasClearGrpcStatus': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_IsolatedFilter': {
-    'tpl': ['fixture', 'client_mutator'],
-    'dyn': [],
-  },
-  'BM_HpackEncoderEncodeHeader': {
-    'tpl': ['fixture'],
-    'dyn': ['end_of_stream', 'request_size'],
-  },
-  'BM_HpackParserParseHeader': {
-    'tpl': ['fixture', 'on_header'],
-    'dyn': [],
-  },
-  'BM_CallCreateDestroy': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_Zalloc': {
-    'tpl': [],
-    'dyn': ['request_size'],
-  },
-  'BM_PollEmptyPollset_SpeedOfLight': {
-    'tpl': [],
-    'dyn': ['request_size', 'request_count'],
-  },
-  'BM_StreamCreateSendInitialMetadataDestroy': {
-    'tpl': ['fixture'],
-    'dyn': [],
-  },
-  'BM_TransportStreamSend': {
-    'tpl': [],
-    'dyn': ['request_size'],
-  },
-  'BM_TransportStreamRecv': {
-    'tpl': [],
-    'dyn': ['request_size'],
-  },
-  'BM_StreamingPingPongWithCoalescingApi': {
-    'tpl': ['fixture', 'client_mutator', 'server_mutator'],
-    'dyn': ['request_size', 'request_count', 'end_of_stream'],
-  },
-  'BM_Base16SomeStuff': {
-    'tpl': [],
-    'dyn': ['request_size'],
-  }
+    'BM_UnaryPingPong': {
+        'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+        'dyn': ['request_size', 'response_size'],
+    },
+    'BM_PumpStreamClientToServer': {
+        'tpl': ['fixture'],
+        'dyn': ['request_size'],
+    },
+    'BM_PumpStreamServerToClient': {
+        'tpl': ['fixture'],
+        'dyn': ['request_size'],
+    },
+    'BM_StreamingPingPong': {
+        'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+        'dyn': ['request_size', 'request_count'],
+    },
+    'BM_StreamingPingPongMsgs': {
+        'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+        'dyn': ['request_size'],
+    },
+    'BM_PumpStreamServerToClient_Trickle': {
+        'tpl': [],
+        'dyn': ['request_size', 'bandwidth_kilobits'],
+    },
+    'BM_PumpUnbalancedUnary_Trickle': {
+        'tpl': [],
+        'dyn': ['cli_req_size', 'svr_req_size', 'bandwidth_kilobits'],
+    },
+    'BM_ErrorStringOnNewError': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_ErrorStringRepeatedly': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_ErrorGetStatus': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_ErrorGetStatusCode': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_ErrorHttpError': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_HasClearGrpcStatus': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_IsolatedFilter': {
+        'tpl': ['fixture', 'client_mutator'],
+        'dyn': [],
+    },
+    'BM_HpackEncoderEncodeHeader': {
+        'tpl': ['fixture'],
+        'dyn': ['end_of_stream', 'request_size'],
+    },
+    'BM_HpackParserParseHeader': {
+        'tpl': ['fixture', 'on_header'],
+        'dyn': [],
+    },
+    'BM_CallCreateDestroy': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_Zalloc': {
+        'tpl': [],
+        'dyn': ['request_size'],
+    },
+    'BM_PollEmptyPollset_SpeedOfLight': {
+        'tpl': [],
+        'dyn': ['request_size', 'request_count'],
+    },
+    'BM_StreamCreateSendInitialMetadataDestroy': {
+        'tpl': ['fixture'],
+        'dyn': [],
+    },
+    'BM_TransportStreamSend': {
+        'tpl': [],
+        'dyn': ['request_size'],
+    },
+    'BM_TransportStreamRecv': {
+        'tpl': [],
+        'dyn': ['request_size'],
+    },
+    'BM_StreamingPingPongWithCoalescingApi': {
+        'tpl': ['fixture', 'client_mutator', 'server_mutator'],
+        'dyn': ['request_size', 'request_count', 'end_of_stream'],
+    },
+    'BM_Base16SomeStuff': {
+        'tpl': [],
+        'dyn': ['request_size'],
+    }
 }
 
+
 def numericalize(s):
-  if not s: return ''
-  if s[-1] == 'k':
-    return float(s[:-1]) * 1024
-  if s[-1] == 'M':
-    return float(s[:-1]) * 1024 * 1024
-  if 0 <= (ord(s[-1]) - ord('0')) <= 9:
-    return float(s)
-  assert 'not a number: %s' % s
+    if not s: return ''
+    if s[-1] == 'k':
+        return float(s[:-1]) * 1024
+    if s[-1] == 'M':
+        return float(s[:-1]) * 1024 * 1024
+    if 0 <= (ord(s[-1]) - ord('0')) <= 9:
+        return float(s)
+    assert False, 'not a number: %s' % s
+
 
 def parse_name(name):
-  cpp_name = name
-  if '<' not in name and '/' not in name and name not in _BM_SPECS:
-    return {'name': name, 'cpp_name': name}
-  rest = name
-  out = {}
-  tpl_args = []
-  dyn_args = []
-  if '<' in rest:
-    tpl_bit = rest[rest.find('<') + 1 : rest.rfind('>')]
-    arg = ''
-    nesting = 0
-    for c in tpl_bit:
-      if c == '<':
-        nesting += 1
-        arg += c
-      elif c == '>':
-        nesting -= 1
-        arg += c
-      elif c == ',':
-        if nesting == 0:
-          tpl_args.append(arg.strip())
-          arg = ''
-        else:
-          arg += c
-      else:
-        arg += c
-    tpl_args.append(arg.strip())
-    rest = rest[:rest.find('<')] + rest[rest.rfind('>') + 1:]
-  if '/' in rest:
-    s = rest.split('/')
-    rest = s[0]
-    dyn_args = s[1:]
-  name = rest
-  print (name)
-  print (dyn_args, _BM_SPECS[name]['dyn'])
-  print (tpl_args, _BM_SPECS[name]['tpl'])
-  assert name in _BM_SPECS, '_BM_SPECS needs to be expanded for %s' % name
-  assert len(dyn_args) == len(_BM_SPECS[name]['dyn'])
-  assert len(tpl_args) == len(_BM_SPECS[name]['tpl'])
-  out['name'] = name
-  out['cpp_name'] = cpp_name
-  out.update(dict((k, numericalize(v)) for k, v in zip(_BM_SPECS[name]['dyn'], dyn_args)))
-  out.update(dict(zip(_BM_SPECS[name]['tpl'], tpl_args)))
-  return out
+    cpp_name = name
+    if '<' not in name and '/' not in name and name not in _BM_SPECS:
+        return {'name': name, 'cpp_name': name}
+    rest = name
+    out = {}
+    tpl_args = []
+    dyn_args = []
+    if '<' in rest:
+        tpl_bit = rest[rest.find('<') + 1:rest.rfind('>')]
+        arg = ''
+        nesting = 0
+        for c in tpl_bit:
+            if c == '<':
+                nesting += 1
+                arg += c
+            elif c == '>':
+                nesting -= 1
+                arg += c
+            elif c == ',':
+                if nesting == 0:
+                    tpl_args.append(arg.strip())
+                    arg = ''
+                else:
+                    arg += c
+            else:
+                arg += c
+        tpl_args.append(arg.strip())
+        rest = rest[:rest.find('<')] + rest[rest.rfind('>') + 1:]
+    if '/' in rest:
+        s = rest.split('/')
+        rest = s[0]
+        dyn_args = s[1:]
+    name = rest
+    print(name)
+    print(dyn_args, _BM_SPECS[name]['dyn'])
+    print(tpl_args, _BM_SPECS[name]['tpl'])
+    assert name in _BM_SPECS, '_BM_SPECS needs to be expanded for %s' % name
+    assert len(dyn_args) == len(_BM_SPECS[name]['dyn'])
+    assert len(tpl_args) == len(_BM_SPECS[name]['tpl'])
+    out['name'] = name
+    out['cpp_name'] = cpp_name
+    out.update(
+        dict((k, numericalize(v))
+             for k, v in zip(_BM_SPECS[name]['dyn'], dyn_args)))
+    out.update(dict(zip(_BM_SPECS[name]['tpl'], tpl_args)))
+    return out
 
-def expand_json(js, js2 = None):
-  if not js and not js2: raise StopIteration()
-  if not js: js = js2
-  for bm in js['benchmarks']:
-    if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'): continue
-    context = js['context']
-    if 'label' in bm:
-      labels_list = [s.split(':') for s in bm['label'].strip().split(' ') if len(s) and s[0] != '#']
-      for el in labels_list:
-        el[0] = el[0].replace('/iter', '_per_iteration')
-      labels = dict(labels_list)
-    else:
-      labels = {}
-    row = {
-      'jenkins_build': os.environ.get('BUILD_NUMBER', ''),
-      'jenkins_job': os.environ.get('JOB_NAME', ''),
-    }
-    row.update(context)
-    row.update(bm)
-    row.update(parse_name(row['name']))
-    row.update(labels)
-    if js2:
-      for bm2 in js2['benchmarks']:
-        if bm['name'] == bm2['name'] and 'already_used' not in bm2:
-          row['cpu_time'] = bm2['cpu_time']
-          row['real_time'] = bm2['real_time']
-          row['iterations'] = bm2['iterations']
-          bm2['already_used'] = True
-          break
-    yield row
+
+def expand_json(js, js2=None):
+    if not js and not js2: raise StopIteration()
+    if not js: js = js2
+    for bm in js['benchmarks']:
+        if bm['name'].endswith('_stddev') or bm['name'].endswith('_mean'):
+            continue
+        context = js['context']
+        if 'label' in bm:
+            labels_list = [
+                s.split(':')
+                for s in bm['label'].strip().split(' ')
+                if len(s) and s[0] != '#'
+            ]
+            for el in labels_list:
+                el[0] = el[0].replace('/iter', '_per_iteration')
+            labels = dict(labels_list)
+        else:
+            labels = {}
+        row = {
+            'jenkins_build': os.environ.get('BUILD_NUMBER', ''),
+            'jenkins_job': os.environ.get('JOB_NAME', ''),
+        }
+        row.update(context)
+        row.update(bm)
+        row.update(parse_name(row['name']))
+        row.update(labels)
+        if js2:
+            for bm2 in js2['benchmarks']:
+                if bm['name'] == bm2['name'] and 'already_used' not in bm2:
+                    row['cpu_time'] = bm2['cpu_time']
+                    row['real_time'] = bm2['real_time']
+                    row['iterations'] = bm2['iterations']
+                    bm2['already_used'] = True
+                    break
+        yield row
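
A worked example of parse_name() above may help; the fixture and mutator names are illustrative placeholders with the same shape as real benchmark names:

from bm_json import parse_name

parsed = parse_name('BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/0/0')
# parsed == {
#     'name': 'BM_UnaryPingPong',
#     'cpp_name': 'BM_UnaryPingPong<TCP, NoOpMutator, NoOpMutator>/0/0',
#     'fixture': 'TCP',
#     'client_mutator': 'NoOpMutator',
#     'server_mutator': 'NoOpMutator',
#     'request_size': 0.0,
#     'response_size': 0.0,
# }
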
diff --git a/tools/profiling/qps/qps_diff.py b/tools/profiling/qps/qps_diff.py
index 0654f45..393f862 100755
--- a/tools/profiling/qps/qps_diff.py
+++ b/tools/profiling/qps/qps_diff.py
@@ -26,144 +26,147 @@
 import tabulate
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', 'microbenchmarks', 'bm_diff'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', 'microbenchmarks', 'bm_diff'))
 import bm_speedup
 
 sys.path.append(
-  os.path.join(
-    os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
 import comment_on_pr
 
 
 def _args():
-  argp = argparse.ArgumentParser(
-    description='Perform diff on QPS Driver')
-  argp.add_argument(
-    '-d',
-    '--diff_base',
-    type=str,
-    help='Commit or branch to compare the current one to')
-  argp.add_argument(
-    '-l',
-    '--loops',
-    type=int,
-    default=4,
-    help='Number of loops for each benchmark. More loops cuts down on noise'
-  )
-  argp.add_argument(
-    '-j',
-    '--jobs',
-    type=int,
-    default=multiprocessing.cpu_count(),
-    help='Number of CPUs to use')
-  args = argp.parse_args()
-  assert args.diff_base, "diff_base must be set"
-  return args
+    argp = argparse.ArgumentParser(description='Perform diff on QPS Driver')
+    argp.add_argument(
+        '-d',
+        '--diff_base',
+        type=str,
+        help='Commit or branch to compare the current one to')
+    argp.add_argument(
+        '-l',
+        '--loops',
+        type=int,
+        default=4,
+        help='Number of loops for each benchmark. More loops cut down on noise'
+    )
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        type=int,
+        default=multiprocessing.cpu_count(),
+        help='Number of CPUs to use')
+    args = argp.parse_args()
+    assert args.diff_base, "diff_base must be set"
+    return args
 
 
 def _make_cmd(jobs):
-  return ['make', '-j', '%d' % jobs, 'qps_json_driver', 'qps_worker']
+    return ['make', '-j', '%d' % jobs, 'qps_json_driver', 'qps_worker']
 
 
 def build(name, jobs):
-  shutil.rmtree('qps_diff_%s' % name, ignore_errors=True)
-  subprocess.check_call(['git', 'submodule', 'update'])
-  try:
-    subprocess.check_call(_make_cmd(jobs))
-  except subprocess.CalledProcessError, e:
-    subprocess.check_call(['make', 'clean'])
-    subprocess.check_call(_make_cmd(jobs))
-  os.rename('bins', 'qps_diff_%s' % name)
+    shutil.rmtree('qps_diff_%s' % name, ignore_errors=True)
+    subprocess.check_call(['git', 'submodule', 'update'])
+    try:
+        subprocess.check_call(_make_cmd(jobs))
+    except subprocess.CalledProcessError, e:
+        subprocess.check_call(['make', 'clean'])
+        subprocess.check_call(_make_cmd(jobs))
+    os.rename('bins', 'qps_diff_%s' % name)
 
 
 def _run_cmd(name, scenario, fname):
-  return ['qps_diff_%s/opt/qps_json_driver' % name, '--scenarios_json', scenario, '--json_file_out', fname]
+    return [
+        'qps_diff_%s/opt/qps_json_driver' % name, '--scenarios_json', scenario,
+        '--json_file_out', fname
+    ]
 
 
 def run(name, scenarios, loops):
-  for sn in scenarios:
-    for i in range(0, loops):
-      fname = "%s.%s.%d.json" % (sn, name, i)
-      subprocess.check_call(_run_cmd(name, scenarios[sn], fname))
+    for sn in scenarios:
+        for i in range(0, loops):
+            fname = "%s.%s.%d.json" % (sn, name, i)
+            subprocess.check_call(_run_cmd(name, scenarios[sn], fname))
 
 
 def _load_qps(fname):
-  try:
-    with open(fname) as f:
-      return json.loads(f.read())['qps']
-  except IOError, e:
-    print("IOError occurred reading file: %s" % fname)
-    return None
-  except ValueError, e:
-    print("ValueError occurred reading file: %s" % fname)
-    return None
+    try:
+        with open(fname) as f:
+            return json.loads(f.read())['qps']
+    except IOError, e:
+        print("IOError occurred reading file: %s" % fname)
+        return None
+    except ValueError, e:
+        print("ValueError occurred reading file: %s" % fname)
+        return None
 
 
 def _median(ary):
-  assert (len(ary))
-  ary = sorted(ary)
-  n = len(ary)
-  if n % 2 == 0:
-    return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
-  else:
-    return ary[n / 2]
+    assert (len(ary))
+    ary = sorted(ary)
+    n = len(ary)
+    if n % 2 == 0:
+        return (ary[(n - 1) / 2] + ary[(n - 1) / 2 + 1]) / 2.0
+    else:
+        return ary[n / 2]
 
 
 def diff(scenarios, loops, old, new):
-  old_data = {}
-  new_data = {}
+    old_data = {}
+    new_data = {}
 
-  # collect data
-  for sn in scenarios:
-    old_data[sn] = []
-    new_data[sn] = []
-    for i in range(loops):
-      old_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, old, i)))
-      new_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, new, i)))
+    # collect data
+    for sn in scenarios:
+        old_data[sn] = []
+        new_data[sn] = []
+        for i in range(loops):
+            old_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, old, i)))
+            new_data[sn].append(_load_qps("%s.%s.%d.json" % (sn, new, i)))
 
-  # crunch data
-  headers = ['Benchmark', 'qps']
-  rows = []
-  for sn in scenarios:
-    mdn_diff = abs(_median(new_data[sn]) - _median(old_data[sn]))
-    print('%s: %s=%r %s=%r mdn_diff=%r' % (sn, new, new_data[sn], old, old_data[sn], mdn_diff))
-    s = bm_speedup.speedup(new_data[sn], old_data[sn], 10e-5)
-    if abs(s) > 3 and mdn_diff > 0.5:
-      rows.append([sn, '%+d%%' % s])
+    # crunch data
+    headers = ['Benchmark', 'qps']
+    rows = []
+    for sn in scenarios:
+        mdn_diff = abs(_median(new_data[sn]) - _median(old_data[sn]))
+        print('%s: %s=%r %s=%r mdn_diff=%r' % (sn, new, new_data[sn], old,
+                                               old_data[sn], mdn_diff))
+        s = bm_speedup.speedup(new_data[sn], old_data[sn], 10e-5)
+        if abs(s) > 3 and mdn_diff > 0.5:
+            rows.append([sn, '%+d%%' % s])
 
-  if rows:
-    return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
-  else:
-    return None
+    if rows:
+        return tabulate.tabulate(rows, headers=headers, floatfmt='+.2f')
+    else:
+        return None
 
 
 def main(args):
-  build('new', args.jobs)
+    build('new', args.jobs)
 
-  if args.diff_base:
-    where_am_i = subprocess.check_output(
-      ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
-    subprocess.check_call(['git', 'checkout', args.diff_base])
-    try:
-      build('old', args.jobs)
-    finally:
-      subprocess.check_call(['git', 'checkout', where_am_i])
-      subprocess.check_call(['git', 'submodule', 'update'])
+    if args.diff_base:
+        where_am_i = subprocess.check_output(
+            ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
+        subprocess.check_call(['git', 'checkout', args.diff_base])
+        try:
+            build('old', args.jobs)
+        finally:
+            subprocess.check_call(['git', 'checkout', where_am_i])
+            subprocess.check_call(['git', 'submodule', 'update'])
 
-  run('new', qps_scenarios._SCENARIOS, args.loops)
-  run('old', qps_scenarios._SCENARIOS, args.loops)
+    run('new', qps_scenarios._SCENARIOS, args.loops)
+    run('old', qps_scenarios._SCENARIOS, args.loops)
 
-  diff_output = diff(qps_scenarios._SCENARIOS, args.loops, 'old', 'new')
+    diff_output = diff(qps_scenarios._SCENARIOS, args.loops, 'old', 'new')
 
-  if diff_output:
-    text = '[qps] Performance differences noted:\n%s' % diff_output
-  else:
-    text = '[qps] No significant performance differences'
-  print('%s' % text)
-  comment_on_pr.comment_on_pr('```\n%s\n```' % text)
+    if diff_output:
+        text = '[qps] Performance differences noted:\n%s' % diff_output
+    else:
+        text = '[qps] No significant performance differences'
+    print('%s' % text)
+    comment_on_pr.comment_on_pr('```\n%s\n```' % text)
 
 
 if __name__ == '__main__':
-  args = _args()
-  main(args)
+    args = _args()
+    main(args)
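
Two details of the reporting logic above are easy to miss: a scenario is only tabulated when the speedup magnitude exceeds 3 (percent) and the medians differ by more than 0.5 QPS, and _median() averages the two middle elements for even-length samples. A quick sketch of the latter (assuming qps_diff and its dependencies are importable):

from qps_diff import _median

print _median([3.0, 1.0, 2.0])       # 2.0 -- odd length: middle element
print _median([4.0, 1.0, 3.0, 2.0])  # 2.5 -- even length: mean of the two middle values
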
diff --git a/tools/profiling/qps/qps_scenarios.py b/tools/profiling/qps/qps_scenarios.py
index 4fbbdef..532acc9 100644
--- a/tools/profiling/qps/qps_scenarios.py
+++ b/tools/profiling/qps/qps_scenarios.py
@@ -14,6 +14,8 @@
 """ QPS Scenarios to run """
 
 _SCENARIOS = {
-  'large-message-throughput': '{"scenarios":[{"name":"large-message-throughput", "spawn_local_worker_count": -2, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 1, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 1, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 1048576, "req_size": 1048576}}, "client_channels": 1, "async_client_threads": 1, "outstanding_rpcs_per_channel": 1, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}',
-  'multi-channel-64-KiB': '{"scenarios":[{"name":"multi-channel-64-KiB", "spawn_local_worker_count": -3, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 31, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 2, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 65536, "req_size": 65536}}, "client_channels": 32, "async_client_threads": 31, "outstanding_rpcs_per_channel": 100, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}'
+    'large-message-throughput':
+    '{"scenarios":[{"name":"large-message-throughput", "spawn_local_worker_count": -2, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 1, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 1, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 1048576, "req_size": 1048576}}, "client_channels": 1, "async_client_threads": 1, "outstanding_rpcs_per_channel": 1, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}',
+    'multi-channel-64-KiB':
+    '{"scenarios":[{"name":"multi-channel-64-KiB", "spawn_local_worker_count": -3, "warmup_seconds": 30, "benchmark_seconds": 270, "num_servers": 1, "server_config": {"async_server_threads": 31, "security_params": null, "server_type": "ASYNC_SERVER"}, "num_clients": 2, "client_config": {"client_type": "ASYNC_CLIENT", "security_params": null, "payload_config": {"simple_params": {"resp_size": 65536, "req_size": 65536}}, "client_channels": 32, "async_client_threads": 31, "outstanding_rpcs_per_channel": 100, "rpc_type": "UNARY", "load_params": {"closed_loop": {}}, "histogram_params": {"max_possible": 60000000000.0, "resolution": 0.01}}}]}'
 }
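
Each scenario above is stored as a raw JSON string that qps_json_driver consumes via --scenarios_json; a small sketch of inspecting one entry with the standard library:

import json

from qps_scenarios import _SCENARIOS

scenario = json.loads(_SCENARIOS['large-message-throughput'])['scenarios'][0]
print scenario['name']               # large-message-throughput
print scenario['benchmark_seconds']  # 270
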
diff --git a/tools/run_tests/artifacts/artifact_targets.py b/tools/run_tests/artifacts/artifact_targets.py
index ea202ed..efc4ca0 100644
--- a/tools/run_tests/artifacts/artifact_targets.py
+++ b/tools/run_tests/artifacts/artifact_targets.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Definition of targets to build artifacts."""
 
 import os.path
@@ -24,316 +23,352 @@
 import python_utils.jobset as jobset
 
 
-def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
-                   flake_retries=0, timeout_retries=0, timeout_seconds=30*60,
-                   docker_base_image=None, extra_docker_args=None):
-  """Creates jobspec for a task running under docker."""
-  environ = environ.copy()
-  environ['RUN_COMMAND'] = shell_command
-  environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
+def create_docker_jobspec(name,
+                          dockerfile_dir,
+                          shell_command,
+                          environ={},
+                          flake_retries=0,
+                          timeout_retries=0,
+                          timeout_seconds=30 * 60,
+                          docker_base_image=None,
+                          extra_docker_args=None):
+    """Creates jobspec for a task running under docker."""
+    environ = environ.copy()
+    environ['RUN_COMMAND'] = shell_command
+    environ['ARTIFACTS_OUT'] = 'artifacts/%s' % name
 
-  docker_args=[]
-  for k,v in environ.items():
-    docker_args += ['-e', '%s=%s' % (k, v)]
-  docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
-                'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
-                'OUTPUT_DIR': 'artifacts'}
+    docker_args = []
+    for k, v in environ.items():
+        docker_args += ['-e', '%s=%s' % (k, v)]
+    docker_env = {
+        'DOCKERFILE_DIR': dockerfile_dir,
+        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
+        'OUTPUT_DIR': 'artifacts'
+    }
 
-  if docker_base_image is not None:
-    docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
-  if extra_docker_args is not None:
-    docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
-  jobspec = jobset.JobSpec(
-          cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
-          environ=docker_env,
-          shortname='build_artifact.%s' % (name),
-          timeout_seconds=timeout_seconds,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries)
-  return jobspec
+    if docker_base_image is not None:
+        docker_env['DOCKER_BASE_IMAGE'] = docker_base_image
+    if extra_docker_args is not None:
+        docker_env['EXTRA_DOCKER_ARGS'] = extra_docker_args
+    jobspec = jobset.JobSpec(
+        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+        docker_args,
+        environ=docker_env,
+        shortname='build_artifact.%s' % (name),
+        timeout_seconds=timeout_seconds,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries)
+    return jobspec
 
 
-def create_jobspec(name, cmdline, environ={}, shell=False,
-                   flake_retries=0, timeout_retries=0, timeout_seconds=30*60,
+def create_jobspec(name,
+                   cmdline,
+                   environ={},
+                   shell=False,
+                   flake_retries=0,
+                   timeout_retries=0,
+                   timeout_seconds=30 * 60,
                    use_workspace=False,
                    cpu_cost=1.0):
-  """Creates jobspec."""
-  environ = environ.copy()
-  if use_workspace:
-    environ['WORKSPACE_NAME'] = 'workspace_%s' % name
-    environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
-    cmdline = ['bash',
-               'tools/run_tests/artifacts/run_in_workspace.sh'] + cmdline
-  else:
-    environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
+    """Creates jobspec."""
+    environ = environ.copy()
+    if use_workspace:
+        environ['WORKSPACE_NAME'] = 'workspace_%s' % name
+        environ['ARTIFACTS_OUT'] = os.path.join('..', 'artifacts', name)
+        cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
+                  ] + cmdline
+    else:
+        environ['ARTIFACTS_OUT'] = os.path.join('artifacts', name)
 
-  jobspec = jobset.JobSpec(
-          cmdline=cmdline,
-          environ=environ,
-          shortname='build_artifact.%s' % (name),
-          timeout_seconds=timeout_seconds,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries,
-          shell=shell,
-          cpu_cost=cpu_cost)
-  return jobspec
+    jobspec = jobset.JobSpec(
+        cmdline=cmdline,
+        environ=environ,
+        shortname='build_artifact.%s' % (name),
+        timeout_seconds=timeout_seconds,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries,
+        shell=shell,
+        cpu_cost=cpu_cost)
+    return jobspec
 
 
 _MACOS_COMPAT_FLAG = '-mmacosx-version-min=10.7'
 
-_ARCH_FLAG_MAP = {
-  'x86': '-m32',
-  'x64': '-m64'
-}
+_ARCH_FLAG_MAP = {'x86': '-m32', 'x64': '-m64'}
 
 
 class PythonArtifact:
-  """Builds Python artifacts."""
+    """Builds Python artifacts."""
 
-  def __init__(self, platform, arch, py_version):
-    self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'python', platform, arch, py_version]
-    self.py_version = py_version
+    def __init__(self, platform, arch, py_version):
+        self.name = 'python_%s_%s_%s' % (platform, arch, py_version)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'python', platform, arch, py_version]
+        self.py_version = py_version
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    environ = {}
-    if self.platform == 'linux_extra':
-      # Raspberry Pi build
-      environ['PYTHON'] = '/usr/local/bin/python{}'.format(self.py_version)
-      environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
-      # https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
-      # A QEMU bug causes submodule update to hang, so we copy directly
-      environ['RELATIVE_COPY_PATH'] = '.'
-      extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
-      return create_docker_jobspec(self.name,
-          'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
-          'tools/run_tests/artifacts/build_artifact_python.sh',
-          environ=environ,
-          timeout_seconds=60*60*5,
-          docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
-          extra_docker_args=extra_args)
-    elif self.platform == 'linux':
-      if self.arch == 'x86':
-        environ['SETARCH_CMD'] = 'linux32'
-      # Inside the manylinux container, the python installations are located in
-      # special places...
-      environ['PYTHON'] = '/opt/python/{}/bin/python'.format(self.py_version)
-      environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
-      # Platform autodetection for the manylinux1 image breaks so we set the
-      # defines ourselves.
-      # TODO(atash) get better platform-detection support in core so we don't
-      # need to do this manually...
-      environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
-      environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
-      environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
-      return create_docker_jobspec(self.name,
-          'tools/dockerfile/grpc_artifact_python_manylinux_%s' % self.arch,
-          'tools/run_tests/artifacts/build_artifact_python.sh',
-          environ=environ,
-          timeout_seconds=60*60,
-          docker_base_image='quay.io/pypa/manylinux1_i686' if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
-    elif self.platform == 'windows':
-      if 'Python27' in self.py_version or 'Python34' in self.py_version:
-        environ['EXT_COMPILER'] = 'mingw32'
-      else:
-        environ['EXT_COMPILER'] = 'msvc'
-      # For some reason, the batch script %random% always runs with the same
-      # seed.  We create a random temp-dir here
-      dir = ''.join(random.choice(string.ascii_uppercase) for _ in range(10))
-      return create_jobspec(self.name,
-                            ['tools\\run_tests\\artifacts\\build_artifact_python.bat',
-                             self.py_version,
-                             '32' if self.arch == 'x86' else '64'],
-                            environ=environ,
-                            timeout_seconds=45*60,
-                            use_workspace=True)
-    else:
-      environ['PYTHON'] = self.py_version
-      environ['SKIP_PIP_INSTALL'] = 'TRUE'
-      return create_jobspec(self.name,
-                            ['tools/run_tests/artifacts/build_artifact_python.sh'],
-                            environ=environ,
-                            timeout_seconds=60*60,
-                            use_workspace=True)
+    def build_jobspec(self):
+        environ = {}
+        if self.platform == 'linux_extra':
+            # Raspberry Pi build
+            environ['PYTHON'] = '/usr/local/bin/python{}'.format(
+                self.py_version)
+            environ['PIP'] = '/usr/local/bin/pip{}'.format(self.py_version)
+            # https://github.com/resin-io-projects/armv7hf-debian-qemu/issues/9
+            # A QEMU bug causes submodule update to hang, so we copy directly
+            environ['RELATIVE_COPY_PATH'] = '.'
+            extra_args = ' --entrypoint=/usr/bin/qemu-arm-static '
+            return create_docker_jobspec(
+                self.name,
+                'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
+                'tools/run_tests/artifacts/build_artifact_python.sh',
+                environ=environ,
+                timeout_seconds=60 * 60 * 5,
+                docker_base_image='quay.io/grpc/raspbian_{}'.format(self.arch),
+                extra_docker_args=extra_args)
+        elif self.platform == 'linux':
+            if self.arch == 'x86':
+                environ['SETARCH_CMD'] = 'linux32'
+            # Inside the manylinux container, the python installations are located in
+            # special places...
+            environ['PYTHON'] = '/opt/python/{}/bin/python'.format(
+                self.py_version)
+            environ['PIP'] = '/opt/python/{}/bin/pip'.format(self.py_version)
+            # Platform autodetection for the manylinux1 image breaks so we set the
+            # defines ourselves.
+            # TODO(atash) get better platform-detection support in core so we don't
+            # need to do this manually...
+            environ['CFLAGS'] = '-DGPR_MANYLINUX1=1'
+            environ['GRPC_BUILD_GRPCIO_TOOLS_DEPENDENTS'] = 'TRUE'
+            environ['GRPC_BUILD_MANYLINUX_WHEEL'] = 'TRUE'
+            return create_docker_jobspec(
+                self.name,
+                'tools/dockerfile/grpc_artifact_python_manylinux_%s' %
+                self.arch,
+                'tools/run_tests/artifacts/build_artifact_python.sh',
+                environ=environ,
+                timeout_seconds=60 * 60,
+                docker_base_image='quay.io/pypa/manylinux1_i686'
+                if self.arch == 'x86' else 'quay.io/pypa/manylinux1_x86_64')
+        elif self.platform == 'windows':
+            if 'Python27' in self.py_version or 'Python34' in self.py_version:
+                environ['EXT_COMPILER'] = 'mingw32'
+            else:
+                environ['EXT_COMPILER'] = 'msvc'
+            # For some reason, the batch script %random% always runs with the
+            # same seed, so we create a random temp dir here instead.
+            dir = ''.join(
+                random.choice(string.ascii_uppercase) for _ in range(10))
+            return create_jobspec(
+                self.name, [
+                    'tools\\run_tests\\artifacts\\build_artifact_python.bat',
+                    self.py_version, '32' if self.arch == 'x86' else '64'
+                ],
+                environ=environ,
+                timeout_seconds=45 * 60,
+                use_workspace=True)
+        else:
+            environ['PYTHON'] = self.py_version
+            environ['SKIP_PIP_INSTALL'] = 'TRUE'
+            return create_jobspec(
+                self.name,
+                ['tools/run_tests/artifacts/build_artifact_python.sh'],
+                environ=environ,
+                timeout_seconds=60 * 60,
+                use_workspace=True)
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 class RubyArtifact:
-  """Builds ruby native gem."""
+    """Builds ruby native gem."""
 
-  def __init__(self, platform, arch):
-    self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'ruby', platform, arch]
+    def __init__(self, platform, arch):
+        self.name = 'ruby_native_gem_%s_%s' % (platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'ruby', platform, arch]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    # Ruby build uses docker internally and docker cannot be nested.
-    # We are using a custom workspace instead.
-    return create_jobspec(self.name,
-                          ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
-                          use_workspace=True,
-                          timeout_seconds=45*60)
+    def build_jobspec(self):
+        # Ruby build uses docker internally and docker cannot be nested.
+        # We are using a custom workspace instead.
+        return create_jobspec(
+            self.name, ['tools/run_tests/artifacts/build_artifact_ruby.sh'],
+            use_workspace=True,
+            timeout_seconds=45 * 60)
 
 
 class CSharpExtArtifact:
-  """Builds C# native extension library"""
+    """Builds C# native extension library"""
 
-  def __init__(self, platform, arch):
-    self.name = 'csharp_ext_%s_%s' % (platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'csharp', platform, arch]
+    def __init__(self, platform, arch):
+        self.name = 'csharp_ext_%s_%s' % (platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'csharp', platform, arch]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'windows':
-      cmake_arch_option = 'Win32' if self.arch == 'x86' else self.arch
-      return create_jobspec(self.name,
-                            ['tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
-                             cmake_arch_option],
-                            use_workspace=True)
-    else:
-      environ = {'CONFIG': 'opt',
-                 'EMBED_OPENSSL': 'true',
-                 'EMBED_ZLIB': 'true',
-                 'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
-                 'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
-                 'LDFLAGS': ''}
-      if self.platform == 'linux':
-        return create_docker_jobspec(self.name,
-            'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
-            'tools/run_tests/artifacts/build_artifact_csharp.sh',
-            environ=environ)
-      else:
-        archflag = _ARCH_FLAG_MAP[self.arch]
-        environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
-        environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
-        environ['LDFLAGS'] += ' %s' % archflag
-        return create_jobspec(self.name,
-                              ['tools/run_tests/artifacts/build_artifact_csharp.sh'],
-                              environ=environ,
-                              use_workspace=True)
+    def build_jobspec(self):
+        if self.platform == 'windows':
+            cmake_arch_option = 'Win32' if self.arch == 'x86' else self.arch
+            return create_jobspec(
+                self.name, [
+                    'tools\\run_tests\\artifacts\\build_artifact_csharp.bat',
+                    cmake_arch_option
+                ],
+                use_workspace=True)
+        else:
+            environ = {
+                'CONFIG': 'opt',
+                'EMBED_OPENSSL': 'true',
+                'EMBED_ZLIB': 'true',
+                'CFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
+                'CXXFLAGS': '-DGPR_BACKWARDS_COMPATIBILITY_MODE',
+                'LDFLAGS': ''
+            }
+            if self.platform == 'linux':
+                return create_docker_jobspec(
+                    self.name,
+                    'tools/dockerfile/grpc_artifact_linux_%s' % self.arch,
+                    'tools/run_tests/artifacts/build_artifact_csharp.sh',
+                    environ=environ)
+            else:
+                archflag = _ARCH_FLAG_MAP[self.arch]
+                environ['CFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
+                environ['CXXFLAGS'] += ' %s %s' % (archflag, _MACOS_COMPAT_FLAG)
+                environ['LDFLAGS'] += ' %s' % archflag
+                return create_jobspec(
+                    self.name,
+                    ['tools/run_tests/artifacts/build_artifact_csharp.sh'],
+                    environ=environ,
+                    use_workspace=True)
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
+
 
 class PHPArtifact:
-  """Builds PHP PECL package"""
+    """Builds PHP PECL package"""
 
-  def __init__(self, platform, arch):
-    self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'php', platform, arch]
+    def __init__(self, platform, arch):
+        self.name = 'php_pecl_package_{0}_{1}'.format(platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'php', platform, arch]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'linux':
-      return create_docker_jobspec(
-          self.name,
-          'tools/dockerfile/grpc_artifact_linux_{}'.format(self.arch),
-          'tools/run_tests/artifacts/build_artifact_php.sh')
-    else:
-      return create_jobspec(self.name,
-                            ['tools/run_tests/artifacts/build_artifact_php.sh'],
-                            use_workspace=True)
+    def build_jobspec(self):
+        if self.platform == 'linux':
+            return create_docker_jobspec(
+                self.name, 'tools/dockerfile/grpc_artifact_linux_{}'.format(
+                    self.arch),
+                'tools/run_tests/artifacts/build_artifact_php.sh')
+        else:
+            return create_jobspec(
+                self.name, ['tools/run_tests/artifacts/build_artifact_php.sh'],
+                use_workspace=True)
+
 
 class ProtocArtifact:
-  """Builds protoc and protoc-plugin artifacts"""
+    """Builds protoc and protoc-plugin artifacts"""
 
-  def __init__(self, platform, arch):
-    self.name = 'protoc_%s_%s' % (platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.labels = ['artifact', 'protoc', platform, arch]
+    def __init__(self, platform, arch):
+        self.name = 'protoc_%s_%s' % (platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.labels = ['artifact', 'protoc', platform, arch]
 
-  def pre_build_jobspecs(self):
-      return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform != 'windows':
-      cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
-      ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
-      if self.platform != 'macos':
-        ldflags += '  -static-libgcc -static-libstdc++ -s'
-      environ={'CONFIG': 'opt',
-               'CXXFLAGS': cxxflags,
-               'LDFLAGS': ldflags,
-               'PROTOBUF_LDFLAGS_EXTRA': ldflags}
-      if self.platform == 'linux':
-        return create_docker_jobspec(self.name,
-            'tools/dockerfile/grpc_artifact_protoc',
-            'tools/run_tests/artifacts/build_artifact_protoc.sh',
-            environ=environ)
-      else:
-        environ['CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
-        return create_jobspec(self.name,
-            ['tools/run_tests/artifacts/build_artifact_protoc.sh'],
-            environ=environ,
-            use_workspace=True)
-    else:
-      generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
-      return create_jobspec(self.name,
-                            ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
-                            environ={'generator': generator},
-                            use_workspace=True)
+    def build_jobspec(self):
+        if self.platform != 'windows':
+            cxxflags = '-DNDEBUG %s' % _ARCH_FLAG_MAP[self.arch]
+            ldflags = '%s' % _ARCH_FLAG_MAP[self.arch]
+            if self.platform != 'macos':
+                ldflags += '  -static-libgcc -static-libstdc++ -s'
+            environ = {
+                'CONFIG': 'opt',
+                'CXXFLAGS': cxxflags,
+                'LDFLAGS': ldflags,
+                'PROTOBUF_LDFLAGS_EXTRA': ldflags
+            }
+            if self.platform == 'linux':
+                return create_docker_jobspec(
+                    self.name,
+                    'tools/dockerfile/grpc_artifact_protoc',
+                    'tools/run_tests/artifacts/build_artifact_protoc.sh',
+                    environ=environ)
+            else:
+                environ[
+                    'CXXFLAGS'] += ' -std=c++11 -stdlib=libc++ %s' % _MACOS_COMPAT_FLAG
+                return create_jobspec(
+                    self.name,
+                    ['tools/run_tests/artifacts/build_artifact_protoc.sh'],
+                    environ=environ,
+                    use_workspace=True)
+        else:
+            generator = 'Visual Studio 14 2015 Win64' if self.arch == 'x64' else 'Visual Studio 14 2015'
+            return create_jobspec(
+                self.name,
+                ['tools\\run_tests\\artifacts\\build_artifact_protoc.bat'],
+                environ={'generator': generator},
+                use_workspace=True)
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 def targets():
-  """Gets list of supported targets"""
-  return ([Cls(platform, arch)
-           for Cls in (CSharpExtArtifact, ProtocArtifact)
-           for platform in ('linux', 'macos', 'windows')
-           for arch in ('x86', 'x64')] +
-          [PythonArtifact('linux', 'x86', 'cp27-cp27m'),
-           PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
-           PythonArtifact('linux', 'x86', 'cp34-cp34m'),
-           PythonArtifact('linux', 'x86', 'cp35-cp35m'),
-           PythonArtifact('linux', 'x86', 'cp36-cp36m'),
-           PythonArtifact('linux_extra', 'armv7', '2.7'),
-           PythonArtifact('linux_extra', 'armv7', '3.4'),
-           PythonArtifact('linux_extra', 'armv7', '3.5'),
-           PythonArtifact('linux_extra', 'armv7', '3.6'),
-           PythonArtifact('linux_extra', 'armv6', '2.7'),
-           PythonArtifact('linux_extra', 'armv6', '3.4'),
-           PythonArtifact('linux_extra', 'armv6', '3.5'),
-           PythonArtifact('linux_extra', 'armv6', '3.6'),
-           PythonArtifact('linux', 'x64', 'cp27-cp27m'),
-           PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
-           PythonArtifact('linux', 'x64', 'cp34-cp34m'),
-           PythonArtifact('linux', 'x64', 'cp35-cp35m'),
-           PythonArtifact('linux', 'x64', 'cp36-cp36m'),
-           PythonArtifact('macos', 'x64', 'python2.7'),
-           PythonArtifact('macos', 'x64', 'python3.4'),
-           PythonArtifact('macos', 'x64', 'python3.5'),
-           PythonArtifact('macos', 'x64', 'python3.6'),
-           PythonArtifact('windows', 'x86', 'Python27_32bits'),
-           PythonArtifact('windows', 'x86', 'Python34_32bits'),
-           PythonArtifact('windows', 'x86', 'Python35_32bits'),
-           PythonArtifact('windows', 'x86', 'Python36_32bits'),
-           PythonArtifact('windows', 'x64', 'Python27'),
-           PythonArtifact('windows', 'x64', 'Python34'),
-           PythonArtifact('windows', 'x64', 'Python35'),
-           PythonArtifact('windows', 'x64', 'Python36'),
-           RubyArtifact('linux', 'x64'),
-           RubyArtifact('macos', 'x64'),
-           PHPArtifact('linux', 'x64'),
-           PHPArtifact('macos', 'x64')])
+    """Gets list of supported targets"""
+    return ([
+        Cls(platform, arch)
+        for Cls in (CSharpExtArtifact, ProtocArtifact)
+        for platform in ('linux', 'macos', 'windows') for arch in ('x86', 'x64')
+    ] + [
+        PythonArtifact('linux', 'x86', 'cp27-cp27m'),
+        PythonArtifact('linux', 'x86', 'cp27-cp27mu'),
+        PythonArtifact('linux', 'x86', 'cp34-cp34m'),
+        PythonArtifact('linux', 'x86', 'cp35-cp35m'),
+        PythonArtifact('linux', 'x86', 'cp36-cp36m'),
+        PythonArtifact('linux_extra', 'armv7', '2.7'),
+        PythonArtifact('linux_extra', 'armv7', '3.4'),
+        PythonArtifact('linux_extra', 'armv7', '3.5'),
+        PythonArtifact('linux_extra', 'armv7', '3.6'),
+        PythonArtifact('linux_extra', 'armv6', '2.7'),
+        PythonArtifact('linux_extra', 'armv6', '3.4'),
+        PythonArtifact('linux_extra', 'armv6', '3.5'),
+        PythonArtifact('linux_extra', 'armv6', '3.6'),
+        PythonArtifact('linux', 'x64', 'cp27-cp27m'),
+        PythonArtifact('linux', 'x64', 'cp27-cp27mu'),
+        PythonArtifact('linux', 'x64', 'cp34-cp34m'),
+        PythonArtifact('linux', 'x64', 'cp35-cp35m'),
+        PythonArtifact('linux', 'x64', 'cp36-cp36m'),
+        PythonArtifact('macos', 'x64', 'python2.7'),
+        PythonArtifact('macos', 'x64', 'python3.4'),
+        PythonArtifact('macos', 'x64', 'python3.5'),
+        PythonArtifact('macos', 'x64', 'python3.6'),
+        PythonArtifact('windows', 'x86', 'Python27_32bits'),
+        PythonArtifact('windows', 'x86', 'Python34_32bits'),
+        PythonArtifact('windows', 'x86', 'Python35_32bits'),
+        PythonArtifact('windows', 'x86', 'Python36_32bits'),
+        PythonArtifact('windows', 'x64', 'Python27'),
+        PythonArtifact('windows', 'x64', 'Python34'),
+        PythonArtifact('windows', 'x64', 'Python35'),
+        PythonArtifact('windows', 'x64', 'Python36'),
+        RubyArtifact('linux', 'x64'),
+        RubyArtifact('macos', 'x64'),
+        PHPArtifact('linux', 'x64'),
+        PHPArtifact('macos', 'x64')
+    ])
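
For reference, the reformatted targets() above builds the artifact list as a cross-product of artifact classes, platforms and architectures, plus explicit per-language entries. A minimal, self-contained sketch of that pattern (DemoArtifact and its labels are illustrative stand-ins, not the repo's real classes):

# Illustrative stand-in for the cross-product pattern used by targets();
# not part of artifact_targets.py.


class DemoArtifact(object):
    """A fake artifact target with a name and filter labels."""

    def __init__(self, kind, platform, arch):
        self.name = '%s_%s_%s' % (kind, platform, arch)
        self.labels = ['artifact', kind, platform, arch]

    def __str__(self):
        return self.name


def demo_targets():
    """Cross-product of kinds, platforms and arches, mirroring targets()."""
    return [
        DemoArtifact(kind, platform, arch)
        for kind in ('csharp_ext', 'protoc')
        for platform in ('linux', 'macos', 'windows')
        for arch in ('x86', 'x64')
    ]


if __name__ == '__main__':
    # Example: keep only the linux targets by filtering on labels.
    print([str(t) for t in demo_targets() if 'linux' in t.labels])
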
diff --git a/tools/run_tests/artifacts/distribtest_targets.py b/tools/run_tests/artifacts/distribtest_targets.py
index bf3d7a5..b2cc16a 100644
--- a/tools/run_tests/artifacts/distribtest_targets.py
+++ b/tools/run_tests/artifacts/distribtest_targets.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Definition of targets run distribution package tests."""
 
 import os.path
@@ -22,274 +21,311 @@
 import python_utils.jobset as jobset
 
 
-def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
-                   flake_retries=0, timeout_retries=0,
-                   copy_rel_path=None):
-  """Creates jobspec for a task running under docker."""
-  environ = environ.copy()
-  environ['RUN_COMMAND'] = shell_command
-  # the entire repo will be cloned if copy_rel_path is not set.
-  if copy_rel_path:
-    environ['RELATIVE_COPY_PATH'] = copy_rel_path
+def create_docker_jobspec(name,
+                          dockerfile_dir,
+                          shell_command,
+                          environ={},
+                          flake_retries=0,
+                          timeout_retries=0,
+                          copy_rel_path=None):
+    """Creates jobspec for a task running under docker."""
+    environ = environ.copy()
+    environ['RUN_COMMAND'] = shell_command
+    # the entire repo will be cloned if copy_rel_path is not set.
+    if copy_rel_path:
+        environ['RELATIVE_COPY_PATH'] = copy_rel_path
 
-  docker_args=[]
-  for k,v in environ.items():
-    docker_args += ['-e', '%s=%s' % (k, v)]
-  docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
-                'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'}
-  jobspec = jobset.JobSpec(
-          cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
-          environ=docker_env,
-          shortname='distribtest.%s' % (name),
-          timeout_seconds=30*60,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries)
-  return jobspec
+    docker_args = []
+    for k, v in environ.items():
+        docker_args += ['-e', '%s=%s' % (k, v)]
+    docker_env = {
+        'DOCKERFILE_DIR': dockerfile_dir,
+        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'
+    }
+    jobspec = jobset.JobSpec(
+        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+        docker_args,
+        environ=docker_env,
+        shortname='distribtest.%s' % (name),
+        timeout_seconds=30 * 60,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries)
+    return jobspec
 
 
-def create_jobspec(name, cmdline, environ=None, shell=False,
-                   flake_retries=0, timeout_retries=0,
-                   use_workspace=False):
-  """Creates jobspec."""
-  environ = environ.copy()
-  if use_workspace:
-    environ['WORKSPACE_NAME'] = 'workspace_%s' % name
-    cmdline = ['bash',
-               'tools/run_tests/artifacts/run_in_workspace.sh'] + cmdline
-  jobspec = jobset.JobSpec(
-          cmdline=cmdline,
-          environ=environ,
-          shortname='distribtest.%s' % (name),
-          timeout_seconds=10*60,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries,
-          shell=shell)
-  return jobspec
+def create_jobspec(name,
+                   cmdline,
+                   environ=None,
+                   shell=False,
+                   flake_retries=0,
+                   timeout_retries=0,
+                   use_workspace=False,
+                   timeout_seconds=10 * 60):
+    """Creates jobspec."""
+    environ = environ.copy()
+    if use_workspace:
+        environ['WORKSPACE_NAME'] = 'workspace_%s' % name
+        cmdline = ['bash', 'tools/run_tests/artifacts/run_in_workspace.sh'
+                  ] + cmdline
+    jobspec = jobset.JobSpec(
+        cmdline=cmdline,
+        environ=environ,
+        shortname='distribtest.%s' % (name),
+        timeout_seconds=timeout_seconds,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries,
+        shell=shell)
+    return jobspec
 
 
 class CSharpDistribTest(object):
-  """Tests C# NuGet package"""
+    """Tests C# NuGet package"""
 
-  def __init__(self, platform, arch, docker_suffix=None, use_dotnet_cli=False):
-    self.name = 'csharp_%s_%s' % (platform, arch)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.labels = ['distribtest', 'csharp', platform, arch]
-    self.script_suffix = ''
-    if docker_suffix:
-      self.name += '_%s' % docker_suffix
-      self.labels.append(docker_suffix)
-    if use_dotnet_cli:
-      self.name += '_dotnetcli'
-      self.script_suffix = '_dotnetcli'
-      self.labels.append('dotnetcli')
-    else:
-      self.labels.append('olddotnet')
+    def __init__(self, platform, arch, docker_suffix=None,
+                 use_dotnet_cli=False):
+        self.name = 'csharp_%s_%s' % (platform, arch)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.labels = ['distribtest', 'csharp', platform, arch]
+        self.script_suffix = ''
+        if docker_suffix:
+            self.name += '_%s' % docker_suffix
+            self.labels.append(docker_suffix)
+        if use_dotnet_cli:
+            self.name += '_dotnetcli'
+            self.script_suffix = '_dotnetcli'
+            self.labels.append('dotnetcli')
+        else:
+            self.labels.append('olddotnet')
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'linux':
-      return create_docker_jobspec(self.name,
-          'tools/dockerfile/distribtest/csharp_%s_%s' % (
-              self.docker_suffix,
-              self.arch),
-          'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix,
-          copy_rel_path='test/distrib')
-    elif self.platform == 'macos':
-      return create_jobspec(self.name,
-          ['test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix],
-          environ={'EXTERNAL_GIT_ROOT': '../../../..'},
-          use_workspace=True)
-    elif self.platform == 'windows':
-      if self.arch == 'x64':
-        # Use double leading / as the first occurence gets removed by msys bash
-        # when invoking the .bat file (side-effect of posix path conversion)
-        environ={'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
-                 'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'}
-      else:
-        environ={'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
-      return create_jobspec(self.name,
-          ['test\\distrib\\csharp\\run_distrib_test%s.bat' % self.script_suffix],
-          environ=environ,
-          use_workspace=True)
-    else:
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        if self.platform == 'linux':
+            return create_docker_jobspec(
+                self.name,
+                'tools/dockerfile/distribtest/csharp_%s_%s' %
+                (self.docker_suffix, self.arch),
+                'test/distrib/csharp/run_distrib_test%s.sh' %
+                self.script_suffix,
+                copy_rel_path='test/distrib')
+        elif self.platform == 'macos':
+            return create_jobspec(
+                self.name, [
+                    'test/distrib/csharp/run_distrib_test%s.sh' %
+                    self.script_suffix
+                ],
+                environ={'EXTERNAL_GIT_ROOT': '../../../..'},
+                use_workspace=True)
+        elif self.platform == 'windows':
+            if self.arch == 'x64':
+                # Use double leading / as the first occurrence gets removed by msys bash
+                # when invoking the .bat file (side-effect of posix path conversion)
+                environ = {
+                    'MSBUILD_EXTRA_ARGS': '//p:Platform=x64',
+                    'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'
+                }
+            else:
+                environ = {'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
+            return create_jobspec(
+                self.name, [
+                    'test\\distrib\\csharp\\run_distrib_test%s.bat' %
+                    self.script_suffix
+                ],
+                environ=environ,
+                use_workspace=True)
+        else:
+            raise Exception("Not supported yet.")
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
+
 
 class PythonDistribTest(object):
-  """Tests Python package"""
+    """Tests Python package"""
 
-  def __init__(self, platform, arch, docker_suffix):
-    self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
+    def __init__(self, platform, arch, docker_suffix):
+        self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if not self.platform == 'linux':
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        if not self.platform == 'linux':
+            raise Exception("Not supported yet.")
 
-    return create_docker_jobspec(self.name,
-          'tools/dockerfile/distribtest/python_%s_%s' % (
-              self.docker_suffix,
-              self.arch),
-          'test/distrib/python/run_distrib_test.sh',
-          copy_rel_path='test/distrib')
+        return create_docker_jobspec(
+            self.name,
+            'tools/dockerfile/distribtest/python_%s_%s' % (self.docker_suffix,
+                                                           self.arch),
+            'test/distrib/python/run_distrib_test.sh',
+            copy_rel_path='test/distrib')
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 class RubyDistribTest(object):
-  """Tests Ruby package"""
+    """Tests Ruby package"""
 
-  def __init__(self, platform, arch, docker_suffix):
-    self.name = 'ruby_%s_%s_%s' % (platform, arch, docker_suffix)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
+    def __init__(self, platform, arch, docker_suffix):
+        self.name = 'ruby_%s_%s_%s' % (platform, arch, docker_suffix)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    arch_to_gem_arch = {
-        'x64': 'x86_64',
-        'x86': 'x86',
-    }
-    if not self.platform == 'linux':
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        arch_to_gem_arch = {
+            'x64': 'x86_64',
+            'x86': 'x86',
+        }
+        if not self.platform == 'linux':
+            raise Exception("Not supported yet.")
 
-    return create_docker_jobspec(self.name,
-          'tools/dockerfile/distribtest/ruby_%s_%s' % (
-              self.docker_suffix,
-              self.arch),
-          'test/distrib/ruby/run_distrib_test.sh %s %s' %
-          (arch_to_gem_arch[self.arch], self.platform),
-          copy_rel_path='test/distrib')
+        return create_docker_jobspec(
+            self.name,
+            'tools/dockerfile/distribtest/ruby_%s_%s' % (self.docker_suffix,
+                                                         self.arch),
+            'test/distrib/ruby/run_distrib_test.sh %s %s' %
+            (arch_to_gem_arch[self.arch], self.platform),
+            copy_rel_path='test/distrib')
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 class PHPDistribTest(object):
-  """Tests PHP package"""
+    """Tests PHP package"""
 
-  def __init__(self, platform, arch, docker_suffix=None):
-    self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
+    def __init__(self, platform, arch, docker_suffix=None):
+        self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'linux':
-      return create_docker_jobspec(self.name,
-                                   'tools/dockerfile/distribtest/php_%s_%s' % (
-                                       self.docker_suffix,
-                                       self.arch),
-                                   'test/distrib/php/run_distrib_test.sh',
-                                   copy_rel_path='test/distrib')
-    elif self.platform == 'macos':
-      return create_jobspec(self.name,
-          ['test/distrib/php/run_distrib_test.sh'],
-          environ={'EXTERNAL_GIT_ROOT': '../../../..'},
-          use_workspace=True)
-    else:
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        if self.platform == 'linux':
+            return create_docker_jobspec(
+                self.name,
+                'tools/dockerfile/distribtest/php_%s_%s' % (self.docker_suffix,
+                                                            self.arch),
+                'test/distrib/php/run_distrib_test.sh',
+                copy_rel_path='test/distrib')
+        elif self.platform == 'macos':
+            return create_jobspec(
+                self.name, ['test/distrib/php/run_distrib_test.sh'],
+                environ={'EXTERNAL_GIT_ROOT': '../../../..'},
+                use_workspace=True)
+        else:
+            raise Exception("Not supported yet.")
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 class CppDistribTest(object):
-  """Tests Cpp make intall by building examples."""
+    """Tests Cpp make intall by building examples."""
 
-  def __init__(self, platform, arch, docker_suffix=None, testcase=None):
-    self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix, testcase)
-    self.platform = platform
-    self.arch = arch
-    self.docker_suffix = docker_suffix
-    self.testcase = testcase
-    self.labels = ['distribtest', 'cpp', platform, arch, docker_suffix, testcase]
+    def __init__(self, platform, arch, docker_suffix=None, testcase=None):
+        if platform == 'linux':
+            self.name = 'cpp_%s_%s_%s_%s' % (platform, arch, docker_suffix,
+                                             testcase)
+        else:
+            self.name = 'cpp_%s_%s_%s' % (platform, arch, testcase)
+        self.platform = platform
+        self.arch = arch
+        self.docker_suffix = docker_suffix
+        self.testcase = testcase
+        self.labels = [
+            'distribtest', 'cpp', platform, arch, docker_suffix, testcase
+        ]
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.platform == 'linux':
-      return create_docker_jobspec(self.name,
-                                   'tools/dockerfile/distribtest/cpp_%s_%s' % (
-                                       self.docker_suffix,
-                                       self.arch),
-                                   'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
-    else:
-      raise Exception("Not supported yet.")
+    def build_jobspec(self):
+        if self.platform == 'linux':
+            return create_docker_jobspec(
+                self.name, 'tools/dockerfile/distribtest/cpp_%s_%s' %
+                (self.docker_suffix, self.arch),
+                'test/distrib/cpp/run_distrib_test_%s.sh' % self.testcase)
+        elif self.platform == 'windows':
+            return create_jobspec(
+                self.name,
+                ['test\\distrib\\cpp\\run_distrib_test_%s.bat' % self.testcase],
+                environ={},
+                timeout_seconds=30 * 60,
+                use_workspace=True)
+        else:
+            raise Exception("Not supported yet.")
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
 
 
 def targets():
-  """Gets list of supported targets"""
-  return [CppDistribTest('linux', 'x64', 'jessie', 'routeguide'),
-          CppDistribTest('linux', 'x64', 'jessie', 'cmake'),
-          CSharpDistribTest('linux', 'x64', 'wheezy'),
-          CSharpDistribTest('linux', 'x64', 'jessie'),
-          CSharpDistribTest('linux', 'x86', 'jessie'),
-          CSharpDistribTest('linux', 'x64', 'centos7'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1504'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
-          CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
-          CSharpDistribTest('macos', 'x86'),
-          CSharpDistribTest('windows', 'x86'),
-          CSharpDistribTest('windows', 'x64'),
-          PythonDistribTest('linux', 'x64', 'wheezy'),
-          PythonDistribTest('linux', 'x64', 'jessie'),
-          PythonDistribTest('linux', 'x86', 'jessie'),
-          PythonDistribTest('linux', 'x64', 'centos6'),
-          PythonDistribTest('linux', 'x64', 'centos7'),
-          PythonDistribTest('linux', 'x64', 'fedora20'),
-          PythonDistribTest('linux', 'x64', 'fedora21'),
-          PythonDistribTest('linux', 'x64', 'fedora22'),
-          PythonDistribTest('linux', 'x64', 'fedora23'),
-          PythonDistribTest('linux', 'x64', 'opensuse'),
-          PythonDistribTest('linux', 'x64', 'arch'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1204'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1404'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1504'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1510'),
-          PythonDistribTest('linux', 'x64', 'ubuntu1604'),
-          RubyDistribTest('linux', 'x64', 'wheezy'),
-          RubyDistribTest('linux', 'x64', 'jessie'),
-          RubyDistribTest('linux', 'x86', 'jessie'),
-          RubyDistribTest('linux', 'x64', 'centos6'),
-          RubyDistribTest('linux', 'x64', 'centos7'),
-          RubyDistribTest('linux', 'x64', 'fedora20'),
-          RubyDistribTest('linux', 'x64', 'fedora21'),
-          RubyDistribTest('linux', 'x64', 'fedora22'),
-          RubyDistribTest('linux', 'x64', 'fedora23'),
-          RubyDistribTest('linux', 'x64', 'opensuse'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1204'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1404'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1504'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1510'),
-          RubyDistribTest('linux', 'x64', 'ubuntu1604'),
-          PHPDistribTest('linux', 'x64', 'jessie'),
-          PHPDistribTest('macos', 'x64'),
-          ]
+    """Gets list of supported targets"""
+    return [
+        CppDistribTest('linux', 'x64', 'jessie', 'routeguide'),
+        CppDistribTest('linux', 'x64', 'jessie', 'cmake'),
+        CppDistribTest('windows', 'x86', testcase='cmake'),
+        CSharpDistribTest('linux', 'x64', 'wheezy'),
+        CSharpDistribTest('linux', 'x64', 'jessie'),
+        CSharpDistribTest('linux', 'x86', 'jessie'),
+        CSharpDistribTest('linux', 'x64', 'centos7'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1504'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
+        CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
+        CSharpDistribTest('macos', 'x86'),
+        CSharpDistribTest('windows', 'x86'),
+        CSharpDistribTest('windows', 'x64'),
+        PythonDistribTest('linux', 'x64', 'wheezy'),
+        PythonDistribTest('linux', 'x64', 'jessie'),
+        PythonDistribTest('linux', 'x86', 'jessie'),
+        PythonDistribTest('linux', 'x64', 'centos6'),
+        PythonDistribTest('linux', 'x64', 'centos7'),
+        PythonDistribTest('linux', 'x64', 'fedora20'),
+        PythonDistribTest('linux', 'x64', 'fedora21'),
+        PythonDistribTest('linux', 'x64', 'fedora22'),
+        PythonDistribTest('linux', 'x64', 'fedora23'),
+        PythonDistribTest('linux', 'x64', 'opensuse'),
+        PythonDistribTest('linux', 'x64', 'arch'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1204'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1404'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1504'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1510'),
+        PythonDistribTest('linux', 'x64', 'ubuntu1604'),
+        RubyDistribTest('linux', 'x64', 'wheezy'),
+        RubyDistribTest('linux', 'x64', 'jessie'),
+        RubyDistribTest('linux', 'x86', 'jessie'),
+        RubyDistribTest('linux', 'x64', 'centos6'),
+        RubyDistribTest('linux', 'x64', 'centos7'),
+        RubyDistribTest('linux', 'x64', 'fedora20'),
+        RubyDistribTest('linux', 'x64', 'fedora21'),
+        RubyDistribTest('linux', 'x64', 'fedora22'),
+        RubyDistribTest('linux', 'x64', 'fedora23'),
+        RubyDistribTest('linux', 'x64', 'opensuse'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1204'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1404'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1504'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1510'),
+        RubyDistribTest('linux', 'x64', 'ubuntu1604'),
+        PHPDistribTest('linux', 'x64', 'jessie'),
+        PHPDistribTest('macos', 'x64'),
+    ]
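
The main functional change in distribtest_targets.py above is that create_jobspec() now takes an overridable timeout_seconds argument (defaulting to 10 minutes), which the new Windows C++ cmake distribtest raises to 30 minutes. A minimal sketch of that interaction, using a stand-in namedtuple rather than the real python_utils.jobset.JobSpec:

# Stand-in JobSpec for illustration only; not python_utils.jobset.JobSpec.
import collections

FakeJobSpec = collections.namedtuple(
    'FakeJobSpec', ['cmdline', 'shortname', 'timeout_seconds'])


def make_distribtest_jobspec(name, cmdline, timeout_seconds=10 * 60):
    """Mirrors create_jobspec()'s new overridable timeout."""
    return FakeJobSpec(
        cmdline=cmdline,
        shortname='distribtest.%s' % name,
        timeout_seconds=timeout_seconds)


# Default jobs keep the 10-minute timeout...
default_job = make_distribtest_jobspec('demo', ['true'])
assert default_job.timeout_seconds == 600
# ...while the Windows C++ cmake distribtest passes 30 * 60 explicitly.
windows_job = make_distribtest_jobspec(
    'cpp_windows_x86_cmake', ['run_distrib_test_cmake.bat'],
    timeout_seconds=30 * 60)
assert windows_job.timeout_seconds == 1800
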
diff --git a/tools/run_tests/artifacts/package_targets.py b/tools/run_tests/artifacts/package_targets.py
index ff93bb3..abf1b5e 100644
--- a/tools/run_tests/artifacts/package_targets.py
+++ b/tools/run_tests/artifacts/package_targets.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Definition of targets to build distribution packages."""
 
 import os.path
@@ -22,128 +21,140 @@
 import python_utils.jobset as jobset
 
 
-def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
-                   flake_retries=0, timeout_retries=0):
-  """Creates jobspec for a task running under docker."""
-  environ = environ.copy()
-  environ['RUN_COMMAND'] = shell_command
+def create_docker_jobspec(name,
+                          dockerfile_dir,
+                          shell_command,
+                          environ={},
+                          flake_retries=0,
+                          timeout_retries=0):
+    """Creates jobspec for a task running under docker."""
+    environ = environ.copy()
+    environ['RUN_COMMAND'] = shell_command
 
-  docker_args=[]
-  for k,v in environ.items():
-    docker_args += ['-e', '%s=%s' % (k, v)]
-  docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
-                'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
-                'OUTPUT_DIR': 'artifacts'}
-  jobspec = jobset.JobSpec(
-          cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
-          environ=docker_env,
-          shortname='build_package.%s' % (name),
-          timeout_seconds=30*60,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries)
-  return jobspec
+    docker_args = []
+    for k, v in environ.items():
+        docker_args += ['-e', '%s=%s' % (k, v)]
+    docker_env = {
+        'DOCKERFILE_DIR': dockerfile_dir,
+        'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh',
+        'OUTPUT_DIR': 'artifacts'
+    }
+    jobspec = jobset.JobSpec(
+        cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] +
+        docker_args,
+        environ=docker_env,
+        shortname='build_package.%s' % (name),
+        timeout_seconds=30 * 60,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries)
+    return jobspec
 
-def create_jobspec(name, cmdline, environ=None, cwd=None, shell=False,
-                   flake_retries=0, timeout_retries=0):
-  """Creates jobspec."""
-  jobspec = jobset.JobSpec(
-          cmdline=cmdline,
-          environ=environ,
-          cwd=cwd,
-          shortname='build_package.%s' % (name),
-          timeout_seconds=10*60,
-          flake_retries=flake_retries,
-          timeout_retries=timeout_retries,
-          shell=shell)
-  return jobspec
+
+def create_jobspec(name,
+                   cmdline,
+                   environ=None,
+                   cwd=None,
+                   shell=False,
+                   flake_retries=0,
+                   timeout_retries=0):
+    """Creates jobspec."""
+    jobspec = jobset.JobSpec(
+        cmdline=cmdline,
+        environ=environ,
+        cwd=cwd,
+        shortname='build_package.%s' % (name),
+        timeout_seconds=10 * 60,
+        flake_retries=flake_retries,
+        timeout_retries=timeout_retries,
+        shell=shell)
+    return jobspec
 
 
 class CSharpPackage:
-  """Builds C# nuget packages."""
+    """Builds C# nuget packages."""
 
-  def __init__(self, linux=False):
-    self.linux = linux
-    self.labels = ['package', 'csharp']
-    if linux:
-      self.name = 'csharp_package_dotnetcli_linux'
-      self.labels += ['linux']
-    else:
-      self.name = 'csharp_package_dotnetcli_windows'
-      self.labels += ['windows']
+    def __init__(self, linux=False):
+        self.linux = linux
+        self.labels = ['package', 'csharp']
+        if linux:
+            self.name = 'csharp_package_dotnetcli_linux'
+            self.labels += ['linux']
+        else:
+            self.name = 'csharp_package_dotnetcli_windows'
+            self.labels += ['windows']
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    if self.linux:
-      return create_docker_jobspec(
-          self.name,
-          'tools/dockerfile/test/csharp_jessie_x64',
-          'src/csharp/build_packages_dotnetcli.sh')
-    else:
-      return create_jobspec(self.name,
-                            ['build_packages_dotnetcli.bat'],
-                            cwd='src\\csharp',
-                            shell=True)
+    def build_jobspec(self):
+        if self.linux:
+            return create_docker_jobspec(
+                self.name, 'tools/dockerfile/test/csharp_jessie_x64',
+                'src/csharp/build_packages_dotnetcli.sh')
+        else:
+            return create_jobspec(
+                self.name, ['build_packages_dotnetcli.bat'],
+                cwd='src\\csharp',
+                shell=True)
 
-  def __str__(self):
-    return self.name
+    def __str__(self):
+        return self.name
+
 
 class RubyPackage:
-  """Collects ruby gems created in the artifact phase"""
+    """Collects ruby gems created in the artifact phase"""
 
-  def __init__(self):
-    self.name = 'ruby_package'
-    self.labels = ['package', 'ruby', 'linux']
+    def __init__(self):
+        self.name = 'ruby_package'
+        self.labels = ['package', 'ruby', 'linux']
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    return create_docker_jobspec(
-        self.name,
-        'tools/dockerfile/grpc_artifact_linux_x64',
-        'tools/run_tests/artifacts/build_package_ruby.sh')
+    def build_jobspec(self):
+        return create_docker_jobspec(
+            self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+            'tools/run_tests/artifacts/build_package_ruby.sh')
 
 
 class PythonPackage:
-  """Collects python eggs and wheels created in the artifact phase"""
+    """Collects python eggs and wheels created in the artifact phase"""
 
-  def __init__(self):
-    self.name = 'python_package'
-    self.labels = ['package', 'python', 'linux']
+    def __init__(self):
+        self.name = 'python_package'
+        self.labels = ['package', 'python', 'linux']
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    return create_docker_jobspec(
-        self.name,
-        'tools/dockerfile/grpc_artifact_linux_x64',
-        'tools/run_tests/artifacts/build_package_python.sh')
+    def build_jobspec(self):
+        return create_docker_jobspec(
+            self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+            'tools/run_tests/artifacts/build_package_python.sh')
 
 
 class PHPPackage:
-  """Copy PHP PECL package artifact"""
+    """Copy PHP PECL package artifact"""
 
-  def __init__(self):
-    self.name = 'php_package'
-    self.labels = ['package', 'php', 'linux']
+    def __init__(self):
+        self.name = 'php_package'
+        self.labels = ['package', 'php', 'linux']
 
-  def pre_build_jobspecs(self):
-    return []
+    def pre_build_jobspecs(self):
+        return []
 
-  def build_jobspec(self):
-    return create_docker_jobspec(
-        self.name,
-        'tools/dockerfile/grpc_artifact_linux_x64',
-        'tools/run_tests/artifacts/build_package_php.sh')
+    def build_jobspec(self):
+        return create_docker_jobspec(
+            self.name, 'tools/dockerfile/grpc_artifact_linux_x64',
+            'tools/run_tests/artifacts/build_package_php.sh')
 
 
 def targets():
-  """Gets list of supported targets"""
-  return [CSharpPackage(),
-          CSharpPackage(linux=True),
-          RubyPackage(),
-          PythonPackage(),
-          PHPPackage()]
+    """Gets list of supported targets"""
+    return [
+        CSharpPackage(),
+        CSharpPackage(linux=True),
+        RubyPackage(),
+        PythonPackage(),
+        PHPPackage()
+    ]
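
As in distribtest_targets.py, create_docker_jobspec() above expands each environ entry into a docker '-e KEY=VALUE' argument before invoking build_and_run_docker.sh. A minimal, self-contained sketch of just that conversion (illustrative only, not part of package_targets.py):

# Illustrative helper mirroring the environ -> docker-args loop above.


def environ_to_docker_args(environ):
    """Turns {'K': 'V'} into ['-e', 'K=V', ...] as create_docker_jobspec does."""
    docker_args = []
    for k, v in environ.items():
        docker_args += ['-e', '%s=%s' % (k, v)]
    return docker_args


if __name__ == '__main__':
    print(environ_to_docker_args(
        {'RUN_COMMAND': 'tools/run_tests/artifacts/build_package_ruby.sh'}))
    # -> ['-e', 'RUN_COMMAND=tools/run_tests/artifacts/build_package_ruby.sh']
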
diff --git a/tools/run_tests/dockerize/build_docker_and_run_tests.sh b/tools/run_tests/dockerize/build_docker_and_run_tests.sh
index 06a5dae..32de3fa 100755
--- a/tools/run_tests/dockerize/build_docker_and_run_tests.sh
+++ b/tools/run_tests/dockerize/build_docker_and_run_tests.sh
@@ -66,6 +66,7 @@
   -e "BUILD_ID=$BUILD_ID" \
   -e "BUILD_URL=$BUILD_URL" \
   -e "JOB_BASE_NAME=$JOB_BASE_NAME" \
+  -e "KOKORO_BUILD_ID=$KOKORO_BUILD_ID" \
   -e "KOKORO_BUILD_NUMBER=$KOKORO_BUILD_NUMBER" \
   -e "KOKORO_BUILD_URL=$KOKORO_BUILD_URL" \
   -e "KOKORO_JOB_NAME=$KOKORO_JOB_NAME" \
diff --git a/tools/run_tests/generated/sources_and_headers.json b/tools/run_tests/generated/sources_and_headers.json
index 9b736a7..d432bd0 100644
--- a/tools/run_tests/generated/sources_and_headers.json
+++ b/tools/run_tests/generated/sources_and_headers.json
@@ -104,23 +104,6 @@
       "gpr", 
       "gpr_test_util", 
       "grpc", 
-      "grpc_test_util"
-    ], 
-    "headers": [], 
-    "is_filegroup": false, 
-    "language": "c", 
-    "name": "backoff_test", 
-    "src": [
-      "test/core/backoff/backoff_test.cc"
-    ], 
-    "third_party": false, 
-    "type": "target"
-  }, 
-  {
-    "deps": [
-      "gpr", 
-      "gpr_test_util", 
-      "grpc", 
       "grpc_test_util", 
       "test_tcp_server"
     ], 
@@ -590,45 +573,6 @@
   {
     "deps": [
       "gpr", 
-      "grpc"
-    ], 
-    "headers": [], 
-    "is_filegroup": false, 
-    "language": "c", 
-    "name": "gen_hpack_tables", 
-    "src": [
-      "tools/codegen/core/gen_hpack_tables.c"
-    ], 
-    "third_party": false, 
-    "type": "target"
-  }, 
-  {
-    "deps": [], 
-    "headers": [], 
-    "is_filegroup": false, 
-    "language": "c", 
-    "name": "gen_legal_metadata_characters", 
-    "src": [
-      "tools/codegen/core/gen_legal_metadata_characters.c"
-    ], 
-    "third_party": false, 
-    "type": "target"
-  }, 
-  {
-    "deps": [], 
-    "headers": [], 
-    "is_filegroup": false, 
-    "language": "c", 
-    "name": "gen_percent_encoding_tables", 
-    "src": [
-      "tools/codegen/core/gen_percent_encoding_tables.c"
-    ], 
-    "third_party": false, 
-    "type": "target"
-  }, 
-  {
-    "deps": [
-      "gpr", 
       "gpr_test_util", 
       "grpc", 
       "grpc_test_util"
@@ -711,21 +655,6 @@
     "headers": [], 
     "is_filegroup": false, 
     "language": "c", 
-    "name": "gpr_histogram_test", 
-    "src": [
-      "test/core/support/histogram_test.cc"
-    ], 
-    "third_party": false, 
-    "type": "target"
-  }, 
-  {
-    "deps": [
-      "gpr", 
-      "gpr_test_util"
-    ], 
-    "headers": [], 
-    "is_filegroup": false, 
-    "language": "c", 
     "name": "gpr_host_port_test", 
     "src": [
       "test/core/support/host_port_test.cc"
@@ -1245,6 +1174,21 @@
   {
     "deps": [
       "gpr", 
+      "grpc_test_util"
+    ], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "c", 
+    "name": "histogram_test", 
+    "src": [
+      "test/core/util/histogram_test.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [
+      "gpr", 
       "gpr_test_util", 
       "grpc", 
       "grpc_test_util"
@@ -2541,6 +2485,23 @@
       "gpr", 
       "gpr_test_util", 
       "grpc", 
+      "grpc_test_util"
+    ], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "c++", 
+    "name": "backoff_test", 
+    "src": [
+      "test/core/backoff/backoff_test.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [
+      "gpr", 
+      "gpr_test_util", 
+      "grpc", 
       "grpc++", 
       "grpc++_test_util", 
       "grpc_test_util"
@@ -3925,6 +3886,44 @@
       "gpr_test_util", 
       "grpc", 
       "grpc++", 
+      "grpc++_test", 
+      "grpc_test_util"
+    ], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "c++", 
+    "name": "ref_counted_ptr_test", 
+    "src": [
+      "test/core/support/ref_counted_ptr_test.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [
+      "gpr", 
+      "gpr_test_util", 
+      "grpc", 
+      "grpc++", 
+      "grpc++_test", 
+      "grpc_test_util"
+    ], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "c++", 
+    "name": "ref_counted_test", 
+    "src": [
+      "test/core/support/ref_counted_test.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [
+      "gpr", 
+      "gpr_test_util", 
+      "grpc", 
+      "grpc++", 
       "grpc++_test_util", 
       "grpc_test_util"
     ], 
@@ -4299,6 +4298,45 @@
   }, 
   {
     "deps": [
+      "gpr", 
+      "grpc"
+    ], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "cc", 
+    "name": "gen_hpack_tables", 
+    "src": [
+      "tools/codegen/core/gen_hpack_tables.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "cc", 
+    "name": "gen_legal_metadata_characters", 
+    "src": [
+      "tools/codegen/core/gen_legal_metadata_characters.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [], 
+    "headers": [], 
+    "is_filegroup": false, 
+    "language": "cc", 
+    "name": "gen_percent_encoding_tables", 
+    "src": [
+      "tools/codegen/core/gen_percent_encoding_tables.cc"
+    ], 
+    "third_party": false, 
+    "type": "target"
+  }, 
+  {
+    "deps": [
       "boringssl", 
       "boringssl_aes_test_lib", 
       "boringssl_test_util"
@@ -7578,6 +7616,7 @@
       "test/core/end2end/tests/filter_call_init_fails.cc", 
       "test/core/end2end/tests/filter_causes_close.cc", 
       "test/core/end2end/tests/filter_latency.cc", 
+      "test/core/end2end/tests/filter_status_code.cc", 
       "test/core/end2end/tests/graceful_server_shutdown.cc", 
       "test/core/end2end/tests/high_initial_seqno.cc", 
       "test/core/end2end/tests/hpack_size.cc", 
@@ -7659,6 +7698,7 @@
       "test/core/end2end/tests/filter_call_init_fails.cc", 
       "test/core/end2end/tests/filter_causes_close.cc", 
       "test/core/end2end/tests/filter_latency.cc", 
+      "test/core/end2end/tests/filter_status_code.cc", 
       "test/core/end2end/tests/graceful_server_shutdown.cc", 
       "test/core/end2end/tests/high_initial_seqno.cc", 
       "test/core/end2end/tests/hpack_size.cc", 
@@ -7745,7 +7785,6 @@
       "src/core/lib/support/env_posix.cc", 
       "src/core/lib/support/env_windows.cc", 
       "src/core/lib/support/fork.cc", 
-      "src/core/lib/support/histogram.cc", 
       "src/core/lib/support/host_port.cc", 
       "src/core/lib/support/log.cc", 
       "src/core/lib/support/log_android.cc", 
@@ -7792,7 +7831,6 @@
       "include/grpc/support/avl.h", 
       "include/grpc/support/cmdline.h", 
       "include/grpc/support/cpu.h", 
-      "include/grpc/support/histogram.h", 
       "include/grpc/support/host_port.h", 
       "include/grpc/support/log.h", 
       "include/grpc/support/log_windows.h", 
@@ -7842,7 +7880,6 @@
       "include/grpc/support/avl.h", 
       "include/grpc/support/cmdline.h", 
       "include/grpc/support/cpu.h", 
-      "include/grpc/support/histogram.h", 
       "include/grpc/support/host_port.h", 
       "include/grpc/support/log.h", 
       "include/grpc/support/log_windows.h", 
@@ -8210,6 +8247,9 @@
       "src/core/lib/slice/slice_hash_table.h", 
       "src/core/lib/slice/slice_internal.h", 
       "src/core/lib/slice/slice_string_helpers.h", 
+      "src/core/lib/support/debug_location.h", 
+      "src/core/lib/support/ref_counted.h", 
+      "src/core/lib/support/ref_counted_ptr.h", 
       "src/core/lib/surface/alarm_internal.h", 
       "src/core/lib/surface/api_trace.h", 
       "src/core/lib/surface/call.h", 
@@ -8346,6 +8386,9 @@
       "src/core/lib/slice/slice_hash_table.h", 
       "src/core/lib/slice/slice_internal.h", 
       "src/core/lib/slice/slice_string_helpers.h", 
+      "src/core/lib/support/debug_location.h", 
+      "src/core/lib/support/ref_counted.h", 
+      "src/core/lib/support/ref_counted_ptr.h", 
       "src/core/lib/surface/alarm_internal.h", 
       "src/core/lib/surface/api_trace.h", 
       "src/core/lib/surface/call.h", 
@@ -8901,6 +8944,7 @@
       "test/core/iomgr/endpoint_tests.h", 
       "test/core/util/debugger_macros.h", 
       "test/core/util/grpc_profiler.h", 
+      "test/core/util/histogram.h", 
       "test/core/util/memory_counters.h", 
       "test/core/util/mock_endpoint.h", 
       "test/core/util/parse_hexstring.h", 
@@ -8929,6 +8973,8 @@
       "test/core/util/debugger_macros.h", 
       "test/core/util/grpc_profiler.cc", 
       "test/core/util/grpc_profiler.h", 
+      "test/core/util/histogram.cc", 
+      "test/core/util/histogram.h", 
       "test/core/util/memory_counters.cc", 
       "test/core/util/memory_counters.h", 
       "test/core/util/mock_endpoint.cc", 
@@ -8939,6 +8985,7 @@
       "test/core/util/passthru_endpoint.h", 
       "test/core/util/port.cc", 
       "test/core/util/port.h", 
+      "test/core/util/port_isolated_runtime_environment.cc", 
       "test/core/util/port_server_client.cc", 
       "test/core/util/port_server_client.h", 
       "test/core/util/slice_splitter.cc", 
diff --git a/tools/run_tests/generated/tests.json b/tools/run_tests/generated/tests.json
index 2584bec..98517cb 100644
--- a/tools/run_tests/generated/tests.json
+++ b/tools/run_tests/generated/tests.json
@@ -132,30 +132,6 @@
     ], 
     "cpu_cost": 1.0, 
     "exclude_configs": [], 
-    "exclude_iomgrs": [], 
-    "flaky": false, 
-    "gtest": false, 
-    "language": "c", 
-    "name": "backoff_test", 
-    "platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ], 
-    "uses_polling": false
-  }, 
-  {
-    "args": [], 
-    "benchmark": false, 
-    "ci_platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ], 
-    "cpu_cost": 1.0, 
-    "exclude_configs": [], 
     "exclude_iomgrs": [
       "uv"
     ], 
@@ -854,30 +830,6 @@
     "flaky": false, 
     "gtest": false, 
     "language": "c", 
-    "name": "gpr_histogram_test", 
-    "platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ], 
-    "uses_polling": false
-  }, 
-  {
-    "args": [], 
-    "benchmark": false, 
-    "ci_platforms": [
-      "linux", 
-      "mac", 
-      "posix", 
-      "windows"
-    ], 
-    "cpu_cost": 1.0, 
-    "exclude_configs": [], 
-    "exclude_iomgrs": [], 
-    "flaky": false, 
-    "gtest": false, 
-    "language": "c", 
     "name": "gpr_host_port_test", 
     "platforms": [
       "linux", 
@@ -1538,6 +1490,30 @@
     "flaky": false, 
     "gtest": false, 
     "language": "c", 
+    "name": "histogram_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "uses_polling": false
+  }, 
+  {
+    "args": [], 
+    "benchmark": false, 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "gtest": false, 
+    "language": "c", 
     "name": "hpack_parser_test", 
     "platforms": [
       "linux", 
@@ -2974,6 +2950,30 @@
     "flaky": false, 
     "gtest": false, 
     "language": "c++", 
+    "name": "backoff_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "uses_polling": false
+  }, 
+  {
+    "args": [], 
+    "benchmark": false, 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "gtest": false, 
+    "language": "c++", 
     "name": "bdp_estimator_test", 
     "platforms": [
       "linux", 
@@ -4138,6 +4138,54 @@
     "flaky": false, 
     "gtest": true, 
     "language": "c++", 
+    "name": "ref_counted_ptr_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "uses_polling": true
+  }, 
+  {
+    "args": [], 
+    "benchmark": false, 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "gtest": true, 
+    "language": "c++", 
+    "name": "ref_counted_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "uses_polling": true
+  }, 
+  {
+    "args": [], 
+    "benchmark": false, 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix", 
+      "windows"
+    ], 
+    "cpu_cost": 1.0, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "gtest": true, 
+    "language": "c++", 
     "name": "secure_auth_context_test", 
     "platforms": [
       "linux", 
@@ -6751,6 +6799,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_census_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -8089,6 +8160,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_compress_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -9384,6 +9478,28 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_fakesec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -10590,6 +10706,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_fd_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -11857,6 +11996,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -13113,6 +13275,25 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+pipe_test", 
+    "platforms": [
+      "linux"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -14297,6 +14478,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+trace_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -15589,6 +15793,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+workarounds_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -16945,6 +17172,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_http_proxy_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -18343,6 +18594,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_load_reporting_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -19699,6 +19973,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_oauth2_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -21019,6 +21317,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_proxy_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -22123,6 +22445,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -23347,6 +23693,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair+trace_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -24531,6 +24901,32 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "msan"
+    ], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_1byte_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -25877,6 +26273,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_ssl_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -27161,6 +27580,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_ssl_proxy_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -28318,6 +28761,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_uds_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -29514,6 +29980,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "inproc_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "high_initial_seqno"
     ], 
     "ci_platforms": [
@@ -30551,6 +31040,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_census_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -31866,6 +32378,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_compress_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -33064,6 +33599,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_fd_nosec_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -34308,6 +34866,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -35545,6 +36126,25 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+pipe_nosec_test", 
+    "platforms": [
+      "linux"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -36706,6 +37306,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+trace_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -37975,6 +38598,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_full+workarounds_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -39307,6 +39953,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_http_proxy_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -40682,6 +41352,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_load_reporting_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -41942,6 +42635,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_proxy_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -43022,6 +43739,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -44222,6 +44963,30 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair+trace_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -45380,6 +46145,32 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [
+      "msan"
+    ], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_sockpair_1byte_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -46678,6 +47469,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [
+      "uv"
+    ], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "h2_uds_nosec_test", 
+    "platforms": [
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "graceful_server_shutdown"
     ], 
     "ci_platforms": [
@@ -47851,6 +48665,29 @@
   }, 
   {
     "args": [
+      "filter_status_code"
+    ], 
+    "ci_platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ], 
+    "cpu_cost": 0.1, 
+    "exclude_configs": [], 
+    "exclude_iomgrs": [], 
+    "flaky": false, 
+    "language": "c", 
+    "name": "inproc_nosec_test", 
+    "platforms": [
+      "windows", 
+      "linux", 
+      "mac", 
+      "posix"
+    ]
+  }, 
+  {
+    "args": [
       "high_initial_seqno"
     ], 
     "ci_platforms": [
@@ -48684,6 +49521,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1cq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 1000000}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_1cq_secure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -48710,6 +49573,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1cq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_1cq_secure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -48736,6 +49625,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_1cq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_1cq_secure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -49572,6 +50487,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 1000000}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -49598,6 +50539,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -49624,6 +50591,32 @@
   {
     "args": [
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "auto_timeout_scaling": false, 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "json_run_localhost", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 120
+  }, 
+  {
+    "args": [
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "auto_timeout_scaling": false, 
@@ -50513,6 +51506,32 @@
     "args": [
       "--run_inproc", 
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 1000000}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "qps_json_driver", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "qps_json_driver:inproc_cpp_generic_async_streaming_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 360
+  }, 
+  {
+    "args": [
+      "--run_inproc", 
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
@@ -50539,6 +51558,32 @@
     "args": [
       "--run_inproc", 
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "qps_json_driver", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "qps_json_driver:inproc_cpp_protobuf_async_streaming_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 360
+  }, 
+  {
+    "args": [
+      "--run_inproc", 
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
@@ -50565,6 +51610,32 @@
     "args": [
       "--run_inproc", 
       "--scenarios_json", 
+      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1000000, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 1000000, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
+    ], 
+    "boringssl": true, 
+    "ci_platforms": [
+      "linux"
+    ], 
+    "cpu_cost": "capacity", 
+    "defaults": "boringssl", 
+    "exclude_configs": [
+      "tsan", 
+      "asan"
+    ], 
+    "excluded_poll_engines": [], 
+    "flaky": false, 
+    "language": "c++", 
+    "name": "qps_json_driver", 
+    "platforms": [
+      "linux"
+    ], 
+    "shortname": "qps_json_driver:inproc_cpp_protobuf_async_unary_qps_unconstrained_1cq_insecure", 
+    "timeout_seconds": 360
+  }, 
+  {
+    "args": [
+      "--run_inproc", 
+      "--scenarios_json", 
       "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
     ], 
     "boringssl": true, 
@@ -51009,2783 +52080,6 @@
   }, 
   {
     "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_1channel_100rpcs_1MB\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 1048576, \"req_size\": 1048576}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_1channel_100rpcs_1MB_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_from_client_1channel_1MB\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_CLIENT\", \"payload_config\": {\"simple_params\": {\"resp_size\": 1048576, \"req_size\": 1048576}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_from_client_1channel_1MB_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 16, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 1, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 50, \"req_size\": 300}}, \"client_channels\": 300, \"threads_per_cq\": 0, \"load_params\": {\"poisson\": {\"offered_load\": 37500}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1mps_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 1, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_1mps_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_10mps_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 10, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_10mps_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_1channel_1MBmsg_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 1048576, \"req_size\": 1048576}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 1048576, \"req_size\": 1048576}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_1channel_1MBmsg_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_64KBmsg_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_64KBmsg_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_servers\": 1, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 10, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [
-      "poll-cv"
-    ], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 8388608, \"req_size\": 128}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_servers\": 1, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 10, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [
-      "poll-cv"
-    ], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_ping_pong_secure_1MB\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 1048576, \"req_size\": 1048576}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_secure_1MB_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_unary_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_unary_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 3, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_unary_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 3, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_ping_pong_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 3, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_qps_unconstrained_1mps_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 1, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_qps_unconstrained_1mps_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_qps_unconstrained_10mps_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 10, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_qps_unconstrained_10mps_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 3, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1mps_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 1, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_1mps_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_10mps_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 10, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_10mps_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_from_client_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_CLIENT\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_from_client_ping_pong_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_from_client_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 3, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_CLIENT\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_from_client_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_from_client_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_CLIENT\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_from_client_ping_pong_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_from_client_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 3, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING_FROM_CLIENT\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_from_client_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_from_server_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_SERVER\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_from_server_ping_pong_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_from_server_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 3, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_SERVER\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_from_server_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_from_server_ping_pong_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_SERVER\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_from_server_ping_pong_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_from_server_qps_unconstrained_secure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"threads_per_cq\": 3, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": {\"use_test_ca\": true, \"server_host_override\": \"foo.test.google.fr\"}, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING_FROM_SERVER\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_from_server_qps_unconstrained_secure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 0}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_ping_pong_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_1mps_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 1, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_1mps_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_10mps_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 10, \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_10mps_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_1channel_1MBmsg_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 1048576, \"req_size\": 1048576}}, \"security_params\": null, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 1048576, \"req_size\": 1048576}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_1channel_1MBmsg_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_64KBmsg_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"security_params\": null, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 65536, \"req_size\": 65536}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_64KBmsg_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 2}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"security_params\": null, \"threads_per_cq\": 2, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 2, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_generic_async_streaming_qps_one_server_core_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"server_type\": \"ASYNC_GENERIC_SERVER\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"security_params\": null, \"threads_per_cq\": 0}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 13, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"bytebuf_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_generic_async_streaming_qps_one_server_core_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_servers\": 1, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 10, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [
-      "poll-cv"
-    ], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 8388608, \"req_size\": 128}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_servers\": 1, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 10, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [
-      "poll-cv"
-    ], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_ping_pong_insecure_1MB\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 1048576, \"req_size\": 1048576}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_insecure_1MB_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_unary_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_unary_ping_pong_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_unary_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 3, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_unary_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_ping_pong_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_unary_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 3, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"UNARY\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_unary_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_ping_pong_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 3, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_qps_unconstrained_1mps_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 1, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_qps_unconstrained_1mps_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_qps_unconstrained_10mps_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 10, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_qps_unconstrained_10mps_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_ping_pong_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 3, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_1mps_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 1, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_1mps_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_qps_unconstrained_10mps_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING\", \"messages_per_stream\": 10, \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_qps_unconstrained_10mps_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_from_client_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_CLIENT\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_from_client_ping_pong_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_from_client_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 3, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_CLIENT\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_from_client_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_from_client_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_CLIENT\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_from_client_ping_pong_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_from_client_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 3, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING_FROM_CLIENT\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_from_client_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_from_server_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_SERVER\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_from_server_ping_pong_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_sync_streaming_from_server_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 3, \"server_type\": \"SYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_SERVER\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"SYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 64, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_sync_streaming_from_server_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_from_server_ping_pong_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 1, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 0, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 1, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"latency\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 1, \"outstanding_rpcs_per_channel\": 1, \"rpc_type\": \"STREAMING_FROM_SERVER\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 1, \"threads_per_cq\": 0, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": 2, 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_from_server_ping_pong_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
-      "--scenarios_json", 
-      "{\"scenarios\": [{\"name\": \"cpp_protobuf_async_streaming_from_server_qps_unconstrained_insecure\", \"warmup_seconds\": 0, \"benchmark_seconds\": 1, \"num_servers\": 1, \"server_config\": {\"async_server_threads\": 0, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"security_params\": null, \"threads_per_cq\": 3, \"server_type\": \"ASYNC_SERVER\"}, \"num_clients\": 0, \"client_config\": {\"security_params\": null, \"channel_args\": [{\"str_value\": \"throughput\", \"name\": \"grpc.optimization_target\"}, {\"int_value\": 1, \"name\": \"grpc.minimal_stack\"}], \"async_client_threads\": 0, \"outstanding_rpcs_per_channel\": 100, \"rpc_type\": \"STREAMING_FROM_SERVER\", \"payload_config\": {\"simple_params\": {\"resp_size\": 0, \"req_size\": 0}}, \"client_channels\": 64, \"threads_per_cq\": 3, \"load_params\": {\"closed_loop\": {}}, \"client_type\": \"ASYNC_CLIENT\", \"histogram_params\": {\"max_possible\": 60000000000.0, \"resolution\": 0.01}}}]}"
-    ], 
-    "auto_timeout_scaling": false, 
-    "boringssl": true, 
-    "ci_platforms": [
-      "linux"
-    ], 
-    "cpu_cost": "capacity", 
-    "defaults": "boringssl", 
-    "exclude_configs": [
-      "asan-noleaks", 
-      "asan-trace-cmp", 
-      "basicprof", 
-      "c++-compat", 
-      "counters", 
-      "dbg", 
-      "gcov", 
-      "helgrind", 
-      "lto", 
-      "memcheck", 
-      "msan", 
-      "mutrace", 
-      "opt", 
-      "stapprof", 
-      "ubsan"
-    ], 
-    "excluded_poll_engines": [], 
-    "flaky": false, 
-    "language": "c++", 
-    "name": "json_run_localhost", 
-    "platforms": [
-      "linux"
-    ], 
-    "shortname": "json_run_localhost:cpp_protobuf_async_streaming_from_server_qps_unconstrained_insecure_low_thread_count", 
-    "timeout_seconds": 600
-  }, 
-  {
-    "args": [
       "test/core/end2end/fuzzers/api_fuzzer_corpus/00.bin"
     ], 
     "ci_platforms": [
diff --git a/tools/run_tests/helper_scripts/build_csharp.sh b/tools/run_tests/helper_scripts/build_csharp.sh
index ec0a441..c6bee82 100755
--- a/tools/run_tests/helper_scripts/build_csharp.sh
+++ b/tools/run_tests/helper_scripts/build_csharp.sh
@@ -15,12 +15,12 @@
 
 set -ex
 
-cd $(dirname $0)/../../../src/csharp
+cd "$(dirname "$0")/../../../src/csharp"
 
 if [ "$CONFIG" == "gcov" ]
 then
   # overriding NativeDependenciesConfigurationUnix makes C# project pick up the gcov flavor of grpc_csharp_ext
-  dotnet build --configuration $MSBUILD_CONFIG /p:NativeDependenciesConfigurationUnix=gcov Grpc.sln
+  dotnet build --configuration "$MSBUILD_CONFIG" /p:NativeDependenciesConfigurationUnix=gcov Grpc.sln
 else
-  dotnet build --configuration $MSBUILD_CONFIG Grpc.sln
+  dotnet build --configuration "$MSBUILD_CONFIG" Grpc.sln
 fi
diff --git a/tools/run_tests/helper_scripts/build_php.sh b/tools/run_tests/helper_scripts/build_php.sh
index 856e5b6..443be34 100755
--- a/tools/run_tests/helper_scripts/build_php.sh
+++ b/tools/run_tests/helper_scripts/build_php.sh
@@ -18,9 +18,9 @@
 CONFIG=${CONFIG:-opt}
 
 # change to grpc repo root
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
-root=`pwd`
+root=$(pwd)
 export GRPC_LIB_SUBDIR=libs/$CONFIG
 export CFLAGS="-Wno-parentheses-equality"
 
@@ -30,8 +30,8 @@
 cd ext/grpc
 phpize
 if [ "$CONFIG" != "gcov" ] ; then
-  ./configure --enable-grpc=$root
+  ./configure --enable-grpc="$root"
 else
-  ./configure --enable-grpc=$root --enable-coverage
+  ./configure --enable-grpc="$root" --enable-coverage
 fi
 make
diff --git a/tools/run_tests/helper_scripts/build_python.sh b/tools/run_tests/helper_scripts/build_python.sh
index e362082..b809fe0 100755
--- a/tools/run_tests/helper_scripts/build_python.sh
+++ b/tools/run_tests/helper_scripts/build_python.sh
@@ -16,13 +16,13 @@
 set -ex
 
 # change to grpc repo root
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
 ##########################
 # Portability operations #
 ##########################
 
-PLATFORM=`uname -s`
+PLATFORM=$(uname -s)
 
 function is_msys() {
   if [ "${PLATFORM/MSYS}" != "$PLATFORM" ]; then
@@ -64,7 +64,7 @@
 # Path to python executable within a virtual environment depending on the
 # system.
 function venv_relative_python() {
-  if [ $(is_mingw) ]; then
+  if [ "$(is_mingw)" ]; then
     echo 'Scripts/python.exe'
   else
     echo 'bin/python'
@@ -73,7 +73,7 @@
 
 # Distutils toolchain to use depending on the system.
 function toolchain() {
-  if [ $(is_mingw) ]; then
+  if [ "$(is_mingw)" ]; then
     echo 'mingw32'
   else
     echo 'unix'
@@ -97,17 +97,17 @@
 ####################
 
 PYTHON=${1:-python2.7}
-VENV=${2:-$(venv $PYTHON)}
+VENV=${2:-$(venv "$PYTHON")}
 VENV_RELATIVE_PYTHON=${3:-$(venv_relative_python)}
 TOOLCHAIN=${4:-$(toolchain)}
 
-if [ $(is_msys) ]; then
+if [ "$(is_msys)" ]; then
   echo "MSYS doesn't directly provide the right compiler(s);"
   echo "switch to a MinGW shell."
   exit 1
 fi
 
-ROOT=`pwd`
+ROOT=$(pwd)
 export CFLAGS="-I$ROOT/include -std=gnu99 -fno-wrapv $CFLAGS"
 export GRPC_PYTHON_BUILD_WITH_CYTHON=1
 export LANG=en_US.UTF-8
@@ -117,7 +117,7 @@
 HOST_PYTHON=${HOST_PYTHON:-python}
 
 # If ccache is available on Linux, use it.
-if [ $(is_linux) ]; then
+if [ "$(is_linux)" ]; then
   # We're not on Darwin (Mac OS X)
   if [ -x "$(command -v ccache)" ]; then
     if [ -x "$(command -v gcc)" ]; then
@@ -137,46 +137,46 @@
 # it's possible that the virtualenv is still usable and we trust the tester to
 # be able to 'figure it out' instead of us e.g. doing potentially expensive and
 # unnecessary error recovery by `rm -rf`ing the virtualenv.
-($PYTHON -m virtualenv $VENV ||
- $HOST_PYTHON -m virtualenv -p $PYTHON $VENV ||
+($PYTHON -m virtualenv "$VENV" ||
+ $HOST_PYTHON -m virtualenv -p "$PYTHON" "$VENV" ||
  true)
-VENV_PYTHON=`script_realpath "$VENV/$VENV_RELATIVE_PYTHON"`
+VENV_PYTHON=$(script_realpath "$VENV/$VENV_RELATIVE_PYTHON")
 
 # pip-installs the directory specified. Used because on MSYS the vanilla Windows
 # Python gets confused when parsing paths.
 pip_install_dir() {
-  PWD=`pwd`
-  cd $1
-  ($VENV_PYTHON setup.py build_ext -c $TOOLCHAIN || true)
+  PWD=$(pwd)
+  cd "$1"
+  ($VENV_PYTHON setup.py build_ext -c "$TOOLCHAIN" || true)
   $VENV_PYTHON -m pip install --no-deps .
-  cd $PWD
+  cd "$PWD"
 }
 
 $VENV_PYTHON -m pip install --upgrade pip==9.0.1
 $VENV_PYTHON -m pip install setuptools
 $VENV_PYTHON -m pip install cython
 $VENV_PYTHON -m pip install six enum34 protobuf futures
-pip_install_dir $ROOT
+pip_install_dir "$ROOT"
 
-$VENV_PYTHON $ROOT/tools/distrib/python/make_grpcio_tools.py
-pip_install_dir $ROOT/tools/distrib/python/grpcio_tools
+$VENV_PYTHON "$ROOT/tools/distrib/python/make_grpcio_tools.py"
+pip_install_dir "$ROOT/tools/distrib/python/grpcio_tools"
 
 # Build/install health checking
-$VENV_PYTHON $ROOT/src/python/grpcio_health_checking/setup.py preprocess
-$VENV_PYTHON $ROOT/src/python/grpcio_health_checking/setup.py build_package_protos
-pip_install_dir $ROOT/src/python/grpcio_health_checking
+$VENV_PYTHON "$ROOT/src/python/grpcio_health_checking/setup.py" preprocess
+$VENV_PYTHON "$ROOT/src/python/grpcio_health_checking/setup.py" build_package_protos
+pip_install_dir "$ROOT/src/python/grpcio_health_checking"
 
 # Build/install reflection
-$VENV_PYTHON $ROOT/src/python/grpcio_reflection/setup.py preprocess
-$VENV_PYTHON $ROOT/src/python/grpcio_reflection/setup.py build_package_protos
-pip_install_dir $ROOT/src/python/grpcio_reflection
+$VENV_PYTHON "$ROOT/src/python/grpcio_reflection/setup.py" preprocess
+$VENV_PYTHON "$ROOT/src/python/grpcio_reflection/setup.py" build_package_protos
+pip_install_dir "$ROOT/src/python/grpcio_reflection"
 
 # Install testing
-pip_install_dir $ROOT/src/python/grpcio_testing
+pip_install_dir "$ROOT/src/python/grpcio_testing"
 
 # Build/install tests
 $VENV_PYTHON -m pip install coverage==4.4 oauth2client==4.1.0 \
                             google-auth==1.0.0 requests==2.14.2
-$VENV_PYTHON $ROOT/src/python/grpcio_tests/setup.py preprocess
-$VENV_PYTHON $ROOT/src/python/grpcio_tests/setup.py build_package_protos
-pip_install_dir $ROOT/src/python/grpcio_tests
+$VENV_PYTHON "$ROOT/src/python/grpcio_tests/setup.py" preprocess
+$VENV_PYTHON "$ROOT/src/python/grpcio_tests/setup.py" build_package_protos
+pip_install_dir "$ROOT/src/python/grpcio_tests"
diff --git a/tools/run_tests/helper_scripts/build_python_msys2.sh b/tools/run_tests/helper_scripts/build_python_msys2.sh
index 4c54f1c..f388b4b 100644
--- a/tools/run_tests/helper_scripts/build_python_msys2.sh
+++ b/tools/run_tests/helper_scripts/build_python_msys2.sh
@@ -15,7 +15,7 @@
 
 set -ex
 
-BUILD_PYTHON=`realpath "$(dirname $0)/build_python.sh"`
+BUILD_PYTHON=$(realpath "$(dirname "$0")/build_python.sh")
 export MSYSTEM=$1
 shift 1
-bash --login $BUILD_PYTHON "$@"
+bash --login "$BUILD_PYTHON" "$@"
diff --git a/tools/run_tests/helper_scripts/build_ruby.sh b/tools/run_tests/helper_scripts/build_ruby.sh
index a9267e1..b15a863 100755
--- a/tools/run_tests/helper_scripts/build_ruby.sh
+++ b/tools/run_tests/helper_scripts/build_ruby.sh
@@ -19,7 +19,7 @@
 export GRPC_CONFIG=${CONFIG:-opt}
 
 # change to grpc's ruby directory
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
 rm -rf ./tmp
 rake compile
diff --git a/tools/run_tests/helper_scripts/bundle_install_wrapper.sh b/tools/run_tests/helper_scripts/bundle_install_wrapper.sh
index 27b8fce..ab31dd5 100755
--- a/tools/run_tests/helper_scripts/bundle_install_wrapper.sh
+++ b/tools/run_tests/helper_scripts/bundle_install_wrapper.sh
@@ -17,9 +17,9 @@
 set -ex
 
 # change to grpc repo root
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
-SYSTEM=`uname | cut -f 1 -d_`
+SYSTEM=$(uname | cut -f 1 -d_)
 
 if [ "$SYSTEM" == "Darwin" ] ; then
   # Workaround for crash during bundle install
diff --git a/tools/run_tests/helper_scripts/post_tests_c.sh b/tools/run_tests/helper_scripts/post_tests_c.sh
index a4a8f44..e4ab203 100755
--- a/tools/run_tests/helper_scripts/post_tests_c.sh
+++ b/tools/run_tests/helper_scripts/post_tests_c.sh
@@ -17,14 +17,14 @@
 
 if [ "$CONFIG" != "gcov" ] ; then exit ; fi
 
-root=$(readlink -f $(dirname $0)/../../..)
+root=$(readlink -f "$(dirname "$0")/../../..")
 out=$root/reports/c_cxx_coverage
 tmp1=$(mktemp)
 tmp2=$(mktemp)
-cd $root
-lcov --capture --directory . --output-file $tmp1
-lcov --extract $tmp1 "$root/src/*" "$root/include/*" --output-file $tmp2
-genhtml $tmp2 --output-directory $out
-rm $tmp2
-rm $tmp1
+cd "$root"
+lcov --capture --directory . --output-file "$tmp1"
+lcov --extract "$tmp1" "$root/src/*" "$root/include/*" --output-file "$tmp2"
+genhtml "$tmp2" --output-directory "$out"
+rm "$tmp2"
+rm "$tmp1"
 
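Editor's note: for readers unfamiliar with the lcov flow used in post_tests_c.sh (capture raw counters, extract only first-party sources, render HTML), a rough Python equivalent driven through subprocess is sketched below. The report directory and paths are illustrative assumptions, not part of the patch:

    import subprocess
    import tempfile

    def c_cxx_coverage_report(root, out_dir):
        """Capture gcov data under `root` and render an HTML report (sketch)."""
        tmp1 = tempfile.mktemp()
        tmp2 = tempfile.mktemp()
        # Capture raw coverage counters for the whole tree.
        subprocess.check_call(
            ["lcov", "--capture", "--directory", ".", "--output-file", tmp1],
            cwd=root)
        # Keep only first-party sources and public headers.
        subprocess.check_call(
            ["lcov", "--extract", tmp1, root + "/src/*", root + "/include/*",
             "--output-file", tmp2], cwd=root)
        # Render the filtered data as HTML.
        subprocess.check_call(
            ["genhtml", tmp2, "--output-directory", out_dir], cwd=root)
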
diff --git a/tools/run_tests/helper_scripts/post_tests_csharp.sh b/tools/run_tests/helper_scripts/post_tests_csharp.sh
index f92ea00..6473dfd 100755
--- a/tools/run_tests/helper_scripts/post_tests_csharp.sh
+++ b/tools/run_tests/helper_scripts/post_tests_csharp.sh
@@ -18,7 +18,7 @@
 if [ "$CONFIG" != "gcov" ] ; then exit ; fi
 
 # change to gRPC repo root
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
 # Generate the csharp extension coverage report
 gcov objs/gcov/src/csharp/ext/*.o
diff --git a/tools/run_tests/helper_scripts/post_tests_php.sh b/tools/run_tests/helper_scripts/post_tests_php.sh
index 8ebc1e4..b23e4bd 100755
--- a/tools/run_tests/helper_scripts/post_tests_php.sh
+++ b/tools/run_tests/helper_scripts/post_tests_php.sh
@@ -17,15 +17,15 @@
 
 if [ "$CONFIG" != "gcov" ] ; then exit ; fi
 
-root=$(readlink -f $(dirname $0)/../../..)
+root=$(readlink -f "$(dirname "$0")/../../..")
 out=$root/reports/php_ext_coverage
 tmp1=$(mktemp)
 tmp2=$(mktemp)
-cd $root
-lcov --capture --directory . --output-file $tmp1
-lcov --extract $tmp1 "$root/src/php/ext/grpc/*" --output-file $tmp2
-genhtml $tmp2 --output-directory $out
-rm $tmp2
-rm $tmp1
+cd "$root"
+lcov --capture --directory . --output-file "$tmp1"
+lcov --extract "$tmp1" "$root/src/php/ext/grpc/*" --output-file "$tmp2"
+genhtml "$tmp2" --output-directory "$out"
+rm "$tmp2"
+rm "$tmp1"
 
 # todo(mattkwong): generate coverage report for php and copy to reports/php
diff --git a/tools/run_tests/helper_scripts/post_tests_python.sh b/tools/run_tests/helper_scripts/post_tests_python.sh
index 071e81a..bca9b20 100755
--- a/tools/run_tests/helper_scripts/post_tests_python.sh
+++ b/tools/run_tests/helper_scripts/post_tests_python.sh
@@ -18,7 +18,7 @@
 if [ "$CONFIG" != "gcov" ] ; then exit ; fi
 
 # change to directory of Python coverage files
-cd $(dirname $0)/../../../src/python/grpcio_tests/
+cd "$(dirname "$0")/../../../src/python/grpcio_tests/"
 
 coverage combine .
 coverage html -i -d ./../../../reports/python
diff --git a/tools/run_tests/helper_scripts/post_tests_ruby.sh b/tools/run_tests/helper_scripts/post_tests_ruby.sh
index a0b0736..f086001 100755
--- a/tools/run_tests/helper_scripts/post_tests_ruby.sh
+++ b/tools/run_tests/helper_scripts/post_tests_ruby.sh
@@ -17,15 +17,15 @@
 
 if [ "$CONFIG" != "gcov" ] ; then exit ; fi
 
-root=$(readlink -f $(dirname $0)/../../..)
+root=$(readlink -f "$(dirname "$0")/../../..")
 out=$root/reports/ruby_ext_coverage
 tmp1=$(mktemp)
 tmp2=$(mktemp)
-cd $root
-lcov --capture --directory . --output-file $tmp1
-lcov --extract $tmp1 "$root/src/ruby/*" --output-file $tmp2
-genhtml $tmp2 --output-directory $out
-rm $tmp2
-rm $tmp1
+cd "$root"
+lcov --capture --directory . --output-file "$tmp1"
+lcov --extract "$tmp1" "$root/src/ruby/*" --output-file "$tmp2"
+genhtml "$tmp2" --output-directory "$out"
+rm "$tmp2"
+rm "$tmp1"
 
-cp -rv $root/coverage $root/reports/ruby
+cp -rv "$root/coverage" "$root/reports/ruby"
diff --git a/tools/run_tests/helper_scripts/pre_build_cmake.sh b/tools/run_tests/helper_scripts/pre_build_cmake.sh
index 0300cd8..bb36588 100755
--- a/tools/run_tests/helper_scripts/pre_build_cmake.sh
+++ b/tools/run_tests/helper_scripts/pre_build_cmake.sh
@@ -15,10 +15,10 @@
 
 set -ex
 
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
 mkdir -p cmake/build
 cd cmake/build
 
 # MSBUILD_CONFIG's values are suitable for cmake as well
-cmake -DgRPC_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE=${MSBUILD_CONFIG} ../..
+cmake -DgRPC_BUILD_TESTS=ON -DCMAKE_BUILD_TYPE="${MSBUILD_CONFIG}" ../..
diff --git a/tools/run_tests/helper_scripts/pre_build_csharp.sh b/tools/run_tests/helper_scripts/pre_build_csharp.sh
index e2aeddc..f9f5440 100755
--- a/tools/run_tests/helper_scripts/pre_build_csharp.sh
+++ b/tools/run_tests/helper_scripts/pre_build_csharp.sh
@@ -16,6 +16,6 @@
 set -ex
 
 # cd to gRPC csharp directory
-cd $(dirname $0)/../../../src/csharp
+cd "$(dirname "$0")/../../../src/csharp"
 
 dotnet restore Grpc.sln
diff --git a/tools/run_tests/helper_scripts/pre_build_ruby.sh b/tools/run_tests/helper_scripts/pre_build_ruby.sh
index d68f7e9..b574096 100755
--- a/tools/run_tests/helper_scripts/pre_build_ruby.sh
+++ b/tools/run_tests/helper_scripts/pre_build_ruby.sh
@@ -19,6 +19,6 @@
 export GRPC_CONFIG=${CONFIG:-opt}
 
 # change to grpc repo root
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
 tools/run_tests/helper_scripts/bundle_install_wrapper.sh
diff --git a/tools/run_tests/helper_scripts/run_grpc-node.sh b/tools/run_tests/helper_scripts/run_grpc-node.sh
index 25f149f..747aae7 100755
--- a/tools/run_tests/helper_scripts/run_grpc-node.sh
+++ b/tools/run_tests/helper_scripts/run_grpc-node.sh
@@ -17,12 +17,12 @@
 # to this reference
 
 # cd to gRPC root directory
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
-CURRENT_COMMIT=$(git rev-parse --verify HEAD)
+CURRENT_COMMIT="$(git rev-parse --verify HEAD)"
 
 rm -rf ./../grpc-node
 git clone --recursive https://github.com/grpc/grpc-node ./../grpc-node
 cd ./../grpc-node
 
-./test-grpc-submodule.sh $CURRENT_COMMIT
+./test-grpc-submodule.sh "$CURRENT_COMMIT"
diff --git a/tools/run_tests/helper_scripts/run_lcov.sh b/tools/run_tests/helper_scripts/run_lcov.sh
index c7b2cfe..9d8b679 100755
--- a/tools/run_tests/helper_scripts/run_lcov.sh
+++ b/tools/run_tests/helper_scripts/run_lcov.sh
@@ -15,17 +15,17 @@
 
 set -ex
 
-out=$(readlink -f ${1:-coverage})
+out=$(readlink -f "${1:-coverage}")
 
-root=$(readlink -f $(dirname $0)/../../..)
+root=$(readlink -f "$(dirname "$0")/../../..")
 shift || true
 tmp=$(mktemp)
-cd $root
-tools/run_tests/run_tests.py -c gcov -l c c++ $@ || true
-lcov --capture --directory . --output-file $tmp
-genhtml $tmp --output-directory $out
-rm $tmp
+cd "$root"
+tools/run_tests/run_tests.py -c gcov -l c c++ "$@" || true
+lcov --capture --directory . --output-file "$tmp"
+genhtml "$tmp" --output-directory "$out"
+rm "$tmp"
 if which xdg-open > /dev/null
 then
-  xdg-open file://$out/index.html
+  xdg-open "file://$out/index.html"
 fi
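
Editor's note: the $@ to "$@" change here (and in run_tests_in_workspace.sh below) keeps each caller-supplied argument intact even when it contains spaces. A loose Python analogue, assuming nothing beyond the standard library: forward sys.argv[1:] as a list instead of re-joining it into one string.

    import subprocess
    import sys

    # Forward our own arguments verbatim, one list element per argument;
    # this mirrors what a quoted "$@" does in the shell script above.
    # (run_lcov.sh tolerates test failures via "|| true"; omitted here.)
    subprocess.check_call(
        ["python", "tools/run_tests/run_tests.py", "-c", "gcov", "-l", "c", "c++"]
        + sys.argv[1:])
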
diff --git a/tools/run_tests/helper_scripts/run_python.sh b/tools/run_tests/helper_scripts/run_python.sh
index 90f28c8..bcfe3a6 100755
--- a/tools/run_tests/helper_scripts/run_python.sh
+++ b/tools/run_tests/helper_scripts/run_python.sh
@@ -16,15 +16,15 @@
 set -ex
 
 # change to grpc repo root
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
-PYTHON=`realpath "${1:-py27/bin/python}"`
+PYTHON=$(realpath "${1:-py27/bin/python}")
 
-ROOT=`pwd`
+ROOT=$(pwd)
 
-$PYTHON $ROOT/src/python/grpcio_tests/setup.py test_lite
+$PYTHON "$ROOT/src/python/grpcio_tests/setup.py" test_lite
 
-mkdir -p $ROOT/reports
-rm -rf $ROOT/reports/python-coverage
-(mv -T $ROOT/htmlcov $ROOT/reports/python-coverage) || true
+mkdir -p "$ROOT/reports"
+rm -rf "$ROOT/reports/python-coverage"
+(mv -T "$ROOT/htmlcov" "$ROOT/reports/python-coverage") || true
 
diff --git a/tools/run_tests/helper_scripts/run_ruby.sh b/tools/run_tests/helper_scripts/run_ruby.sh
index 4bd7d74..4e9c212 100755
--- a/tools/run_tests/helper_scripts/run_ruby.sh
+++ b/tools/run_tests/helper_scripts/run_ruby.sh
@@ -16,6 +16,6 @@
 set -ex
 
 # change to grpc repo root
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
 rake
diff --git a/tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh b/tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh
index 5cfab14..1955442 100755
--- a/tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh
+++ b/tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh
@@ -16,7 +16,7 @@
 set -ex
 
 # change to grpc repo root
-cd $(dirname $0)/../../..
+cd "$(dirname "$0")/../../.."
 
 EXIT_CODE=0
 ruby src/ruby/end2end/sig_handling_driver.rb || EXIT_CODE=1
diff --git a/tools/run_tests/helper_scripts/run_tests_in_workspace.sh b/tools/run_tests/helper_scripts/run_tests_in_workspace.sh
index 529dc04..790c041 100755
--- a/tools/run_tests/helper_scripts/run_tests_in_workspace.sh
+++ b/tools/run_tests/helper_scripts/run_tests_in_workspace.sh
@@ -19,15 +19,15 @@
 # newly created workspace)
 set -ex
 
-cd $(dirname $0)/../../..
-export repo_root=$(pwd)
+cd "$(dirname "$0")/../../.."
+export repo_root="$(pwd)"
 
 rm -rf "${WORKSPACE_NAME}"
 git clone . "${WORKSPACE_NAME}"
 # clone gRPC submodules, use data from locally cloned submodules where possible
+# shellcheck disable=SC2016,SC1004
 git submodule foreach 'cd "${repo_root}/${WORKSPACE_NAME}" \
     && git submodule update --init --reference ${repo_root}/${name} ${name}'
 
 echo "Running run_tests.py in workspace ${WORKSPACE_NAME}" 
-python "${WORKSPACE_NAME}/tools/run_tests/run_tests.py" $@
-
+python "${WORKSPACE_NAME}/tools/run_tests/run_tests.py" "$@"
diff --git a/tools/run_tests/performance/OWNERS b/tools/run_tests/performance/OWNERS
new file mode 100644
index 0000000..98c8152
--- /dev/null
+++ b/tools/run_tests/performance/OWNERS
@@ -0,0 +1,9 @@
+set noparent
+
+# These owners are in place to ensure that scenario_result_schema.json is not
+# modified without also running tools/run_tests/performance/patch_scenario_results_schema.py
+# to update the BigQuery schema
+
+@ncteisen
+@matt-kwong
+@ctiller
diff --git a/tools/run_tests/performance/bq_upload_result.py b/tools/run_tests/performance/bq_upload_result.py
index 31819d6..6702587 100755
--- a/tools/run_tests/performance/bq_upload_result.py
+++ b/tools/run_tests/performance/bq_upload_result.py
@@ -26,146 +26,161 @@
 import uuid
 import massage_qps_stats
 
-
-gcp_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 import big_query_utils
 
-
-_PROJECT_ID='grpc-testing'
+_PROJECT_ID = 'grpc-testing'
 
 
 def _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, result_file):
-  with open(result_file, 'r') as f:
-    (col1, col2, col3) = f.read().split(',')
-    latency50 = float(col1.strip()) * 1000
-    latency90 = float(col2.strip()) * 1000
-    latency99 = float(col3.strip()) * 1000
+    with open(result_file, 'r') as f:
+        (col1, col2, col3) = f.read().split(',')
+        latency50 = float(col1.strip()) * 1000
+        latency90 = float(col2.strip()) * 1000
+        latency99 = float(col3.strip()) * 1000
 
-    scenario_result = {
-        'scenario': {
-          'name': 'netperf_tcp_rr'
-        },
-        'summary': {
-          'latency50': latency50,
-          'latency90': latency90,
-          'latency99': latency99
+        scenario_result = {
+            'scenario': {
+                'name': 'netperf_tcp_rr'
+            },
+            'summary': {
+                'latency50': latency50,
+                'latency90': latency90,
+                'latency99': latency99
+            }
         }
-    }
 
-  bq = big_query_utils.create_big_query()
-  _create_results_table(bq, dataset_id, table_id)
+    bq = big_query_utils.create_big_query()
+    _create_results_table(bq, dataset_id, table_id)
 
-  if not _insert_result(bq, dataset_id, table_id, scenario_result, flatten=False):
-    print('Error uploading result to bigquery.')
-    sys.exit(1)
+    if not _insert_result(
+            bq, dataset_id, table_id, scenario_result, flatten=False):
+        print('Error uploading result to bigquery.')
+        sys.exit(1)
 
 
 def _upload_scenario_result_to_bigquery(dataset_id, table_id, result_file):
-  with open(result_file, 'r') as f:
-    scenario_result = json.loads(f.read())
+    with open(result_file, 'r') as f:
+        scenario_result = json.loads(f.read())
 
-  bq = big_query_utils.create_big_query()
-  _create_results_table(bq, dataset_id, table_id)
+    bq = big_query_utils.create_big_query()
+    _create_results_table(bq, dataset_id, table_id)
 
-  if not _insert_result(bq, dataset_id, table_id, scenario_result):
-    print('Error uploading result to bigquery.')
-    sys.exit(1)
+    if not _insert_result(bq, dataset_id, table_id, scenario_result):
+        print('Error uploading result to bigquery.')
+        sys.exit(1)
 
 
 def _insert_result(bq, dataset_id, table_id, scenario_result, flatten=True):
-  if flatten:
-    _flatten_result_inplace(scenario_result)
-  _populate_metadata_inplace(scenario_result)
-  row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
-  return big_query_utils.insert_rows(bq,
-                                     _PROJECT_ID,
-                                     dataset_id,
-                                     table_id,
-                                     [row])
+    if flatten:
+        _flatten_result_inplace(scenario_result)
+    _populate_metadata_inplace(scenario_result)
+    row = big_query_utils.make_row(str(uuid.uuid4()), scenario_result)
+    return big_query_utils.insert_rows(bq, _PROJECT_ID, dataset_id, table_id,
+                                       [row])
 
 
 def _create_results_table(bq, dataset_id, table_id):
-  with open(os.path.dirname(__file__) + '/scenario_result_schema.json', 'r') as f:
-    table_schema = json.loads(f.read())
-  desc = 'Results of performance benchmarks.'
-  return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id,
-                               table_id, table_schema, desc)
+    with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+              'r') as f:
+        table_schema = json.loads(f.read())
+    desc = 'Results of performance benchmarks.'
+    return big_query_utils.create_table2(bq, _PROJECT_ID, dataset_id, table_id,
+                                         table_schema, desc)
 
 
 def _flatten_result_inplace(scenario_result):
-  """Bigquery is not really great for handling deeply nested data
+    """Bigquery is not really great for handling deeply nested data
   and repeated fields. To maintain values of some fields while keeping
   the schema relatively simple, we artificially leave some of the fields
   as JSON strings.
   """
-  scenario_result['scenario']['clientConfig'] = json.dumps(scenario_result['scenario']['clientConfig'])
-  scenario_result['scenario']['serverConfig'] = json.dumps(scenario_result['scenario']['serverConfig'])
-  scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
-  scenario_result['serverCpuStats'] = []
-  for stats in scenario_result['serverStats']:
-    scenario_result['serverCpuStats'].append(dict())
-    scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop('totalCpuTime', None)
-    scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop('idleCpuTime', None)
-  for stats in scenario_result['clientStats']:
-    stats['latencies'] = json.dumps(stats['latencies'])
-    stats.pop('requestResults', None)
-  scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
-  scenario_result['clientSuccess'] = json.dumps(scenario_result['clientSuccess'])
-  scenario_result['serverSuccess'] = json.dumps(scenario_result['serverSuccess'])
-  scenario_result['requestResults'] = json.dumps(scenario_result.get('requestResults', []))
-  scenario_result['serverCpuUsage'] = scenario_result['summary'].pop('serverCpuUsage', None)
-  scenario_result['summary'].pop('successfulRequestsPerSecond', None)
-  scenario_result['summary'].pop('failedRequestsPerSecond', None)
-  massage_qps_stats.massage_qps_stats(scenario_result)
+    scenario_result['scenario']['clientConfig'] = json.dumps(
+        scenario_result['scenario']['clientConfig'])
+    scenario_result['scenario']['serverConfig'] = json.dumps(
+        scenario_result['scenario']['serverConfig'])
+    scenario_result['latencies'] = json.dumps(scenario_result['latencies'])
+    scenario_result['serverCpuStats'] = []
+    for stats in scenario_result['serverStats']:
+        scenario_result['serverCpuStats'].append(dict())
+        scenario_result['serverCpuStats'][-1]['totalCpuTime'] = stats.pop(
+            'totalCpuTime', None)
+        scenario_result['serverCpuStats'][-1]['idleCpuTime'] = stats.pop(
+            'idleCpuTime', None)
+    for stats in scenario_result['clientStats']:
+        stats['latencies'] = json.dumps(stats['latencies'])
+        stats.pop('requestResults', None)
+    scenario_result['serverCores'] = json.dumps(scenario_result['serverCores'])
+    scenario_result['clientSuccess'] = json.dumps(
+        scenario_result['clientSuccess'])
+    scenario_result['serverSuccess'] = json.dumps(
+        scenario_result['serverSuccess'])
+    scenario_result['requestResults'] = json.dumps(
+        scenario_result.get('requestResults', []))
+    scenario_result['serverCpuUsage'] = scenario_result['summary'].pop(
+        'serverCpuUsage', None)
+    scenario_result['summary'].pop('successfulRequestsPerSecond', None)
+    scenario_result['summary'].pop('failedRequestsPerSecond', None)
+    massage_qps_stats.massage_qps_stats(scenario_result)
 
 
 def _populate_metadata_inplace(scenario_result):
-  """Populates metadata based on environment variables set by Jenkins."""
-  # NOTE: Grabbing the Jenkins environment variables will only work if the
-  # driver is running locally on the same machine where Jenkins has started
-  # the job. For our setup, this is currently the case, so just assume that.
-  build_number = os.getenv('BUILD_NUMBER')
-  build_url = os.getenv('BUILD_URL')
-  job_name = os.getenv('JOB_NAME')
-  git_commit = os.getenv('GIT_COMMIT')
-  # actual commit is the actual head of PR that is getting tested
-  git_actual_commit = os.getenv('ghprbActualCommit')
+    """Populates metadata based on environment variables set by Jenkins."""
+    # NOTE: Grabbing the Jenkins environment variables will only work if the
+    # driver is running locally on the same machine where Jenkins has started
+    # the job. For our setup, this is currently the case, so just assume that.
+    build_number = os.getenv('BUILD_NUMBER')
+    build_url = os.getenv('BUILD_URL')
+    job_name = os.getenv('JOB_NAME')
+    git_commit = os.getenv('GIT_COMMIT')
+    # actual commit is the actual head of PR that is getting tested
+    git_actual_commit = os.getenv('ghprbActualCommit')
 
-  utc_timestamp = str(calendar.timegm(time.gmtime()))
-  metadata = {'created': utc_timestamp}
+    utc_timestamp = str(calendar.timegm(time.gmtime()))
+    metadata = {'created': utc_timestamp}
 
-  if build_number:
-    metadata['buildNumber'] = build_number
-  if build_url:
-    metadata['buildUrl'] = build_url
-  if job_name:
-    metadata['jobName'] = job_name
-  if git_commit:
-    metadata['gitCommit'] = git_commit
-  if git_actual_commit:
-    metadata['gitActualCommit'] = git_actual_commit
+    if build_number:
+        metadata['buildNumber'] = build_number
+    if build_url:
+        metadata['buildUrl'] = build_url
+    if job_name:
+        metadata['jobName'] = job_name
+    if git_commit:
+        metadata['gitCommit'] = git_commit
+    if git_actual_commit:
+        metadata['gitActualCommit'] = git_actual_commit
 
-  scenario_result['metadata'] = metadata
+    scenario_result['metadata'] = metadata
 
 
 argp = argparse.ArgumentParser(description='Upload result to big query.')
-argp.add_argument('--bq_result_table', required=True, default=None, type=str,
-                  help='Bigquery "dataset.table" to upload results to.')
-argp.add_argument('--file_to_upload', default='scenario_result.json', type=str,
-                  help='Report file to upload.')
-argp.add_argument('--file_format',
-                  choices=['scenario_result','netperf_latency_csv'],
-                  default='scenario_result',
-                  help='Format of the file to upload.')
+argp.add_argument(
+    '--bq_result_table',
+    required=True,
+    default=None,
+    type=str,
+    help='Bigquery "dataset.table" to upload results to.')
+argp.add_argument(
+    '--file_to_upload',
+    default='scenario_result.json',
+    type=str,
+    help='Report file to upload.')
+argp.add_argument(
+    '--file_format',
+    choices=['scenario_result', 'netperf_latency_csv'],
+    default='scenario_result',
+    help='Format of the file to upload.')
 
 args = argp.parse_args()
 
 dataset_id, table_id = args.bq_result_table.split('.', 2)
 
 if args.file_format == 'netperf_latency_csv':
-  _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id, args.file_to_upload)
+    _upload_netperf_latency_csv_to_bigquery(dataset_id, table_id,
+                                            args.file_to_upload)
 else:
-  _upload_scenario_result_to_bigquery(dataset_id, table_id, args.file_to_upload)
+    _upload_scenario_result_to_bigquery(dataset_id, table_id,
+                                        args.file_to_upload)
 print('Successfully uploaded %s to BigQuery.\n' % args.file_to_upload)
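Editor's note: the _flatten_result_inplace docstring above explains that BigQuery handles deeply nested and repeated fields poorly, so selected sub-objects are serialized to JSON strings before upload. A toy illustration of that idea follows; the field values are a made-up subset for the example, not the real scenario schema:

    import json

    scenario_result = {
        "scenario": {"name": "example", "clientConfig": {"rpcs": 8}},
        "latencies": [1.0, 2.0, 3.0],
    }

    # Collapse nested / repeated fields into flat JSON strings so the
    # BigQuery row schema can stay simple, as the docstring describes.
    scenario_result["scenario"]["clientConfig"] = json.dumps(
        scenario_result["scenario"]["clientConfig"])
    scenario_result["latencies"] = json.dumps(scenario_result["latencies"])

    print(scenario_result)
    # {'scenario': {'name': 'example', 'clientConfig': '{"rpcs": 8}'},
    #  'latencies': '[1.0, 2.0, 3.0]'}
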
diff --git a/tools/run_tests/performance/massage_qps_stats.py b/tools/run_tests/performance/massage_qps_stats.py
index 48c5758..790202c 100644
--- a/tools/run_tests/performance/massage_qps_stats.py
+++ b/tools/run_tests/performance/massage_qps_stats.py
@@ -15,182 +15,455 @@
 # Autogenerated by tools/codegen/core/gen_stats_data.py
 
 import massage_qps_stats_helpers
+
+
 def massage_qps_stats(scenario_result):
-  for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
-    if "coreStats" not in stats: return
-    core_stats = stats["coreStats"]
-    del stats["coreStats"]
-    stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(core_stats, "client_calls_created")
-    stats["core_server_calls_created"] = massage_qps_stats_helpers.counter(core_stats, "server_calls_created")
-    stats["core_cqs_created"] = massage_qps_stats_helpers.counter(core_stats, "cqs_created")
-    stats["core_client_channels_created"] = massage_qps_stats_helpers.counter(core_stats, "client_channels_created")
-    stats["core_client_subchannels_created"] = massage_qps_stats_helpers.counter(core_stats, "client_subchannels_created")
-    stats["core_server_channels_created"] = massage_qps_stats_helpers.counter(core_stats, "server_channels_created")
-    stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(core_stats, "syscall_poll")
-    stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(core_stats, "syscall_wait")
-    stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick")
-    stats["core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kicked_without_poller")
-    stats["core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kicked_again")
-    stats["core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_wakeup_fd")
-    stats["core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_wakeup_cv")
-    stats["core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(core_stats, "pollset_kick_own_thread")
-    stats["core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(core_stats, "histogram_slow_lookups")
-    stats["core_syscall_write"] = massage_qps_stats_helpers.counter(core_stats, "syscall_write")
-    stats["core_syscall_read"] = massage_qps_stats_helpers.counter(core_stats, "syscall_read")
-    stats["core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(core_stats, "tcp_backup_pollers_created")
-    stats["core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(core_stats, "tcp_backup_poller_polls")
-    stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_batches")
-    stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_cancel")
-    stats["core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_initial_metadata")
-    stats["core_http2_op_send_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_message")
-    stats["core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_send_trailing_metadata")
-    stats["core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_initial_metadata")
-    stats["core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_message")
-    stats["core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_op_recv_trailing_metadata")
-    stats["core_http2_settings_writes"] = massage_qps_stats_helpers.counter(core_stats, "http2_settings_writes")
-    stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(core_stats, "http2_pings_sent")
-    stats["core_http2_writes_begun"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_begun")
-    stats["core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_offloaded")
-    stats["core_http2_writes_continued"] = massage_qps_stats_helpers.counter(core_stats, "http2_writes_continued")
-    stats["core_http2_partial_writes"] = massage_qps_stats_helpers.counter(core_stats, "http2_partial_writes")
-    stats["core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_initial_write")
-    stats["core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_start_new_stream")
-    stats["core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_message")
-    stats["core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_initial_metadata")
-    stats["core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_trailing_metadata")
-    stats["core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_retry_send_ping")
-    stats["core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_continue_pings")
-    stats["core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_goaway_sent")
-    stats["core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_rst_stream")
-    stats["core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_close_from_api")
-    stats["core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_stream_flow_control")
-    stats["core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control")
-    stats["core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_send_settings")
-    stats["core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_bdp_estimator_ping")
-    stats["core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_flow_control_unstalled_by_setting")
-    stats["core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_flow_control_unstalled_by_update")
-    stats["core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_application_ping")
-    stats["core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_keepalive_ping")
-    stats["core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_transport_flow_control_unstalled")
-    stats["core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_ping_response")
-    stats["core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(core_stats, "http2_initiate_write_due_to_force_rst_stream")
-    stats["core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(core_stats, "http2_spurious_writes_begun")
-    stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_indexed")
-    stats["core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx")
-    stats["core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_incidx_v")
-    stats["core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_notidx")
-    stats["core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_notidx_v")
-    stats["core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_nvridx")
-    stats["core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_lithdr_nvridx_v")
-    stats["core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_uncompressed")
-    stats["core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_huffman")
-    stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_binary")
-    stats["core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(core_stats, "hpack_recv_binary_base64")
-    stats["core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_indexed")
-    stats["core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_incidx")
-    stats["core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_incidx_v")
-    stats["core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_notidx")
-    stats["core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_notidx_v")
-    stats["core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_nvridx")
-    stats["core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_lithdr_nvridx_v")
-    stats["core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_uncompressed")
-    stats["core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_huffman")
-    stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_binary")
-    stats["core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(core_stats, "hpack_send_binary_base64")
-    stats["core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_initiated")
-    stats["core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_items")
-    stats["core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_scheduled_final_items")
-    stats["core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(core_stats, "combiner_locks_offloaded")
-    stats["core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_locks_initiated")
-    stats["core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_locks_scheduled_items")
-    stats["core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_set_notify_on_cancel")
-    stats["core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(core_stats, "call_combiner_cancelled")
-    stats["core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_short_items")
-    stats["core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_long_items")
-    stats["core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(core_stats, "executor_scheduled_to_self")
-    stats["core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(core_stats, "executor_wakeup_initiated")
-    stats["core_executor_queue_drained"] = massage_qps_stats_helpers.counter(core_stats, "executor_queue_drained")
-    stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(core_stats, "executor_push_retries")
-    stats["core_server_requested_calls"] = massage_qps_stats_helpers.counter(core_stats, "server_requested_calls")
-    stats["core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(core_stats, "server_slowpath_requests_queued")
-    stats["core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(core_stats, "cq_ev_queue_trylock_failures")
-    stats["core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(core_stats, "cq_ev_queue_trylock_successes")
-    stats["core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(core_stats, "cq_ev_queue_transient_pop_failures")
-    h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
-    stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_call_initial_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "poll_events_returned")
-    stats["core_poll_events_returned"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_poll_events_returned_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
-    stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_write_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_iov_size")
-    stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_write_iov_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
-    stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_read_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
-    stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_read_offer_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer_iov_size")
-    stats["core_tcp_read_offer_iov_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_tcp_read_offer_iov_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_message_size")
-    stats["core_http2_send_message_size"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_message_size_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_initial_metadata_per_write")
-    stats["core_http2_send_initial_metadata_per_write"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_message_per_write")
-    stats["core_http2_send_message_per_write"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_message_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_trailing_metadata_per_write")
-    stats["core_http2_send_trailing_metadata_per_write"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_trailing_metadata_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "http2_send_flowctl_per_write")
-    stats["core_http2_send_flowctl_per_write"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_http2_send_flowctl_per_write_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
-    h = massage_qps_stats_helpers.histogram(core_stats, "server_cqs_checked")
-    stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
-    stats["core_server_cqs_checked_bkts"] = ",".join("%f" % x for x in h.boundaries)
-    stats["core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(h.buckets, 50, h.boundaries)
-    stats["core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(h.buckets, 95, h.boundaries)
-    stats["core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(h.buckets, 99, h.boundaries)
+    for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
+        if "coreStats" not in stats: return
+        core_stats = stats["coreStats"]
+        del stats["coreStats"]
+        stats["core_client_calls_created"] = massage_qps_stats_helpers.counter(
+            core_stats, "client_calls_created")
+        stats["core_server_calls_created"] = massage_qps_stats_helpers.counter(
+            core_stats, "server_calls_created")
+        stats["core_cqs_created"] = massage_qps_stats_helpers.counter(
+            core_stats, "cqs_created")
+        stats[
+            "core_client_channels_created"] = massage_qps_stats_helpers.counter(
+                core_stats, "client_channels_created")
+        stats[
+            "core_client_subchannels_created"] = massage_qps_stats_helpers.counter(
+                core_stats, "client_subchannels_created")
+        stats[
+            "core_server_channels_created"] = massage_qps_stats_helpers.counter(
+                core_stats, "server_channels_created")
+        stats["core_syscall_poll"] = massage_qps_stats_helpers.counter(
+            core_stats, "syscall_poll")
+        stats["core_syscall_wait"] = massage_qps_stats_helpers.counter(
+            core_stats, "syscall_wait")
+        stats["core_pollset_kick"] = massage_qps_stats_helpers.counter(
+            core_stats, "pollset_kick")
+        stats[
+            "core_pollset_kicked_without_poller"] = massage_qps_stats_helpers.counter(
+                core_stats, "pollset_kicked_without_poller")
+        stats["core_pollset_kicked_again"] = massage_qps_stats_helpers.counter(
+            core_stats, "pollset_kicked_again")
+        stats[
+            "core_pollset_kick_wakeup_fd"] = massage_qps_stats_helpers.counter(
+                core_stats, "pollset_kick_wakeup_fd")
+        stats[
+            "core_pollset_kick_wakeup_cv"] = massage_qps_stats_helpers.counter(
+                core_stats, "pollset_kick_wakeup_cv")
+        stats[
+            "core_pollset_kick_own_thread"] = massage_qps_stats_helpers.counter(
+                core_stats, "pollset_kick_own_thread")
+        stats[
+            "core_histogram_slow_lookups"] = massage_qps_stats_helpers.counter(
+                core_stats, "histogram_slow_lookups")
+        stats["core_syscall_write"] = massage_qps_stats_helpers.counter(
+            core_stats, "syscall_write")
+        stats["core_syscall_read"] = massage_qps_stats_helpers.counter(
+            core_stats, "syscall_read")
+        stats[
+            "core_tcp_backup_pollers_created"] = massage_qps_stats_helpers.counter(
+                core_stats, "tcp_backup_pollers_created")
+        stats[
+            "core_tcp_backup_poller_polls"] = massage_qps_stats_helpers.counter(
+                core_stats, "tcp_backup_poller_polls")
+        stats["core_http2_op_batches"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_op_batches")
+        stats["core_http2_op_cancel"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_op_cancel")
+        stats[
+            "core_http2_op_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_op_send_initial_metadata")
+        stats["core_http2_op_send_message"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_op_send_message")
+        stats[
+            "core_http2_op_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_op_send_trailing_metadata")
+        stats[
+            "core_http2_op_recv_initial_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_op_recv_initial_metadata")
+        stats["core_http2_op_recv_message"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_op_recv_message")
+        stats[
+            "core_http2_op_recv_trailing_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_op_recv_trailing_metadata")
+        stats["core_http2_settings_writes"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_settings_writes")
+        stats["core_http2_pings_sent"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_pings_sent")
+        stats["core_http2_writes_begun"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_writes_begun")
+        stats[
+            "core_http2_writes_offloaded"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_writes_offloaded")
+        stats[
+            "core_http2_writes_continued"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_writes_continued")
+        stats["core_http2_partial_writes"] = massage_qps_stats_helpers.counter(
+            core_stats, "http2_partial_writes")
+        stats[
+            "core_http2_initiate_write_due_to_initial_write"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_initial_write")
+        stats[
+            "core_http2_initiate_write_due_to_start_new_stream"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_start_new_stream")
+        stats[
+            "core_http2_initiate_write_due_to_send_message"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_send_message")
+        stats[
+            "core_http2_initiate_write_due_to_send_initial_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_send_initial_metadata")
+        stats[
+            "core_http2_initiate_write_due_to_send_trailing_metadata"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_send_trailing_metadata")
+        stats[
+            "core_http2_initiate_write_due_to_retry_send_ping"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_retry_send_ping")
+        stats[
+            "core_http2_initiate_write_due_to_continue_pings"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_continue_pings")
+        stats[
+            "core_http2_initiate_write_due_to_goaway_sent"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_goaway_sent")
+        stats[
+            "core_http2_initiate_write_due_to_rst_stream"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_rst_stream")
+        stats[
+            "core_http2_initiate_write_due_to_close_from_api"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_close_from_api")
+        stats[
+            "core_http2_initiate_write_due_to_stream_flow_control"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_stream_flow_control")
+        stats[
+            "core_http2_initiate_write_due_to_transport_flow_control"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_transport_flow_control")
+        stats[
+            "core_http2_initiate_write_due_to_send_settings"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_send_settings")
+        stats[
+            "core_http2_initiate_write_due_to_bdp_estimator_ping"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_bdp_estimator_ping")
+        stats[
+            "core_http2_initiate_write_due_to_flow_control_unstalled_by_setting"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_flow_control_unstalled_by_setting")
+        stats[
+            "core_http2_initiate_write_due_to_flow_control_unstalled_by_update"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_flow_control_unstalled_by_update")
+        stats[
+            "core_http2_initiate_write_due_to_application_ping"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_application_ping")
+        stats[
+            "core_http2_initiate_write_due_to_keepalive_ping"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_keepalive_ping")
+        stats[
+            "core_http2_initiate_write_due_to_transport_flow_control_unstalled"] = massage_qps_stats_helpers.counter(
+                core_stats,
+                "http2_initiate_write_due_to_transport_flow_control_unstalled")
+        stats[
+            "core_http2_initiate_write_due_to_ping_response"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_ping_response")
+        stats[
+            "core_http2_initiate_write_due_to_force_rst_stream"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_initiate_write_due_to_force_rst_stream")
+        stats[
+            "core_http2_spurious_writes_begun"] = massage_qps_stats_helpers.counter(
+                core_stats, "http2_spurious_writes_begun")
+        stats["core_hpack_recv_indexed"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_recv_indexed")
+        stats[
+            "core_hpack_recv_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_incidx")
+        stats[
+            "core_hpack_recv_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_incidx_v")
+        stats[
+            "core_hpack_recv_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_notidx")
+        stats[
+            "core_hpack_recv_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_notidx_v")
+        stats[
+            "core_hpack_recv_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_nvridx")
+        stats[
+            "core_hpack_recv_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_lithdr_nvridx_v")
+        stats[
+            "core_hpack_recv_uncompressed"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_uncompressed")
+        stats["core_hpack_recv_huffman"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_recv_huffman")
+        stats["core_hpack_recv_binary"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_recv_binary")
+        stats[
+            "core_hpack_recv_binary_base64"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_recv_binary_base64")
+        stats["core_hpack_send_indexed"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_send_indexed")
+        stats[
+            "core_hpack_send_lithdr_incidx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_incidx")
+        stats[
+            "core_hpack_send_lithdr_incidx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_incidx_v")
+        stats[
+            "core_hpack_send_lithdr_notidx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_notidx")
+        stats[
+            "core_hpack_send_lithdr_notidx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_notidx_v")
+        stats[
+            "core_hpack_send_lithdr_nvridx"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_nvridx")
+        stats[
+            "core_hpack_send_lithdr_nvridx_v"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_lithdr_nvridx_v")
+        stats[
+            "core_hpack_send_uncompressed"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_uncompressed")
+        stats["core_hpack_send_huffman"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_send_huffman")
+        stats["core_hpack_send_binary"] = massage_qps_stats_helpers.counter(
+            core_stats, "hpack_send_binary")
+        stats[
+            "core_hpack_send_binary_base64"] = massage_qps_stats_helpers.counter(
+                core_stats, "hpack_send_binary_base64")
+        stats[
+            "core_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+                core_stats, "combiner_locks_initiated")
+        stats[
+            "core_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "combiner_locks_scheduled_items")
+        stats[
+            "core_combiner_locks_scheduled_final_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "combiner_locks_scheduled_final_items")
+        stats[
+            "core_combiner_locks_offloaded"] = massage_qps_stats_helpers.counter(
+                core_stats, "combiner_locks_offloaded")
+        stats[
+            "core_call_combiner_locks_initiated"] = massage_qps_stats_helpers.counter(
+                core_stats, "call_combiner_locks_initiated")
+        stats[
+            "core_call_combiner_locks_scheduled_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "call_combiner_locks_scheduled_items")
+        stats[
+            "core_call_combiner_set_notify_on_cancel"] = massage_qps_stats_helpers.counter(
+                core_stats, "call_combiner_set_notify_on_cancel")
+        stats[
+            "core_call_combiner_cancelled"] = massage_qps_stats_helpers.counter(
+                core_stats, "call_combiner_cancelled")
+        stats[
+            "core_executor_scheduled_short_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_scheduled_short_items")
+        stats[
+            "core_executor_scheduled_long_items"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_scheduled_long_items")
+        stats[
+            "core_executor_scheduled_to_self"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_scheduled_to_self")
+        stats[
+            "core_executor_wakeup_initiated"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_wakeup_initiated")
+        stats[
+            "core_executor_queue_drained"] = massage_qps_stats_helpers.counter(
+                core_stats, "executor_queue_drained")
+        stats["core_executor_push_retries"] = massage_qps_stats_helpers.counter(
+            core_stats, "executor_push_retries")
+        stats[
+            "core_server_requested_calls"] = massage_qps_stats_helpers.counter(
+                core_stats, "server_requested_calls")
+        stats[
+            "core_server_slowpath_requests_queued"] = massage_qps_stats_helpers.counter(
+                core_stats, "server_slowpath_requests_queued")
+        stats[
+            "core_cq_ev_queue_trylock_failures"] = massage_qps_stats_helpers.counter(
+                core_stats, "cq_ev_queue_trylock_failures")
+        stats[
+            "core_cq_ev_queue_trylock_successes"] = massage_qps_stats_helpers.counter(
+                core_stats, "cq_ev_queue_trylock_successes")
+        stats[
+            "core_cq_ev_queue_transient_pop_failures"] = massage_qps_stats_helpers.counter(
+                core_stats, "cq_ev_queue_transient_pop_failures")
+        h = massage_qps_stats_helpers.histogram(core_stats, "call_initial_size")
+        stats["core_call_initial_size"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_call_initial_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_call_initial_size_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_call_initial_size_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_call_initial_size_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "poll_events_returned")
+        stats["core_poll_events_returned"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_poll_events_returned_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_poll_events_returned_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_poll_events_returned_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_poll_events_returned_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats, "tcp_write_size")
+        stats["core_tcp_write_size"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_tcp_write_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats["core_tcp_write_size_50p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 50, h.boundaries)
+        stats["core_tcp_write_size_95p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 95, h.boundaries)
+        stats["core_tcp_write_size_99p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "tcp_write_iov_size")
+        stats["core_tcp_write_iov_size"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_tcp_write_iov_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_tcp_write_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_tcp_write_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_tcp_write_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_size")
+        stats["core_tcp_read_size"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_tcp_read_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats["core_tcp_read_size_50p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 50, h.boundaries)
+        stats["core_tcp_read_size_95p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 95, h.boundaries)
+        stats["core_tcp_read_size_99p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats, "tcp_read_offer")
+        stats["core_tcp_read_offer"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_tcp_read_offer_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats["core_tcp_read_offer_50p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 50, h.boundaries)
+        stats["core_tcp_read_offer_95p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 95, h.boundaries)
+        stats["core_tcp_read_offer_99p"] = massage_qps_stats_helpers.percentile(
+            h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "tcp_read_offer_iov_size")
+        stats["core_tcp_read_offer_iov_size"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_tcp_read_offer_iov_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_tcp_read_offer_iov_size_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_tcp_read_offer_iov_size_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_tcp_read_offer_iov_size_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "http2_send_message_size")
+        stats["core_http2_send_message_size"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_http2_send_message_size_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_message_size_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_message_size_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_message_size_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(
+            core_stats, "http2_send_initial_metadata_per_write")
+        stats["core_http2_send_initial_metadata_per_write"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_http2_send_initial_metadata_per_write_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_initial_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_initial_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_initial_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "http2_send_message_per_write")
+        stats["core_http2_send_message_per_write"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_http2_send_message_per_write_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_message_per_write_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_message_per_write_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_message_per_write_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(
+            core_stats, "http2_send_trailing_metadata_per_write")
+        stats["core_http2_send_trailing_metadata_per_write"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_http2_send_trailing_metadata_per_write_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_trailing_metadata_per_write_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_trailing_metadata_per_write_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_trailing_metadata_per_write_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "http2_send_flowctl_per_write")
+        stats["core_http2_send_flowctl_per_write"] = ",".join(
+            "%f" % x for x in h.buckets)
+        stats["core_http2_send_flowctl_per_write_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_http2_send_flowctl_per_write_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_http2_send_flowctl_per_write_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_http2_send_flowctl_per_write_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
+        h = massage_qps_stats_helpers.histogram(core_stats,
+                                                "server_cqs_checked")
+        stats["core_server_cqs_checked"] = ",".join("%f" % x for x in h.buckets)
+        stats["core_server_cqs_checked_bkts"] = ",".join(
+            "%f" % x for x in h.boundaries)
+        stats[
+            "core_server_cqs_checked_50p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 50, h.boundaries)
+        stats[
+            "core_server_cqs_checked_95p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 95, h.boundaries)
+        stats[
+            "core_server_cqs_checked_99p"] = massage_qps_stats_helpers.percentile(
+                h.buckets, 99, h.boundaries)
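
The massage_qps_stats.py block above spells out every core counter and histogram by name, but the repeated pattern it encodes is small: each counter becomes a flat "core_<name>" entry, and each histogram becomes comma-joined bucket counts, comma-joined bucket boundaries, and 50th/95th/99th percentiles. A minimal sketch of that pattern, for illustration only (the _flatten_core_stats helper and the name lists passed to it are not part of the tool; the real script lists each metric explicitly):

    import massage_qps_stats_helpers

    def _flatten_core_stats(core_stats, counter_names, histogram_names):
        # Illustrative only: mirrors the per-metric assignments above.
        stats = {}
        for name in counter_names:
            stats["core_" + name] = massage_qps_stats_helpers.counter(
                core_stats, name)
        for name in histogram_names:
            h = massage_qps_stats_helpers.histogram(core_stats, name)
            stats["core_" + name] = ",".join("%f" % x for x in h.buckets)
            stats["core_" + name + "_bkts"] = ",".join(
                "%f" % x for x in h.boundaries)
            for pctl in (50, 95, 99):
                stats["core_%s_%dp" % (name, pctl)] = (
                    massage_qps_stats_helpers.percentile(
                        h.buckets, pctl, h.boundaries))
        return stats
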
diff --git a/tools/run_tests/performance/massage_qps_stats_helpers.py b/tools/run_tests/performance/massage_qps_stats_helpers.py
index a2fe4ae..108451c 100644
--- a/tools/run_tests/performance/massage_qps_stats_helpers.py
+++ b/tools/run_tests/performance/massage_qps_stats_helpers.py
@@ -14,44 +14,49 @@
 
 import collections
 
+
 def _threshold_for_count_below(buckets, boundaries, count_below):
-  count_so_far = 0
-  for lower_idx in range(0, len(buckets)):
-    count_so_far += buckets[lower_idx]
-    if count_so_far >= count_below:
-      break
-  if count_so_far == count_below:
-    # this bucket hits the threshold exactly... we should be midway through
-    # any run of zero values following the bucket
-    for upper_idx in range(lower_idx + 1, len(buckets)):
-      if buckets[upper_idx] != 0:
-        break
-    return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0
-  else:
-    # treat values as uniform throughout the bucket, and find where this value
-    # should lie
-    lower_bound = boundaries[lower_idx]
-    upper_bound = boundaries[lower_idx + 1]
-    return (upper_bound -
-           (upper_bound - lower_bound) * (count_so_far - count_below) /
-               float(buckets[lower_idx]))
+    count_so_far = 0
+    for lower_idx in range(0, len(buckets)):
+        count_so_far += buckets[lower_idx]
+        if count_so_far >= count_below:
+            break
+    if count_so_far == count_below:
+        # this bucket hits the threshold exactly... we should be midway through
+        # any run of zero values following the bucket
+        for upper_idx in range(lower_idx + 1, len(buckets)):
+            if buckets[upper_idx] != 0:
+                break
+        return (boundaries[lower_idx] + boundaries[upper_idx]) / 2.0
+    else:
+        # treat values as uniform throughout the bucket, and find where this value
+        # should lie
+        lower_bound = boundaries[lower_idx]
+        upper_bound = boundaries[lower_idx + 1]
+        return (upper_bound - (upper_bound - lower_bound) *
+                (count_so_far - count_below) / float(buckets[lower_idx]))
+
 
 def percentile(buckets, pctl, boundaries):
-  return _threshold_for_count_below(
-      buckets, boundaries, sum(buckets) * pctl / 100.0)
+    return _threshold_for_count_below(buckets, boundaries,
+                                      sum(buckets) * pctl / 100.0)
+
 
 def counter(core_stats, name):
-  for stat in core_stats['metrics']:
-    if stat['name'] == name:
-      return int(stat.get('count', 0))
+    for stat in core_stats['metrics']:
+        if stat['name'] == name:
+            return int(stat.get('count', 0))
+
 
 Histogram = collections.namedtuple('Histogram', 'buckets boundaries')
+
+
 def histogram(core_stats, name):
-  for stat in core_stats['metrics']:
-    if stat['name'] == name:
-      buckets = []
-      boundaries = []
-      for b in stat['histogram']['buckets']:
-        buckets.append(int(b.get('count', 0)))
-        boundaries.append(int(b.get('start', 0)))
-  return Histogram(buckets=buckets, boundaries=boundaries)
+    for stat in core_stats['metrics']:
+        if stat['name'] == name:
+            buckets = []
+            boundaries = []
+            for b in stat['histogram']['buckets']:
+                buckets.append(int(b.get('count', 0)))
+                boundaries.append(int(b.get('start', 0)))
+    return Histogram(buckets=buckets, boundaries=boundaries)
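
To make the bucket interpolation in _threshold_for_count_below concrete, here is a small worked example (illustration only; the three-bucket histogram is made up, and the import assumes the helpers module is importable from the current directory):

    from massage_qps_stats_helpers import percentile

    buckets = [10, 30, 60]     # sample counts per bucket
    boundaries = [0, 10, 100]  # bucket start values, as histogram() reports them
    # 25th percentile: 25 of the 100 samples must fall below the threshold.
    # The first bucket covers 10 of them, so the threshold lands in the
    # [10, 100) bucket, where 15 of its 30 samples stay above it:
    #   100 - (100 - 10) * 15 / 30 = 55.0
    assert abs(percentile(buckets, 25, boundaries) - 55.0) < 1e-6
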
diff --git a/tools/run_tests/performance/patch_scenario_results_schema.py b/tools/run_tests/performance/patch_scenario_results_schema.py
index 81ba538..2a2aadc 100755
--- a/tools/run_tests/performance/patch_scenario_results_schema.py
+++ b/tools/run_tests/performance/patch_scenario_results_schema.py
@@ -25,27 +25,32 @@
 import time
 import uuid
 
-
-gcp_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 import big_query_utils
 
+_PROJECT_ID = 'grpc-testing'
 
-_PROJECT_ID='grpc-testing'
 
 def _patch_results_table(dataset_id, table_id):
-  bq = big_query_utils.create_big_query()
-  with open(os.path.dirname(__file__) + '/scenario_result_schema.json', 'r') as f:
-    table_schema = json.loads(f.read())
-  desc = 'Results of performance benchmarks.'
-  return big_query_utils.patch_table(bq, _PROJECT_ID, dataset_id,
-                                     table_id, table_schema)
+    bq = big_query_utils.create_big_query()
+    with open(os.path.dirname(__file__) + '/scenario_result_schema.json',
+              'r') as f:
+        table_schema = json.loads(f.read())
+    desc = 'Results of performance benchmarks.'
+    return big_query_utils.patch_table(bq, _PROJECT_ID, dataset_id, table_id,
+                                       table_schema)
 
 
-argp = argparse.ArgumentParser(description='Patch schema of scenario results table.')
-argp.add_argument('--bq_result_table', required=True, default=None, type=str,
-                  help='Bigquery "dataset.table" to patch.')
+argp = argparse.ArgumentParser(
+    description='Patch schema of scenario results table.')
+argp.add_argument(
+    '--bq_result_table',
+    required=True,
+    default=None,
+    type=str,
+    help='Bigquery "dataset.table" to patch.')
 
 args = argp.parse_args()
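
For context, the reformatted script above is driven by its single required --bq_result_table flag. A minimal invocation sketch, with a placeholder table name:

    import subprocess

    # "example_dataset.example_table" is a placeholder; substitute a real
    # BigQuery "dataset.table" target.
    subprocess.check_call([
        "python",
        "tools/run_tests/performance/patch_scenario_results_schema.py",
        "--bq_result_table=example_dataset.example_table",
    ])
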
 
diff --git a/tools/run_tests/performance/scenario_config.py b/tools/run_tests/performance/scenario_config.py
index cafac3d..f057531 100644
--- a/tools/run_tests/performance/scenario_config.py
+++ b/tools/run_tests/performance/scenario_config.py
@@ -16,66 +16,64 @@
 
 import math
 
-WARMUP_SECONDS=5
-JAVA_WARMUP_SECONDS=15  # Java needs more warmup time for JIT to kick in.
-BENCHMARK_SECONDS=30
+WARMUP_SECONDS = 5
+JAVA_WARMUP_SECONDS = 15  # Java needs more warmup time for JIT to kick in.
+BENCHMARK_SECONDS = 30
 
-SMOKETEST='smoketest'
-SCALABLE='scalable'
-INPROC='inproc'
-SWEEP='sweep'
-DEFAULT_CATEGORIES=[SCALABLE, SMOKETEST]
+SMOKETEST = 'smoketest'
+SCALABLE = 'scalable'
+INPROC = 'inproc'
+SWEEP = 'sweep'
+DEFAULT_CATEGORIES = [SCALABLE, SMOKETEST]
 
-SECURE_SECARGS = {'use_test_ca': True,
-                  'server_host_override': 'foo.test.google.fr'}
+SECURE_SECARGS = {
+    'use_test_ca': True,
+    'server_host_override': 'foo.test.google.fr'
+}
 
 HISTOGRAM_PARAMS = {
-  'resolution': 0.01,
-  'max_possible': 60e9,
+    'resolution': 0.01,
+    'max_possible': 60e9,
 }
 
 # target number of RPCs outstanding across all client channels in
 # non-ping-pong tests (since we can only specify per-channel numbers, the
 # actual target will be slightly higher)
-OUTSTANDING_REQUESTS={
-    'async': 6400,
-    'async-limited': 800,
-    'sync': 1000
-}
+OUTSTANDING_REQUESTS = {'async': 6400, 'async-limited': 800, 'sync': 1000}
 
 # wide is the number of client channels in multi-channel tests (1 otherwise)
-WIDE=64
+WIDE = 64
 
 
 def _get_secargs(is_secure):
-  if is_secure:
-    return SECURE_SECARGS
-  else:
-    return None
+    if is_secure:
+        return SECURE_SECARGS
+    else:
+        return None
 
 
 def remove_nonproto_fields(scenario):
-  """Remove special-purpose that contains some extra info about the scenario
+    """Remove special-purpose that contains some extra info about the scenario
   but don't belong to the ScenarioConfig protobuf message"""
-  scenario.pop('CATEGORIES', None)
-  scenario.pop('CLIENT_LANGUAGE', None)
-  scenario.pop('SERVER_LANGUAGE', None)
-  scenario.pop('EXCLUDED_POLL_ENGINES', None)
-  return scenario
+    scenario.pop('CATEGORIES', None)
+    scenario.pop('CLIENT_LANGUAGE', None)
+    scenario.pop('SERVER_LANGUAGE', None)
+    scenario.pop('EXCLUDED_POLL_ENGINES', None)
+    return scenario
 
 
 def geometric_progression(start, stop, step):
-  n = start
-  while n < stop:
-    yield int(round(n))
-    n *= step
+    n = start
+    while n < stop:
+        yield int(round(n))
+        n *= step
 
 
 def _payload_type(use_generic_payload, req_size, resp_size):
     r = {}
     sizes = {
-      'req_size': req_size,
-      'resp_size': resp_size,
+        'req_size': req_size,
+        'resp_size': resp_size,
     }
     if use_generic_payload:
         r['bytebuf_params'] = sizes
@@ -83,6 +81,7 @@
         r['simple_params'] = sizes
     return r
 
+
 def _load_params(offered_load):
     r = {}
     if offered_load is None:
@@ -93,21 +92,25 @@
         r['poisson'] = load
     return r
 
-def _add_channel_arg(config, key, value):
-  if 'channel_args' in config:
-    channel_args = config['channel_args']
-  else:
-    channel_args = []
-    config['channel_args'] = channel_args
-  arg = {'name': key}
-  if isinstance(value, int):
-    arg['int_value'] = value
-  else:
-    arg['str_value'] = value
-  channel_args.append(arg)
 
-def _ping_pong_scenario(name, rpc_type,
-                        client_type, server_type,
+def _add_channel_arg(config, key, value):
+    if 'channel_args' in config:
+        channel_args = config['channel_args']
+    else:
+        channel_args = []
+        config['channel_args'] = channel_args
+    arg = {'name': key}
+    if isinstance(value, int):
+        arg['int_value'] = value
+    else:
+        arg['str_value'] = value
+    channel_args.append(arg)
+
+
+def _ping_pong_scenario(name,
+                        rpc_type,
+                        client_type,
+                        server_type,
                         secure=True,
                         use_generic_payload=False,
                         req_size=0,
@@ -128,824 +131,1033 @@
                         excluded_poll_engines=[],
                         minimal_stack=False,
                         offered_load=None):
-  """Creates a basic ping pong scenario."""
-  scenario = {
-    'name': name,
-    'num_servers': 1,
-    'num_clients': 1,
-    'client_config': {
-      'client_type': client_type,
-      'security_params': _get_secargs(secure),
-      'outstanding_rpcs_per_channel': 1,
-      'client_channels': 1,
-      'async_client_threads': 1,
-      'threads_per_cq': client_threads_per_cq,
-      'rpc_type': rpc_type,
-      'histogram_params': HISTOGRAM_PARAMS,
-      'channel_args': [],
-    },
-    'server_config': {
-      'server_type': server_type,
-      'security_params': _get_secargs(secure),
-      'async_server_threads': async_server_threads,
-      'threads_per_cq': server_threads_per_cq,
-      'channel_args': [],
-    },
-    'warmup_seconds': warmup_seconds,
-    'benchmark_seconds': BENCHMARK_SECONDS
-  }
-  if resource_quota_size:
-    scenario['server_config']['resource_quota_size'] = resource_quota_size
-  if use_generic_payload:
-    if server_type != 'ASYNC_GENERIC_SERVER':
-      raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
-    scenario['server_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
+    """Creates a basic ping pong scenario."""
+    scenario = {
+        'name': name,
+        'num_servers': 1,
+        'num_clients': 1,
+        'client_config': {
+            'client_type': client_type,
+            'security_params': _get_secargs(secure),
+            'outstanding_rpcs_per_channel': 1,
+            'client_channels': 1,
+            'async_client_threads': 1,
+            'threads_per_cq': client_threads_per_cq,
+            'rpc_type': rpc_type,
+            'histogram_params': HISTOGRAM_PARAMS,
+            'channel_args': [],
+        },
+        'server_config': {
+            'server_type': server_type,
+            'security_params': _get_secargs(secure),
+            'async_server_threads': async_server_threads,
+            'threads_per_cq': server_threads_per_cq,
+            'channel_args': [],
+        },
+        'warmup_seconds': warmup_seconds,
+        'benchmark_seconds': BENCHMARK_SECONDS
+    }
+    if resource_quota_size:
+        scenario['server_config']['resource_quota_size'] = resource_quota_size
+    if use_generic_payload:
+        if server_type != 'ASYNC_GENERIC_SERVER':
+            raise Exception('Use ASYNC_GENERIC_SERVER for generic payload.')
+        scenario['server_config']['payload_config'] = _payload_type(
+            use_generic_payload, req_size, resp_size)
 
-  scenario['client_config']['payload_config'] = _payload_type(use_generic_payload, req_size, resp_size)
+    scenario['client_config']['payload_config'] = _payload_type(
+        use_generic_payload, req_size, resp_size)
 
-  # Optimization target of 'throughput' does not work well with epoll1 polling
-  # engine. Use the default value of 'blend'
-  optimization_target = 'throughput'
+    # Optimization target of 'throughput' does not work well with epoll1 polling
+    # engine. Use the default value of 'blend'
+    optimization_target = 'throughput'
 
-  if unconstrained_client:
-    outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[unconstrained_client]
-    # clamp buffer usage to something reasonable (16 gig for now)
-    MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
-    if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
-        outstanding_calls = max(1, MAX_MEMORY_USE / max(req_size, resp_size))
-    wide = channels if channels is not None else WIDE
-    deep = int(math.ceil(1.0 * outstanding_calls / wide))
+    if unconstrained_client:
+        outstanding_calls = outstanding if outstanding is not None else OUTSTANDING_REQUESTS[
+            unconstrained_client]
+        # clamp buffer usage to something reasonable (16 gig for now)
+        MAX_MEMORY_USE = 16 * 1024 * 1024 * 1024
+        if outstanding_calls * max(req_size, resp_size) > MAX_MEMORY_USE:
+            outstanding_calls = max(1,
+                                    MAX_MEMORY_USE / max(req_size, resp_size))
+        wide = channels if channels is not None else WIDE
+        deep = int(math.ceil(1.0 * outstanding_calls / wide))
 
-    scenario['num_clients'] = num_clients if num_clients is not None else 0  # use as many clients as available.
-    scenario['client_config']['outstanding_rpcs_per_channel'] = deep
-    scenario['client_config']['client_channels'] = wide
-    scenario['client_config']['async_client_threads'] = 0
-    if offered_load is not None:
+        scenario[
+            'num_clients'] = num_clients if num_clients is not None else 0  # use as many clients as available.
+        scenario['client_config']['outstanding_rpcs_per_channel'] = deep
+        scenario['client_config']['client_channels'] = wide
+        scenario['client_config']['async_client_threads'] = 0
+        if offered_load is not None:
+            optimization_target = 'latency'
+    else:
+        scenario['client_config']['outstanding_rpcs_per_channel'] = 1
+        scenario['client_config']['client_channels'] = 1
+        scenario['client_config']['async_client_threads'] = 1
         optimization_target = 'latency'
-  else:
-    scenario['client_config']['outstanding_rpcs_per_channel'] = 1
-    scenario['client_config']['client_channels'] = 1
-    scenario['client_config']['async_client_threads'] = 1
-    optimization_target = 'latency'
 
-  scenario['client_config']['load_params'] = _load_params(offered_load)
+    scenario['client_config']['load_params'] = _load_params(offered_load)
 
-  optimization_channel_arg = {
-    'name': 'grpc.optimization_target',
-    'str_value': optimization_target
-  }
-  scenario['client_config']['channel_args'].append(optimization_channel_arg)
-  scenario['server_config']['channel_args'].append(optimization_channel_arg)
+    optimization_channel_arg = {
+        'name': 'grpc.optimization_target',
+        'str_value': optimization_target
+    }
+    scenario['client_config']['channel_args'].append(optimization_channel_arg)
+    scenario['server_config']['channel_args'].append(optimization_channel_arg)
 
-  if minimal_stack:
-    _add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
-    _add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
+    if minimal_stack:
+        _add_channel_arg(scenario['client_config'], 'grpc.minimal_stack', 1)
+        _add_channel_arg(scenario['server_config'], 'grpc.minimal_stack', 1)
 
-  if messages_per_stream:
-    scenario['client_config']['messages_per_stream'] = messages_per_stream
-  if client_language:
-    # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
-    scenario['CLIENT_LANGUAGE'] = client_language
-  if server_language:
-    # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
-    scenario['SERVER_LANGUAGE'] = server_language
-  if categories:
-    scenario['CATEGORIES'] = categories
-  if len(excluded_poll_engines):
-    # The polling engines for which this scenario is excluded
-    scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
-  return scenario
+    if messages_per_stream:
+        scenario['client_config']['messages_per_stream'] = messages_per_stream
+    if client_language:
+        # the CLIENT_LANGUAGE field is recognized by run_performance_tests.py
+        scenario['CLIENT_LANGUAGE'] = client_language
+    if server_language:
+        # the SERVER_LANGUAGE field is recognized by run_performance_tests.py
+        scenario['SERVER_LANGUAGE'] = server_language
+    if categories:
+        scenario['CATEGORIES'] = categories
+    if len(excluded_poll_engines):
+        # The polling engines for which this scenario is excluded
+        scenario['EXCLUDED_POLL_ENGINES'] = excluded_poll_engines
+    return scenario
 
 
 class CXXLanguage:
 
-  def __init__(self):
-    self.safename = 'cxx'
+    def __init__(self):
+        self.safename = 'cxx'
 
-  def worker_cmdline(self):
-    return ['bins/opt/qps_worker']
+    def worker_cmdline(self):
+        return ['bins/opt/qps_worker']
 
-  def worker_port_offset(self):
-    return 0
+    def worker_port_offset(self):
+        return 0
 
-  def scenarios(self):
-    # TODO(ctiller): add 70% load latency test
-    yield _ping_pong_scenario(
-      'cpp_protobuf_async_unary_1channel_100rpcs_1MB', rpc_type='UNARY',
-      client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-      req_size=1024*1024, resp_size=1024*1024,
-      unconstrained_client='async', outstanding=100, channels=1,
-      num_clients=1,
-      secure=False,
-      categories=[SMOKETEST] + [INPROC] + [SCALABLE])
-
-    yield _ping_pong_scenario(
-      'cpp_protobuf_async_streaming_from_client_1channel_1MB', rpc_type='STREAMING_FROM_CLIENT',
-      client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-      req_size=1024*1024, resp_size=1024*1024,
-      unconstrained_client='async', outstanding=1, channels=1,
-      num_clients=1,
-      secure=False,
-      categories=[SMOKETEST] + [INPROC] + [SCALABLE])
-
-    yield _ping_pong_scenario(
-       'cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp',
-       rpc_type='UNARY', client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-       req_size=300, resp_size=50,
-       unconstrained_client='async', outstanding=30000, channels=300,
-       offered_load=37500, secure=False,
-       async_server_threads=16, server_threads_per_cq=1,
-       categories=[SMOKETEST] + [SCALABLE])
-
-    for secure in [True, False]:
-      secstr = 'secure' if secure else 'insecure'
-      smoketest_categories = ([SMOKETEST] if secure else [INPROC]) + [SCALABLE]
-
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_ping_pong_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          use_generic_payload=True, async_server_threads=1,
-          secure=secure,
-          categories=smoketest_categories)
-
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories+[SCALABLE])
-
-      for mps in geometric_progression(1, 20, 10):
+    def scenarios(self):
+        # TODO(ctiller): add 70% load latency test
         yield _ping_pong_scenario(
-            'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' % (mps, secstr),
-            rpc_type='STREAMING',
+            'cpp_protobuf_async_unary_1channel_100rpcs_1MB',
+            rpc_type='UNARY',
             client_type='ASYNC_CLIENT',
-            server_type='ASYNC_GENERIC_SERVER',
-            unconstrained_client='async', use_generic_payload=True,
-            secure=secure, messages_per_stream=mps,
-            minimal_stack=not secure,
-            categories=smoketest_categories+[SCALABLE])
+            server_type='ASYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            unconstrained_client='async',
+            outstanding=100,
+            channels=1,
+            num_clients=1,
+            secure=False,
+            categories=[SMOKETEST] + [INPROC] + [SCALABLE])
 
-      for mps in geometric_progression(1, 200, math.sqrt(10)):
         yield _ping_pong_scenario(
-            'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' % (mps, secstr),
-            rpc_type='STREAMING',
+            'cpp_protobuf_async_streaming_from_client_1channel_1MB',
+            rpc_type='STREAMING_FROM_CLIENT',
             client_type='ASYNC_CLIENT',
-            server_type='ASYNC_GENERIC_SERVER',
-            unconstrained_client='async', use_generic_payload=True,
-            secure=secure, messages_per_stream=mps,
-            minimal_stack=not secure,
-            categories=[SWEEP])
+            server_type='ASYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            unconstrained_client='async',
+            outstanding=1,
+            channels=1,
+            num_clients=1,
+            secure=False,
+            categories=[SMOKETEST] + [INPROC] + [SCALABLE])
 
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
-          rpc_type='STREAMING',
-          req_size=1024*1024,
-          resp_size=1024*1024,
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories+[SCALABLE],
-          channels=1, outstanding=100)
+        yield _ping_pong_scenario(
+            'cpp_protobuf_async_unary_75Kqps_600channel_60Krpcs_300Breq_50Bresp',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            req_size=300,
+            resp_size=50,
+            unconstrained_client='async',
+            outstanding=30000,
+            channels=300,
+            offered_load=37500,
+            secure=False,
+            async_server_threads=16,
+            server_threads_per_cq=1,
+            categories=[SMOKETEST] + [SCALABLE])
 
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' % secstr,
-          rpc_type='STREAMING',
-          req_size=64*1024,
-          resp_size=64*1024,
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories+[SCALABLE])
+        for secure in [True, False]:
+            secstr = 'secure' if secure else 'insecure'
+            smoketest_categories = ([SMOKETEST]
+                                    if secure else [INPROC]) + [SCALABLE]
 
-      # TODO(https://github.com/grpc/grpc/issues/11500) Re-enable this test
-      #yield _ping_pong_scenario(
-      #    'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
-      #    rpc_type='STREAMING',
-      #    client_type='ASYNC_CLIENT',
-      #    server_type='ASYNC_GENERIC_SERVER',
-      #    unconstrained_client='async-limited', use_generic_payload=True,
-      #    secure=secure,
-      #    client_threads_per_cq=1000000, server_threads_per_cq=1000000,
-      #    categories=smoketest_categories+[SCALABLE])
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                use_generic_payload=True,
+                async_server_threads=1,
+                secure=secure,
+                categories=smoketest_categories)
 
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          client_threads_per_cq=2, server_threads_per_cq=2,
-          categories=smoketest_categories+[SCALABLE])
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE])
 
-      #yield _ping_pong_scenario(
-      #    'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' % secstr,
-      #    rpc_type='STREAMING',
-      #    client_type='ASYNC_CLIENT',
-      #    server_type='ASYNC_SERVER',
-      #    unconstrained_client='async-limited',
-      #    secure=secure,
-      #    client_threads_per_cq=1000000, server_threads_per_cq=1000000,
-      #    categories=smoketest_categories+[SCALABLE])
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          client_threads_per_cq=2, server_threads_per_cq=2,
-          categories=smoketest_categories+[SCALABLE])
-
-      #yield _ping_pong_scenario(
-      #    'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
-      #    rpc_type='UNARY',
-      #    client_type='ASYNC_CLIENT',
-      #    server_type='ASYNC_SERVER',
-      #    unconstrained_client='async-limited',
-      #    secure=secure,
-      #    client_threads_per_cq=1000000, server_threads_per_cq=1000000,
-      #    categories=smoketest_categories+[SCALABLE])
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' % secstr,
-          rpc_type='UNARY',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          client_threads_per_cq=2, server_threads_per_cq=2,
-          categories=smoketest_categories+[SCALABLE])
-
-      yield _ping_pong_scenario(
-          'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async-limited', use_generic_payload=True,
-          async_server_threads=1,
-          minimal_stack=not secure,
-          secure=secure)
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s' %
-          (secstr),
-          rpc_type='UNARY',
-          client_type='ASYNC_CLIENT',
-          server_type='SYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories + [SCALABLE],
-          excluded_poll_engines = ['poll-cv'])
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s' %
-          (secstr),
-          rpc_type='UNARY',
-          client_type='ASYNC_CLIENT',
-          server_type='ASYNC_SERVER',
-          channels=1,
-          outstanding=64,
-          req_size=128,
-          resp_size=8*1024*1024,
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories + [SCALABLE])
-
-      yield _ping_pong_scenario(
-          'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s' % secstr,
-          rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT',
-          server_type='SYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          minimal_stack=not secure,
-          categories=smoketest_categories+[SCALABLE],
-          excluded_poll_engines = ['poll-cv'])
-
-      yield _ping_pong_scenario(
-        'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr, rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        secure=secure,
-        minimal_stack=not secure,
-        categories=smoketest_categories + [SCALABLE])
-
-      for rpc_type in ['unary', 'streaming', 'streaming_from_client', 'streaming_from_server']:
-        for synchronicity in ['sync', 'async']:
-          yield _ping_pong_scenario(
-              'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity, rpc_type, secstr),
-              rpc_type=rpc_type.upper(),
-              client_type='%s_CLIENT' % synchronicity.upper(),
-              server_type='%s_SERVER' % synchronicity.upper(),
-              async_server_threads=1,
-              minimal_stack=not secure,
-              secure=secure)
-
-          for size in geometric_progression(1, 1024*1024*1024+1, 8):
-              yield _ping_pong_scenario(
-                  'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' % (synchronicity, rpc_type, secstr, size),
-                  rpc_type=rpc_type.upper(),
-                  req_size=size,
-                  resp_size=size,
-                  client_type='%s_CLIENT' % synchronicity.upper(),
-                  server_type='%s_SERVER' % synchronicity.upper(),
-                  unconstrained_client=synchronicity,
-                  secure=secure,
-                  minimal_stack=not secure,
-                  categories=[SWEEP])
-
-          yield _ping_pong_scenario(
-              'cpp_protobuf_%s_%s_qps_unconstrained_%s' % (synchronicity, rpc_type, secstr),
-              rpc_type=rpc_type.upper(),
-              client_type='%s_CLIENT' % synchronicity.upper(),
-              server_type='%s_SERVER' % synchronicity.upper(),
-              unconstrained_client=synchronicity,
-              secure=secure,
-              minimal_stack=not secure,
-              server_threads_per_cq=3,
-              client_threads_per_cq=3,
-              categories=smoketest_categories+[SCALABLE])
-
-          # TODO(vjpai): Re-enable this test. It has a lot of timeouts
-          # and hasn't yet been conclusively identified as a test failure
-          # or race in the library
-          # yield _ping_pong_scenario(
-          #     'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
-          #     rpc_type=rpc_type.upper(),
-          #     client_type='%s_CLIENT' % synchronicity.upper(),
-          #     server_type='%s_SERVER' % synchronicity.upper(),
-          #     unconstrained_client=synchronicity,
-          #     secure=secure,
-          #     categories=smoketest_categories+[SCALABLE],
-          #     resource_quota_size=500*1024)
-
-          if rpc_type == 'streaming':
             for mps in geometric_progression(1, 20, 10):
-              yield _ping_pong_scenario(
-                  'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s' % (synchronicity, rpc_type, mps, secstr),
-                  rpc_type=rpc_type.upper(),
-                  client_type='%s_CLIENT' % synchronicity.upper(),
-                  server_type='%s_SERVER' % synchronicity.upper(),
-                  unconstrained_client=synchronicity,
-                  secure=secure, messages_per_stream=mps,
-                  minimal_stack=not secure,
-                  categories=smoketest_categories+[SCALABLE])
+                yield _ping_pong_scenario(
+                    'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+                    (mps, secstr),
+                    rpc_type='STREAMING',
+                    client_type='ASYNC_CLIENT',
+                    server_type='ASYNC_GENERIC_SERVER',
+                    unconstrained_client='async',
+                    use_generic_payload=True,
+                    secure=secure,
+                    messages_per_stream=mps,
+                    minimal_stack=not secure,
+                    categories=smoketest_categories + [SCALABLE])
 
             for mps in geometric_progression(1, 200, math.sqrt(10)):
-              yield _ping_pong_scenario(
-                  'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s' % (synchronicity, rpc_type, mps, secstr),
-                  rpc_type=rpc_type.upper(),
-                  client_type='%s_CLIENT' % synchronicity.upper(),
-                  server_type='%s_SERVER' % synchronicity.upper(),
-                  unconstrained_client=synchronicity,
-                  secure=secure, messages_per_stream=mps,
-                  minimal_stack=not secure,
-                  categories=[SWEEP])
-
-          for channels in geometric_progression(1, 20000, math.sqrt(10)):
-            for outstanding in geometric_progression(1, 200000, math.sqrt(10)):
-                if synchronicity == 'sync' and outstanding > 1200: continue
-                if outstanding < channels: continue
                 yield _ping_pong_scenario(
-                    'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding' % (synchronicity, rpc_type, secstr, channels, outstanding),
-                    rpc_type=rpc_type.upper(),
-                    client_type='%s_CLIENT' % synchronicity.upper(),
-                    server_type='%s_SERVER' % synchronicity.upper(),
-                    unconstrained_client=synchronicity, secure=secure,
+                    'cpp_generic_async_streaming_qps_unconstrained_%smps_%s' %
+                    (mps, secstr),
+                    rpc_type='STREAMING',
+                    client_type='ASYNC_CLIENT',
+                    server_type='ASYNC_GENERIC_SERVER',
+                    unconstrained_client='async',
+                    use_generic_payload=True,
+                    secure=secure,
+                    messages_per_stream=mps,
                     minimal_stack=not secure,
-                    categories=[SWEEP], channels=channels, outstanding=outstanding)
+                    categories=[SWEEP])
 
-  def __str__(self):
-    return 'c++'
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_1channel_1MBmsg_%s' % secstr,
+                rpc_type='STREAMING',
+                req_size=1024 * 1024,
+                resp_size=1024 * 1024,
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE],
+                channels=1,
+                outstanding=100)
+
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_unconstrained_64KBmsg_%s' %
+                secstr,
+                rpc_type='STREAMING',
+                req_size=64 * 1024,
+                resp_size=64 * 1024,
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_unconstrained_1cq_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async-limited',
+                use_generic_payload=True,
+                secure=secure,
+                client_threads_per_cq=1000000,
+                server_threads_per_cq=1000000,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_unconstrained_2waysharedcq_%s'
+                % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                client_threads_per_cq=2,
+                server_threads_per_cq=2,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_streaming_qps_unconstrained_1cq_%s' %
+                secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async-limited',
+                secure=secure,
+                client_threads_per_cq=1000000,
+                server_threads_per_cq=1000000,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_streaming_qps_unconstrained_2waysharedcq_%s'
+                % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                client_threads_per_cq=2,
+                server_threads_per_cq=2,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_unary_qps_unconstrained_1cq_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async-limited',
+                secure=secure,
+                client_threads_per_cq=1000000,
+                server_threads_per_cq=1000000,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_unary_qps_unconstrained_2waysharedcq_%s' %
+                secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                client_threads_per_cq=2,
+                server_threads_per_cq=2,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_generic_async_streaming_qps_one_server_core_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async-limited',
+                use_generic_payload=True,
+                async_server_threads=1,
+                minimal_stack=not secure,
+                secure=secure)
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_client_sync_server_unary_qps_unconstrained_%s'
+                % (secstr),
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE],
+                excluded_poll_engines=['poll-cv'])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_client_unary_1channel_64wide_128Breq_8MBresp_%s'
+                % (secstr),
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                channels=1,
+                outstanding=64,
+                req_size=128,
+                resp_size=8 * 1024 * 1024,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_client_sync_server_streaming_qps_unconstrained_%s'
+                % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE],
+                excluded_poll_engines=['poll-cv'])
+
+            yield _ping_pong_scenario(
+                'cpp_protobuf_async_unary_ping_pong_%s_1MB' % secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                req_size=1024 * 1024,
+                resp_size=1024 * 1024,
+                secure=secure,
+                minimal_stack=not secure,
+                categories=smoketest_categories + [SCALABLE])
+
+            for rpc_type in [
+                    'unary', 'streaming', 'streaming_from_client',
+                    'streaming_from_server'
+            ]:
+                for synchronicity in ['sync', 'async']:
+                    yield _ping_pong_scenario(
+                        'cpp_protobuf_%s_%s_ping_pong_%s' % (synchronicity,
+                                                             rpc_type, secstr),
+                        rpc_type=rpc_type.upper(),
+                        client_type='%s_CLIENT' % synchronicity.upper(),
+                        server_type='%s_SERVER' % synchronicity.upper(),
+                        async_server_threads=1,
+                        minimal_stack=not secure,
+                        secure=secure)
+
+                    for size in geometric_progression(1, 1024 * 1024 * 1024 + 1,
+                                                      8):
+                        yield _ping_pong_scenario(
+                            'cpp_protobuf_%s_%s_qps_unconstrained_%s_%db' %
+                            (synchronicity, rpc_type, secstr, size),
+                            rpc_type=rpc_type.upper(),
+                            req_size=size,
+                            resp_size=size,
+                            client_type='%s_CLIENT' % synchronicity.upper(),
+                            server_type='%s_SERVER' % synchronicity.upper(),
+                            unconstrained_client=synchronicity,
+                            secure=secure,
+                            minimal_stack=not secure,
+                            categories=[SWEEP])
+
+                    yield _ping_pong_scenario(
+                        'cpp_protobuf_%s_%s_qps_unconstrained_%s' %
+                        (synchronicity, rpc_type, secstr),
+                        rpc_type=rpc_type.upper(),
+                        client_type='%s_CLIENT' % synchronicity.upper(),
+                        server_type='%s_SERVER' % synchronicity.upper(),
+                        unconstrained_client=synchronicity,
+                        secure=secure,
+                        minimal_stack=not secure,
+                        server_threads_per_cq=3,
+                        client_threads_per_cq=3,
+                        categories=smoketest_categories + [SCALABLE])
+
+                    # TODO(vjpai): Re-enable this test. It times out a lot,
+                    # and it is not yet clear whether that is a problem in
+                    # the test or a race in the library.
+                    # yield _ping_pong_scenario(
+                    #     'cpp_protobuf_%s_%s_qps_unconstrained_%s_500kib_resource_quota' % (synchronicity, rpc_type, secstr),
+                    #     rpc_type=rpc_type.upper(),
+                    #     client_type='%s_CLIENT' % synchronicity.upper(),
+                    #     server_type='%s_SERVER' % synchronicity.upper(),
+                    #     unconstrained_client=synchronicity,
+                    #     secure=secure,
+                    #     categories=smoketest_categories+[SCALABLE],
+                    #     resource_quota_size=500*1024)
+
+                    if rpc_type == 'streaming':
+                        for mps in geometric_progression(1, 20, 10):
+                            yield _ping_pong_scenario(
+                                'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+                                % (synchronicity, rpc_type, mps, secstr),
+                                rpc_type=rpc_type.upper(),
+                                client_type='%s_CLIENT' % synchronicity.upper(),
+                                server_type='%s_SERVER' % synchronicity.upper(),
+                                unconstrained_client=synchronicity,
+                                secure=secure,
+                                messages_per_stream=mps,
+                                minimal_stack=not secure,
+                                categories=smoketest_categories + [SCALABLE])
+
+                        for mps in geometric_progression(1, 200, math.sqrt(10)):
+                            yield _ping_pong_scenario(
+                                'cpp_protobuf_%s_%s_qps_unconstrained_%smps_%s'
+                                % (synchronicity, rpc_type, mps, secstr),
+                                rpc_type=rpc_type.upper(),
+                                client_type='%s_CLIENT' % synchronicity.upper(),
+                                server_type='%s_SERVER' % synchronicity.upper(),
+                                unconstrained_client=synchronicity,
+                                secure=secure,
+                                messages_per_stream=mps,
+                                minimal_stack=not secure,
+                                categories=[SWEEP])
+
+                    for channels in geometric_progression(
+                            1, 20000, math.sqrt(10)):
+                        for outstanding in geometric_progression(
+                                1, 200000, math.sqrt(10)):
+                            if synchronicity == 'sync' and outstanding > 1200:
+                                continue
+                            if outstanding < channels:
+                                continue
+                            yield _ping_pong_scenario(
+                                'cpp_protobuf_%s_%s_qps_unconstrained_%s_%d_channels_%d_outstanding'
+                                % (synchronicity, rpc_type, secstr, channels,
+                                   outstanding),
+                                rpc_type=rpc_type.upper(),
+                                client_type='%s_CLIENT' % synchronicity.upper(),
+                                server_type='%s_SERVER' % synchronicity.upper(),
+                                unconstrained_client=synchronicity,
+                                secure=secure,
+                                minimal_stack=not secure,
+                                categories=[SWEEP],
+                                channels=channels,
+                                outstanding=outstanding)
+
+    def __str__(self):
+        return 'c++'
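
For reference, the channel/outstanding/messages-per-stream sweeps above rely on
a geometric_progression() helper defined earlier in scenario_config.py (not
shown in this hunk). A minimal sketch of the behavior those loops assume, where
the exact rounding is an assumption:

    def geometric_progression(start, stop, step):
        # Yield start, start*step, start*step**2, ... while below stop.
        n = start
        while n < stop:
            yield int(round(n))
            n *= step

    # e.g. list(geometric_progression(1, 20, 10)) -> [1, 10], and
    # geometric_progression(1, 200, math.sqrt(10)) sweeps roughly
    # 1, 3, 10, 32, 100.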
 
 
 class CSharpLanguage:
 
-  def __init__(self):
-    self.safename = str(self)
+    def __init__(self):
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_csharp.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_csharp.sh']
 
-  def worker_port_offset(self):
-    return 100
+    def worker_port_offset(self):
+        return 100
 
-  def scenarios(self):
-    yield _ping_pong_scenario(
-        'csharp_generic_async_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-        use_generic_payload=True,
-        categories=[SMOKETEST, SCALABLE])
+    def scenarios(self):
+        yield _ping_pong_scenario(
+            'csharp_generic_async_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_GENERIC_SERVER',
+            use_generic_payload=True,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_generic_async_streaming_ping_pong_insecure_1MB', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        use_generic_payload=True,
-        secure=False,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_generic_async_streaming_ping_pong_insecure_1MB',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_GENERIC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            use_generic_payload=True,
+            secure=False,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_generic_async_streaming_qps_unconstrained_insecure', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-        unconstrained_client='async', use_generic_payload=True,
-        secure=False,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_generic_async_streaming_qps_unconstrained_insecure',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_GENERIC_SERVER',
+            unconstrained_client='async',
+            use_generic_payload=True,
+            secure=False,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER')
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_unary_ping_pong', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_sync_to_async_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
+        yield _ping_pong_scenario(
+            'csharp_protobuf_sync_to_async_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER')
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='async',
-        categories=[SMOKETEST,SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='async',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_streaming_qps_unconstrained', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='async',
-        categories=[SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_streaming_qps_unconstrained',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='async',
+            categories=[SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_to_cpp_protobuf_sync_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_to_cpp_protobuf_async_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            'csharp_to_cpp_protobuf_async_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        'csharp_to_cpp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='async', server_language='c++',
-        categories=[SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_to_cpp_protobuf_async_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='async',
+            server_language='c++',
+            categories=[SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='sync', server_language='c++',
-        categories=[SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_to_cpp_protobuf_sync_to_async_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='sync',
+            server_language='c++',
+            categories=[SCALABLE])
 
-    yield _ping_pong_scenario(
-        'cpp_to_csharp_protobuf_async_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='async', client_language='c++',
-        categories=[SCALABLE])
+        yield _ping_pong_scenario(
+            'cpp_to_csharp_protobuf_async_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='async',
+            client_language='c++',
+            categories=[SCALABLE])
 
-    yield _ping_pong_scenario(
-        'csharp_protobuf_async_unary_ping_pong_1MB', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'csharp_protobuf_async_unary_ping_pong_1MB',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            categories=[SMOKETEST, SCALABLE])
 
-  def __str__(self):
-    return 'csharp'
+    def __str__(self):
+        return 'csharp'
+
 
 class PythonLanguage:
 
-  def __init__(self):
-    self.safename = 'python'
+    def __init__(self):
+        self.safename = 'python'
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_python.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_python.sh']
 
-  def worker_port_offset(self):
-    return 500
+    def worker_port_offset(self):
+        return 500
 
-  def scenarios(self):
-    yield _ping_pong_scenario(
-        'python_generic_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-        use_generic_payload=True,
-        categories=[SMOKETEST, SCALABLE])
+    def scenarios(self):
+        yield _ping_pong_scenario(
+            'python_generic_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_GENERIC_SERVER',
+            use_generic_payload=True,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER')
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER')
 
-    yield _ping_pong_scenario(
-        'python_protobuf_async_unary_ping_pong', rpc_type='UNARY',
-        client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER')
+        yield _ping_pong_scenario(
+            'python_protobuf_async_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='ASYNC_CLIENT',
+            server_type='ASYNC_SERVER')
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_streaming_qps_unconstrained',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        'python_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', async_server_threads=1,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'python_to_cpp_protobuf_sync_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1,
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'python_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            'python_to_cpp_protobuf_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        'python_protobuf_sync_unary_ping_pong_1MB', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'python_protobuf_sync_unary_ping_pong_1MB',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            categories=[SMOKETEST, SCALABLE])
 
-  def __str__(self):
-    return 'python'
+    def __str__(self):
+        return 'python'
+
 
 class RubyLanguage:
 
-  def __init__(self):
-    pass
-    self.safename = str(self)
+    def __init__(self):
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_ruby.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_ruby.sh']
 
-  def worker_port_offset(self):
-    return 300
+    def worker_port_offset(self):
+        return 300
 
-  def scenarios(self):
-    yield _ping_pong_scenario(
-        'ruby_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        categories=[SMOKETEST, SCALABLE])
+    def scenarios(self):
+        yield _ping_pong_scenario(
+            'ruby_protobuf_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'ruby_protobuf_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'ruby_protobuf_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            categories=[SMOKETEST, SCALABLE])
 
-    yield _ping_pong_scenario(
-        'ruby_protobuf_sync_unary_qps_unconstrained', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            'ruby_protobuf_sync_unary_qps_unconstrained',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        'ruby_protobuf_sync_streaming_qps_unconstrained', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            'ruby_protobuf_sync_streaming_qps_unconstrained',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        'ruby_to_cpp_protobuf_sync_unary_ping_pong', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            'ruby_to_cpp_protobuf_sync_unary_ping_pong',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        'ruby_to_cpp_protobuf_sync_streaming_ping_pong', rpc_type='STREAMING',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            'ruby_to_cpp_protobuf_sync_streaming_ping_pong',
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        'ruby_protobuf_unary_ping_pong_1MB', rpc_type='UNARY',
-        client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        req_size=1024*1024, resp_size=1024*1024,
-        categories=[SMOKETEST, SCALABLE])
+        yield _ping_pong_scenario(
+            'ruby_protobuf_unary_ping_pong_1MB',
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            req_size=1024 * 1024,
+            resp_size=1024 * 1024,
+            categories=[SMOKETEST, SCALABLE])
 
-  def __str__(self):
-    return 'ruby'
+    def __str__(self):
+        return 'ruby'
 
 
 class Php7Language:
 
-  def __init__(self, php7_protobuf_c=False):
-    pass
-    self.php7_protobuf_c=php7_protobuf_c
-    self.safename = str(self)
+    def __init__(self, php7_protobuf_c=False):
+        self.php7_protobuf_c = php7_protobuf_c
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    if self.php7_protobuf_c:
-        return ['tools/run_tests/performance/run_worker_php.sh', '--use_protobuf_c_extension']
-    return ['tools/run_tests/performance/run_worker_php.sh']
+    def worker_cmdline(self):
+        if self.php7_protobuf_c:
+            return [
+                'tools/run_tests/performance/run_worker_php.sh',
+                '--use_protobuf_c_extension'
+            ]
+        return ['tools/run_tests/performance/run_worker_php.sh']
 
-  def worker_port_offset(self):
-    if self.php7_protobuf_c:
-        return 900
-    return 800
+    def worker_port_offset(self):
+        if self.php7_protobuf_c:
+            return 900
+        return 800
 
-  def scenarios(self):
-    php7_extension_mode='php7_protobuf_php_extension'
-    if self.php7_protobuf_c:
-        php7_extension_mode='php7_protobuf_c_extension'
+    def scenarios(self):
+        php7_extension_mode = 'php7_protobuf_php_extension'
+        if self.php7_protobuf_c:
+            php7_extension_mode = 'php7_protobuf_c_extension'
 
-    yield _ping_pong_scenario(
-        '%s_to_cpp_protobuf_sync_unary_ping_pong' % php7_extension_mode,
-        rpc_type='UNARY', client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            '%s_to_cpp_protobuf_sync_unary_ping_pong' % php7_extension_mode,
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    yield _ping_pong_scenario(
-        '%s_to_cpp_protobuf_sync_streaming_ping_pong' % php7_extension_mode,
-        rpc_type='STREAMING', client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-        server_language='c++', async_server_threads=1)
+        yield _ping_pong_scenario(
+            '%s_to_cpp_protobuf_sync_streaming_ping_pong' % php7_extension_mode,
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='SYNC_SERVER',
+            server_language='c++',
+            async_server_threads=1)
 
-    # TODO(ddyihai): Investigate why when async_server_threads=1/CPU usage 340%, the QPS performs
-    # better than async_server_threads=0/CPU usage 490%.
-    yield _ping_pong_scenario(
-        '%s_to_cpp_protobuf_sync_unary_qps_unconstrained' % php7_extension_mode,
-        rpc_type='UNARY', client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', outstanding=1, async_server_threads=1, unconstrained_client='sync')
+        # TODO(ddyihai): Investigate why QPS is better with
+        # async_server_threads=1 (CPU usage 340%) than with
+        # async_server_threads=0 (CPU usage 490%).
+        yield _ping_pong_scenario(
+            '%s_to_cpp_protobuf_sync_unary_qps_unconstrained' %
+            php7_extension_mode,
+            rpc_type='UNARY',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            outstanding=1,
+            async_server_threads=1,
+            unconstrained_client='sync')
 
-    yield _ping_pong_scenario(
-        '%s_to_cpp_protobuf_sync_streaming_qps_unconstrained' % php7_extension_mode,
-        rpc_type='STREAMING', client_type='SYNC_CLIENT', server_type='ASYNC_SERVER',
-        server_language='c++', outstanding=1, async_server_threads=1, unconstrained_client='sync')
+        yield _ping_pong_scenario(
+            '%s_to_cpp_protobuf_sync_streaming_qps_unconstrained' %
+            php7_extension_mode,
+            rpc_type='STREAMING',
+            client_type='SYNC_CLIENT',
+            server_type='ASYNC_SERVER',
+            server_language='c++',
+            outstanding=1,
+            async_server_threads=1,
+            unconstrained_client='sync')
 
-  def __str__(self):
-    if self.php7_protobuf_c:
-        return 'php7_protobuf_c'
-    return 'php7'
+    def __str__(self):
+        if self.php7_protobuf_c:
+            return 'php7_protobuf_c'
+        return 'php7'
+
 
 class JavaLanguage:
 
-  def __init__(self):
-    pass
-    self.safename = str(self)
+    def __init__(self):
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_java.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_java.sh']
 
-  def worker_port_offset(self):
-    return 400
+    def worker_port_offset(self):
+        return 400
 
-  def scenarios(self):
-    for secure in [True, False]:
-      secstr = 'secure' if secure else 'insecure'
-      smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+    def scenarios(self):
+        for secure in [True, False]:
+            secstr = 'secure' if secure else 'insecure'
+            smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
 
-      yield _ping_pong_scenario(
-          'java_generic_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          use_generic_payload=True, async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=smoketest_categories)
+            yield _ping_pong_scenario(
+                'java_generic_async_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                use_generic_payload=True,
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=smoketest_categories)
 
-      yield _ping_pong_scenario(
-          'java_protobuf_async_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-          async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
+            yield _ping_pong_scenario(
+                'java_protobuf_async_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS)
 
-      yield _ping_pong_scenario(
-          'java_protobuf_async_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-          async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=smoketest_categories)
+            yield _ping_pong_scenario(
+                'java_protobuf_async_unary_ping_pong_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=smoketest_categories)
 
-      yield _ping_pong_scenario(
-          'java_protobuf_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
+            yield _ping_pong_scenario(
+                'java_protobuf_unary_ping_pong_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS)
 
-      yield _ping_pong_scenario(
-          'java_protobuf_async_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=smoketest_categories+[SCALABLE])
+            yield _ping_pong_scenario(
+                'java_protobuf_async_unary_qps_unconstrained_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=smoketest_categories + [SCALABLE])
 
-      yield _ping_pong_scenario(
-          'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=[SCALABLE])
+            yield _ping_pong_scenario(
+                'java_protobuf_async_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=[SCALABLE])
 
-      yield _ping_pong_scenario(
-          'java_generic_async_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS,
-          categories=[SCALABLE])
+            yield _ping_pong_scenario(
+                'java_generic_async_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS,
+                categories=[SCALABLE])
 
-      yield _ping_pong_scenario(
-          'java_generic_async_streaming_qps_one_server_core_%s' % secstr, rpc_type='STREAMING',
-          client_type='ASYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async-limited', use_generic_payload=True,
-          async_server_threads=1,
-          secure=secure, warmup_seconds=JAVA_WARMUP_SECONDS)
+            yield _ping_pong_scenario(
+                'java_generic_async_streaming_qps_one_server_core_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='ASYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async-limited',
+                use_generic_payload=True,
+                async_server_threads=1,
+                secure=secure,
+                warmup_seconds=JAVA_WARMUP_SECONDS)
 
-      # TODO(jtattermusch): add scenarios java vs C++
+            # TODO(jtattermusch): add scenarios java vs C++
 
-  def __str__(self):
-    return 'java'
+    def __str__(self):
+        return 'java'
 
 
 class GoLanguage:
 
-  def __init__(self):
-    pass
-    self.safename = str(self)
+    def __init__(self):
+        self.safename = str(self)
 
-  def worker_cmdline(self):
-    return ['tools/run_tests/performance/run_worker_go.sh']
+    def worker_cmdline(self):
+        return ['tools/run_tests/performance/run_worker_go.sh']
 
-  def worker_port_offset(self):
-    return 600
+    def worker_port_offset(self):
+        return 600
 
-  def scenarios(self):
-    for secure in [True, False]:
-      secstr = 'secure' if secure else 'insecure'
-      smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
+    def scenarios(self):
+        for secure in [True, False]:
+            secstr = 'secure' if secure else 'insecure'
+            smoketest_categories = ([SMOKETEST] if secure else []) + [SCALABLE]
 
-      # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
-      # but that's mostly because of lack of better name of the enum value.
-      yield _ping_pong_scenario(
-          'go_generic_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
-          client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          use_generic_payload=True, async_server_threads=1,
-          secure=secure,
-          categories=smoketest_categories)
+            # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+            # but that's mostly because there is no better name for the enum value.
+            yield _ping_pong_scenario(
+                'go_generic_sync_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='SYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                use_generic_payload=True,
+                async_server_threads=1,
+                secure=secure,
+                categories=smoketest_categories)
 
-      yield _ping_pong_scenario(
-          'go_protobuf_sync_streaming_ping_pong_%s' % secstr, rpc_type='STREAMING',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          async_server_threads=1,
-          secure=secure)
+            yield _ping_pong_scenario(
+                'go_protobuf_sync_streaming_ping_pong_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                async_server_threads=1,
+                secure=secure)
 
-      yield _ping_pong_scenario(
-          'go_protobuf_sync_unary_ping_pong_%s' % secstr, rpc_type='UNARY',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          async_server_threads=1,
-          secure=secure,
-          categories=smoketest_categories)
+            yield _ping_pong_scenario(
+                'go_protobuf_sync_unary_ping_pong_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                async_server_threads=1,
+                secure=secure,
+                categories=smoketest_categories)
 
-      # unconstrained_client='async' is intended (client uses goroutines)
-      yield _ping_pong_scenario(
-          'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr, rpc_type='UNARY',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          categories=smoketest_categories+[SCALABLE])
+            # unconstrained_client='async' is intended (client uses goroutines)
+            yield _ping_pong_scenario(
+                'go_protobuf_sync_unary_qps_unconstrained_%s' % secstr,
+                rpc_type='UNARY',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                categories=smoketest_categories + [SCALABLE])
 
-      # unconstrained_client='async' is intended (client uses goroutines)
-      yield _ping_pong_scenario(
-          'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
-          client_type='SYNC_CLIENT', server_type='SYNC_SERVER',
-          unconstrained_client='async',
-          secure=secure,
-          categories=[SCALABLE])
+            # unconstrained_client='async' is intended (client uses goroutines)
+            yield _ping_pong_scenario(
+                'go_protobuf_sync_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='SYNC_CLIENT',
+                server_type='SYNC_SERVER',
+                unconstrained_client='async',
+                secure=secure,
+                categories=[SCALABLE])
 
-      # unconstrained_client='async' is intended (client uses goroutines)
-      # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
-      # but that's mostly because of lack of better name of the enum value.
-      yield _ping_pong_scenario(
-          'go_generic_sync_streaming_qps_unconstrained_%s' % secstr, rpc_type='STREAMING',
-          client_type='SYNC_CLIENT', server_type='ASYNC_GENERIC_SERVER',
-          unconstrained_client='async', use_generic_payload=True,
-          secure=secure,
-          categories=[SCALABLE])
+            # unconstrained_client='async' is intended (client uses goroutines)
+            # ASYNC_GENERIC_SERVER for Go actually uses a sync streaming server,
+            # but that's mostly because there is no better name for the enum value.
+            yield _ping_pong_scenario(
+                'go_generic_sync_streaming_qps_unconstrained_%s' % secstr,
+                rpc_type='STREAMING',
+                client_type='SYNC_CLIENT',
+                server_type='ASYNC_GENERIC_SERVER',
+                unconstrained_client='async',
+                use_generic_payload=True,
+                secure=secure,
+                categories=[SCALABLE])
 
-      # TODO(jtattermusch): add scenarios go vs C++
+            # TODO(jtattermusch): add scenarios go vs C++
 
-  def __str__(self):
-    return 'go'
+    def __str__(self):
+        return 'go'
 
 
 LANGUAGES = {
-    'c++' : CXXLanguage(),
-    'csharp' : CSharpLanguage(),
-    'ruby' : RubyLanguage(),
-    'php7' : Php7Language(),
-    'php7_protobuf_c' : Php7Language(php7_protobuf_c=True),
-    'java' : JavaLanguage(),
-    'python' : PythonLanguage(),
-    'go' : GoLanguage(),
+    'c++': CXXLanguage(),
+    'csharp': CSharpLanguage(),
+    'ruby': RubyLanguage(),
+    'php7': Php7Language(),
+    'php7_protobuf_c': Php7Language(php7_protobuf_c=True),
+    'java': JavaLanguage(),
+    'python': PythonLanguage(),
+    'go': GoLanguage(),
 }
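
For reference, the LANGUAGES registry above is what the performance test driver
(run_performance_tests.py in this directory tree) iterates over. A minimal
consumption sketch, assuming each generated scenario is a dict carrying at
least a 'name' key; the helper name below is hypothetical:

    def list_scenario_names(language_name):
        # Look up the language wrapper and collect its scenario names.
        language = LANGUAGES[language_name]
        return [scenario['name'] for scenario in language.scenarios()]

    # e.g. list_scenario_names('ruby') would include
    # 'ruby_protobuf_sync_streaming_ping_pong' and
    # 'ruby_protobuf_unary_ping_pong_1MB'.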
diff --git a/tools/run_tests/python_utils/antagonist.py b/tools/run_tests/python_utils/antagonist.py
index 0d79ce0..a928a4c 100755
--- a/tools/run_tests/python_utils/antagonist.py
+++ b/tools/run_tests/python_utils/antagonist.py
@@ -12,8 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """This is used by run_tests.py to create cpu load on a machine"""
 
 while True:
-	pass
+    pass
diff --git a/tools/run_tests/python_utils/comment_on_pr.py b/tools/run_tests/python_utils/comment_on_pr.py
index 21b9bb7..399c996 100644
--- a/tools/run_tests/python_utils/comment_on_pr.py
+++ b/tools/run_tests/python_utils/comment_on_pr.py
@@ -16,19 +16,22 @@
 import json
 import urllib2
 
+
 def comment_on_pr(text):
-  if 'JENKINS_OAUTH_TOKEN' not in os.environ:
-    print 'Missing JENKINS_OAUTH_TOKEN env var: not commenting'
-    return
-  if 'ghprbPullId' not in os.environ:
-    print 'Missing ghprbPullId env var: not commenting'
-    return
-  req = urllib2.Request(
-      url = 'https://api.github.com/repos/grpc/grpc/issues/%s/comments' %
-          os.environ['ghprbPullId'],
-      data = json.dumps({'body': text}),
-      headers = {
-        'Authorization': 'token %s' % os.environ['JENKINS_OAUTH_TOKEN'],
-        'Content-Type': 'application/json',
-      })
-  print urllib2.urlopen(req).read()
+    if 'JENKINS_OAUTH_TOKEN' not in os.environ:
+        print 'Missing JENKINS_OAUTH_TOKEN env var: not commenting'
+        return
+    if 'ghprbPullId' not in os.environ:
+        print 'Missing ghprbPullId env var: not commenting'
+        return
+    req = urllib2.Request(
+        url='https://api.github.com/repos/grpc/grpc/issues/%s/comments' %
+        os.environ['ghprbPullId'],
+        data=json.dumps({
+            'body': text
+        }),
+        headers={
+            'Authorization': 'token %s' % os.environ['JENKINS_OAUTH_TOKEN'],
+            'Content-Type': 'application/json',
+        })
+    print urllib2.urlopen(req).read()
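
For a hypothetical pull request id and token, the request constructed above
targets the GitHub issue-comments endpoint; a sketch of the resulting values,
with placeholders only:

    # With ghprbPullId='12345' and JENKINS_OAUTH_TOKEN='<token>':
    #   url     = 'https://api.github.com/repos/grpc/grpc/issues/12345/comments'
    #   data    = '{"body": "<text>"}'
    #   headers = {'Authorization': 'token <token>',
    #              'Content-Type': 'application/json'}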
diff --git a/tools/run_tests/python_utils/dockerjob.py b/tools/run_tests/python_utils/dockerjob.py
index 2f5285b..2d22dc1 100755
--- a/tools/run_tests/python_utils/dockerjob.py
+++ b/tools/run_tests/python_utils/dockerjob.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Helpers to run docker instances as jobs."""
 
 from __future__ import print_function
@@ -28,102 +27,109 @@
 
 
 def random_name(base_name):
-  """Randomizes given base name."""
-  return '%s_%s' % (base_name, uuid.uuid4())
+    """Randomizes given base name."""
+    return '%s_%s' % (base_name, uuid.uuid4())
 
 
 def docker_kill(cid):
-  """Kills a docker container. Returns True if successful."""
-  return subprocess.call(['docker','kill', str(cid)],
-                         stdin=subprocess.PIPE,
-                         stdout=_DEVNULL,
-                         stderr=subprocess.STDOUT) == 0
+    """Kills a docker container. Returns True if successful."""
+    return subprocess.call(
+        ['docker', 'kill', str(cid)],
+        stdin=subprocess.PIPE,
+        stdout=_DEVNULL,
+        stderr=subprocess.STDOUT) == 0
 
 
 def docker_mapped_port(cid, port, timeout_seconds=15):
-  """Get port mapped to internal given internal port for given container."""
-  started = time.time()
-  while time.time() - started < timeout_seconds:
-    try:
-      output = subprocess.check_output('docker port %s %s' % (cid, port),
-                                       stderr=_DEVNULL,
-                                       shell=True)
-      return int(output.split(':', 2)[1])
-    except subprocess.CalledProcessError as e:
-      pass
-  raise Exception('Failed to get exposed port %s for container %s.' %
-                  (port, cid))
+    """Get port mapped to internal given internal port for given container."""
+    started = time.time()
+    while time.time() - started < timeout_seconds:
+        try:
+            output = subprocess.check_output(
+                'docker port %s %s' % (cid, port), stderr=_DEVNULL, shell=True)
+            return int(output.split(':', 2)[1])
+        except subprocess.CalledProcessError as e:
+            pass
+    raise Exception('Failed to get exposed port %s for container %s.' % (port,
+                                                                         cid))
 
 
 def wait_for_healthy(cid, shortname, timeout_seconds):
-  """Wait timeout_seconds for the container to become healthy"""
-  started = time.time()
-  while time.time() - started < timeout_seconds:
-    try:
-      output = subprocess.check_output(
-          ['docker', 'inspect', '--format="{{.State.Health.Status}}"', cid],
-          stderr=_DEVNULL)
-      if output.strip('\n') == 'healthy':
-        return
-    except subprocess.CalledProcessError as e:
-      pass
-    time.sleep(1)
-  raise Exception('Timed out waiting for %s (%s) to pass health check' %
-                  (shortname, cid))
+    """Wait timeout_seconds for the container to become healthy"""
+    started = time.time()
+    while time.time() - started < timeout_seconds:
+        try:
+            output = subprocess.check_output(
+                [
+                    'docker', 'inspect', '--format="{{.State.Health.Status}}"',
+                    cid
+                ],
+                stderr=_DEVNULL)
+            if output.strip('\n') == 'healthy':
+                return
+        except subprocess.CalledProcessError as e:
+            pass
+        time.sleep(1)
+    raise Exception('Timed out waiting for %s (%s) to pass health check' %
+                    (shortname, cid))
 
 
 def finish_jobs(jobs):
-  """Kills given docker containers and waits for corresponding jobs to finish"""
-  for job in jobs:
-    job.kill(suppress_failure=True)
+    """Kills given docker containers and waits for corresponding jobs to finish"""
+    for job in jobs:
+        job.kill(suppress_failure=True)
 
-  while any(job.is_running() for job in jobs):
-    time.sleep(1)
+    while any(job.is_running() for job in jobs):
+        time.sleep(1)
 
 
 def image_exists(image):
-  """Returns True if given docker image exists."""
-  return subprocess.call(['docker','inspect', image],
-                         stdin=subprocess.PIPE,
-                         stdout=_DEVNULL,
-                         stderr=subprocess.STDOUT) == 0
+    """Returns True if given docker image exists."""
+    return subprocess.call(
+        ['docker', 'inspect', image],
+        stdin=subprocess.PIPE,
+        stdout=_DEVNULL,
+        stderr=subprocess.STDOUT) == 0
 
 
 def remove_image(image, skip_nonexistent=False, max_retries=10):
-  """Attempts to remove docker image with retries."""
-  if skip_nonexistent and not image_exists(image):
-    return True
-  for attempt in range(0, max_retries):
-    if subprocess.call(['docker','rmi', '-f', image],
-                       stdin=subprocess.PIPE,
-                       stdout=_DEVNULL,
-                       stderr=subprocess.STDOUT) == 0:
-      return True
-    time.sleep(2)
-  print('Failed to remove docker image %s' % image)
-  return False
+    """Attempts to remove docker image with retries."""
+    if skip_nonexistent and not image_exists(image):
+        return True
+    for attempt in range(0, max_retries):
+        if subprocess.call(
+            ['docker', 'rmi', '-f', image],
+                stdin=subprocess.PIPE,
+                stdout=_DEVNULL,
+                stderr=subprocess.STDOUT) == 0:
+            return True
+        time.sleep(2)
+    print('Failed to remove docker image %s' % image)
+    return False
 
 
 class DockerJob:
-  """Encapsulates a job"""
+    """Encapsulates a job"""
 
-  def __init__(self, spec):
-    self._spec = spec
-    self._job = jobset.Job(spec, newline_on_success=True, travis=True, add_env={})
-    self._container_name = spec.container_name
+    def __init__(self, spec):
+        self._spec = spec
+        self._job = jobset.Job(
+            spec, newline_on_success=True, travis=True, add_env={})
+        self._container_name = spec.container_name
 
-  def mapped_port(self, port):
-    return docker_mapped_port(self._container_name, port)
+    def mapped_port(self, port):
+        return docker_mapped_port(self._container_name, port)
 
-  def wait_for_healthy(self, timeout_seconds):
-    wait_for_healthy(self._container_name, self._spec.shortname, timeout_seconds)
+    def wait_for_healthy(self, timeout_seconds):
+        wait_for_healthy(self._container_name, self._spec.shortname,
+                         timeout_seconds)
 
-  def kill(self, suppress_failure=False):
-    """Sends kill signal to the container."""
-    if suppress_failure:
-      self._job.suppress_failure_message()
-    return docker_kill(self._container_name)
+    def kill(self, suppress_failure=False):
+        """Sends kill signal to the container."""
+        if suppress_failure:
+            self._job.suppress_failure_message()
+        return docker_kill(self._container_name)
 
-  def is_running(self):
-    """Polls a job and returns True if given job is still running."""
-    return self._job.state() == jobset._RUNNING
+    def is_running(self):
+        """Polls a job and returns True if given job is still running."""
+        return self._job.state() == jobset._RUNNING
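
For context, a rough usage sketch of the helpers above. The container and image
names are hypothetical, and DockerJob itself needs a full jobset spec, so this
is illustrative rather than directly runnable:

    cid = 'grpc_perf_worker_a1b2c3'                # hypothetical container id
    if image_exists('grpc_perf_worker_image'):     # hypothetical image name
        wait_for_healthy(cid, 'perf_worker', timeout_seconds=60)
        port = docker_mapped_port(cid, 10000)      # host port mapped to 10000
        docker_kill(cid)
        remove_image('grpc_perf_worker_image', skip_nonexistent=True)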
diff --git a/tools/run_tests/python_utils/filter_pull_request_tests.py b/tools/run_tests/python_utils/filter_pull_request_tests.py
index e880734..4c09b34 100644
--- a/tools/run_tests/python_utils/filter_pull_request_tests.py
+++ b/tools/run_tests/python_utils/filter_pull_request_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Filter out tests based on file differences compared to merge target branch"""
 
 from __future__ import print_function
@@ -23,24 +22,25 @@
 
 
 class TestSuite:
-  """
+    """
   Contains label to identify job as belonging to this test suite and
   triggers to identify if changed files are relevant
   """
-  def __init__(self, labels):
-    """
+
+    def __init__(self, labels):
+        """
     Build TestSuite to group tests based on labeling
     :param labels: strings that should match a job's platform, config, language, or test group
     """
-    self.triggers = []
-    self.labels = labels
+        self.triggers = []
+        self.labels = labels
 
-  def add_trigger(self, trigger):
-    """
+    def add_trigger(self, trigger):
+        """
     Add a regex to list of triggers that determine if a changed file should run tests
     :param trigger: regex matching file relevant to tests
     """
-    self.triggers.append(trigger)
+        self.triggers.append(trigger)
 
 
 # Create test suites
@@ -55,10 +55,11 @@
 _LINUX_TEST_SUITE = TestSuite(['linux'])
 _WINDOWS_TEST_SUITE = TestSuite(['windows'])
 _MACOS_TEST_SUITE = TestSuite(['macos'])
-_ALL_TEST_SUITES = [_CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE,
-                    _NODE_TEST_SUITE, _OBJC_TEST_SUITE, _PHP_TEST_SUITE,
-                    _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE, _LINUX_TEST_SUITE,
-                    _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE]
+_ALL_TEST_SUITES = [
+    _CORE_TEST_SUITE, _CPP_TEST_SUITE, _CSHARP_TEST_SUITE, _NODE_TEST_SUITE,
+    _OBJC_TEST_SUITE, _PHP_TEST_SUITE, _PYTHON_TEST_SUITE, _RUBY_TEST_SUITE,
+    _LINUX_TEST_SUITE, _WINDOWS_TEST_SUITE, _MACOS_TEST_SUITE
+]
 
 # Dictionary of whitelistable files where the key is a regex matching changed files
 # and the value is a list of tests that should be run. An empty list means that
@@ -66,46 +67,46 @@
 # match any of these regexes will trigger all tests
 # DO NOT CHANGE THIS UNLESS YOU KNOW WHAT YOU ARE DOING (be careful even if you do)
 _WHITELIST_DICT = {
-  '^doc/': [],
-  '^examples/': [],
-  '^include/grpc\+\+/': [_CPP_TEST_SUITE],
-  '^summerofcode/': [],
-  '^src/cpp/': [_CPP_TEST_SUITE],
-  '^src/csharp/': [_CSHARP_TEST_SUITE],
-  '^src/objective\-c/': [_OBJC_TEST_SUITE],
-  '^src/php/': [_PHP_TEST_SUITE],
-  '^src/python/': [_PYTHON_TEST_SUITE],
-  '^src/ruby/': [_RUBY_TEST_SUITE],
-  '^templates/': [],
-  '^test/core/': [_CORE_TEST_SUITE, _CPP_TEST_SUITE],
-  '^test/cpp/': [_CPP_TEST_SUITE],
-  '^test/distrib/cpp/': [_CPP_TEST_SUITE],
-  '^test/distrib/csharp/': [_CSHARP_TEST_SUITE],
-  '^test/distrib/php/': [_PHP_TEST_SUITE],
-  '^test/distrib/python/': [_PYTHON_TEST_SUITE],
-  '^test/distrib/ruby/': [_RUBY_TEST_SUITE],
-  '^vsprojects/': [_WINDOWS_TEST_SUITE],
-  'composer\.json$': [_PHP_TEST_SUITE],
-  'config\.m4$': [_PHP_TEST_SUITE],
-  'CONTRIBUTING\.md$': [],
-  'Gemfile$': [_RUBY_TEST_SUITE],
-  'grpc\.def$': [_WINDOWS_TEST_SUITE],
-  'grpc\.gemspec$': [_RUBY_TEST_SUITE],
-  'gRPC\.podspec$': [_OBJC_TEST_SUITE],
-  'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
-  'gRPC\-ProtoRPC\.podspec$': [_OBJC_TEST_SUITE],
-  'gRPC\-RxLibrary\.podspec$': [_OBJC_TEST_SUITE],
-  'INSTALL\.md$': [],
-  'LICENSE$': [],
-  'MANIFEST\.md$': [],
-  'package\.json$': [_PHP_TEST_SUITE],
-  'package\.xml$': [_PHP_TEST_SUITE],
-  'PATENTS$': [],
-  'PYTHON\-MANIFEST\.in$': [_PYTHON_TEST_SUITE],
-  'README\.md$': [],
-  'requirements\.txt$': [_PYTHON_TEST_SUITE],
-  'setup\.cfg$': [_PYTHON_TEST_SUITE],
-  'setup\.py$': [_PYTHON_TEST_SUITE]
+    '^doc/': [],
+    '^examples/': [],
+    '^include/grpc\+\+/': [_CPP_TEST_SUITE],
+    '^summerofcode/': [],
+    '^src/cpp/': [_CPP_TEST_SUITE],
+    '^src/csharp/': [_CSHARP_TEST_SUITE],
+    '^src/objective\-c/': [_OBJC_TEST_SUITE],
+    '^src/php/': [_PHP_TEST_SUITE],
+    '^src/python/': [_PYTHON_TEST_SUITE],
+    '^src/ruby/': [_RUBY_TEST_SUITE],
+    '^templates/': [],
+    '^test/core/': [_CORE_TEST_SUITE, _CPP_TEST_SUITE],
+    '^test/cpp/': [_CPP_TEST_SUITE],
+    '^test/distrib/cpp/': [_CPP_TEST_SUITE],
+    '^test/distrib/csharp/': [_CSHARP_TEST_SUITE],
+    '^test/distrib/php/': [_PHP_TEST_SUITE],
+    '^test/distrib/python/': [_PYTHON_TEST_SUITE],
+    '^test/distrib/ruby/': [_RUBY_TEST_SUITE],
+    '^vsprojects/': [_WINDOWS_TEST_SUITE],
+    'composer\.json$': [_PHP_TEST_SUITE],
+    'config\.m4$': [_PHP_TEST_SUITE],
+    'CONTRIBUTING\.md$': [],
+    'Gemfile$': [_RUBY_TEST_SUITE],
+    'grpc\.def$': [_WINDOWS_TEST_SUITE],
+    'grpc\.gemspec$': [_RUBY_TEST_SUITE],
+    'gRPC\.podspec$': [_OBJC_TEST_SUITE],
+    'gRPC\-Core\.podspec$': [_OBJC_TEST_SUITE],
+    'gRPC\-ProtoRPC\.podspec$': [_OBJC_TEST_SUITE],
+    'gRPC\-RxLibrary\.podspec$': [_OBJC_TEST_SUITE],
+    'INSTALL\.md$': [],
+    'LICENSE$': [],
+    'MANIFEST\.md$': [],
+    'package\.json$': [_PHP_TEST_SUITE],
+    'package\.xml$': [_PHP_TEST_SUITE],
+    'PATENTS$': [],
+    'PYTHON\-MANIFEST\.in$': [_PYTHON_TEST_SUITE],
+    'README\.md$': [],
+    'requirements\.txt$': [_PYTHON_TEST_SUITE],
+    'setup\.cfg$': [_PYTHON_TEST_SUITE],
+    'setup\.py$': [_PYTHON_TEST_SUITE]
 }
 
 # Regex that combines all keys in _WHITELIST_DICT
@@ -113,83 +114,88 @@
 
 # Add all triggers to their respective test suites
 for trigger, test_suites in six.iteritems(_WHITELIST_DICT):
-  for test_suite in test_suites:
-    test_suite.add_trigger(trigger)
+    for test_suite in test_suites:
+        test_suite.add_trigger(trigger)
 
 
 def _get_changed_files(base_branch):
-  """
+    """
   Get list of changed files between current branch and base of target merge branch
   """
-  # Get file changes between branch and merge-base of specified branch
-  # Not combined to be Windows friendly
-  base_commit = check_output(["git", "merge-base", base_branch, "HEAD"]).rstrip()
-  return check_output(["git", "diff", base_commit, "--name-only", "HEAD"]).splitlines()
+    # Get file changes between branch and merge-base of specified branch
+    # Not combined to be Windows friendly
+    base_commit = check_output(["git", "merge-base", base_branch,
+                                "HEAD"]).rstrip()
+    return check_output(["git", "diff", base_commit, "--name-only",
+                         "HEAD"]).splitlines()
 
 
 def _can_skip_tests(file_names, triggers):
-  """
+    """
   Determines whether tests are skippable by checking that no changed file matches the list of regexes
   :param file_names: list of changed files generated by _get_changed_files()
   :param triggers: list of regexes matching file names that indicate tests should be run
   :return: True if it is safe to skip the tests
   """
-  for file_name in file_names:
-    if any(re.match(trigger, file_name) for trigger in triggers):
-      return False
-  return True
+    for file_name in file_names:
+        if any(re.match(trigger, file_name) for trigger in triggers):
+            return False
+    return True
 
 
 def _remove_irrelevant_tests(tests, skippable_labels):
-  """
+    """
   Filters out tests by config or language - will not remove sanitizer tests
   :param tests: list of all tests generated by run_tests_matrix.py
   :param skippable_labels: list of languages and platforms with skippable tests
   :return: list of relevant tests
   """
-  # test.labels[0] is platform and test.labels[2] is language
-  # We skip a test if both are considered safe to skip
-  return [test for test in tests if test.labels[0] not in skippable_labels or \
-          test.labels[2] not in skippable_labels]
+    # test.labels[0] is platform and test.labels[2] is language
+    # We skip a test if both are considered safe to skip
+    return [test for test in tests if test.labels[0] not in skippable_labels or \
+            test.labels[2] not in skippable_labels]
 
 
 def affects_c_cpp(base_branch):
-  """
+    """
   Determines if a pull request's changes affect C/C++. This function exists because
   there are pull request tests that only test C/C++ code
   :param base_branch: branch that a pull request is requesting to merge into
   :return: boolean indicating whether C/C++ changes are made in pull request
   """
-  changed_files = _get_changed_files(base_branch)
-  # Run all tests if any changed file is not in the whitelist dictionary
-  for changed_file in changed_files:
-    if not re.match(_ALL_TRIGGERS, changed_file):
-      return True
-  return not _can_skip_tests(changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
+    changed_files = _get_changed_files(base_branch)
+    # Run all tests if any changed file is not in the whitelist dictionary
+    for changed_file in changed_files:
+        if not re.match(_ALL_TRIGGERS, changed_file):
+            return True
+    return not _can_skip_tests(
+        changed_files, _CPP_TEST_SUITE.triggers + _CORE_TEST_SUITE.triggers)
 
 
 def filter_tests(tests, base_branch):
-  """
+    """
   Filters out tests that are safe to ignore
   :param tests: list of all tests generated by run_tests_matrix.py
   :return: list of relevant tests
   """
-  print('Finding file differences between gRPC %s branch and pull request...\n' % base_branch)
-  changed_files = _get_changed_files(base_branch)
-  for changed_file in changed_files:
-    print('  %s' % changed_file)
-  print('')
+    print(
+        'Finding file differences between gRPC %s branch and pull request...\n'
+        % base_branch)
+    changed_files = _get_changed_files(base_branch)
+    for changed_file in changed_files:
+        print('  %s' % changed_file)
+    print('')
 
-  # Run all tests if any changed file is not in the whitelist dictionary
-  for changed_file in changed_files:
-    if not re.match(_ALL_TRIGGERS, changed_file):
-      return(tests)
-  # Figure out which language and platform tests to run
-  skippable_labels = []
-  for test_suite in _ALL_TEST_SUITES:
-    if _can_skip_tests(changed_files, test_suite.triggers):
-      for label in test_suite.labels:
-        print('  %s tests safe to skip' % label)
-        skippable_labels.append(label)
-  tests = _remove_irrelevant_tests(tests, skippable_labels)
-  return tests
+    # Run all tests if any changed file is not in the whitelist dictionary
+    for changed_file in changed_files:
+        if not re.match(_ALL_TRIGGERS, changed_file):
+            return (tests)
+    # Figure out which language and platform tests to run
+    skippable_labels = []
+    for test_suite in _ALL_TEST_SUITES:
+        if _can_skip_tests(changed_files, test_suite.triggers):
+            for label in test_suite.labels:
+                print('  %s tests safe to skip' % label)
+                skippable_labels.append(label)
+    tests = _remove_irrelevant_tests(tests, skippable_labels)
+    return tests
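
Aside on the trigger mechanism in the file above: filter_pull_request_tests.py maps path regexes to test suites and marks a suite as skippable when none of the changed files matches any of its triggers (any file outside the whitelist runs everything). A minimal, self-contained sketch of that matching idea, assuming an illustrative SUITE_TRIGGERS table and file list rather than the script's real data:

import re

# Hypothetical trigger table: suite label -> path regexes (not the real data).
SUITE_TRIGGERS = {
    'python': [r'^src/python/', r'setup\.py$'],
    'ruby': [r'^src/ruby/', r'Gemfile$'],
}

def skippable_suites(changed_files, suite_triggers=SUITE_TRIGGERS):
    """Return suite labels whose triggers match none of the changed files."""
    skippable = []
    for label, triggers in suite_triggers.items():
        # A suite can be skipped only if no changed file matches any trigger.
        if not any(re.match(trigger, f)
                   for f in changed_files for trigger in triggers):
            skippable.append(label)
    return skippable

print(skippable_suites(['src/ruby/lib/grpc.rb', 'README.md']))  # ['python']

The real helper _remove_irrelevant_tests is more conservative: it only drops a test when both its platform label and its language label are skippable.
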
diff --git a/tools/run_tests/python_utils/jobset.py b/tools/run_tests/python_utils/jobset.py
index 85eef44..6a33913 100755
--- a/tools/run_tests/python_utils/jobset.py
+++ b/tools/run_tests/python_utils/jobset.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run a group of subprocesses and then finish."""
 
 from __future__ import print_function
@@ -28,11 +27,9 @@
 import time
 import errno
 
-
 # cpu cost measurement
 measure_cpu_costs = False
 
-
 _DEFAULT_MAX_JOBS = 16 * multiprocessing.cpu_count()
 _MAX_RESULT_SIZE = 8192
 
@@ -42,63 +39,60 @@
 # characters to the PR description, which leak into the environment here
 # and cause failures.
 def strip_non_ascii_chars(s):
-  return ''.join(c for c in s if ord(c) < 128)
+    return ''.join(c for c in s if ord(c) < 128)
 
 
 def sanitized_environment(env):
-  sanitized = {}
-  for key, value in env.items():
-    sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
-  return sanitized
+    sanitized = {}
+    for key, value in env.items():
+        sanitized[strip_non_ascii_chars(key)] = strip_non_ascii_chars(value)
+    return sanitized
 
 
 def platform_string():
-  if platform.system() == 'Windows':
-    return 'windows'
-  elif platform.system()[:7] == 'MSYS_NT':
-    return 'windows'
-  elif platform.system() == 'Darwin':
-    return 'mac'
-  elif platform.system() == 'Linux':
-    return 'linux'
-  else:
-    return 'posix'
+    if platform.system() == 'Windows':
+        return 'windows'
+    elif platform.system()[:7] == 'MSYS_NT':
+        return 'windows'
+    elif platform.system() == 'Darwin':
+        return 'mac'
+    elif platform.system() == 'Linux':
+        return 'linux'
+    else:
+        return 'posix'
 
 
 # setup a signal handler so that signal.pause registers 'something'
 # when a child finishes
 # not using futures and threading to avoid a dependency on subprocess32
 if platform_string() == 'windows':
-  pass
-else:
-  def alarm_handler(unused_signum, unused_frame):
     pass
+else:
 
-  signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
-  signal.signal(signal.SIGALRM, alarm_handler)
+    def alarm_handler(unused_signum, unused_frame):
+        pass
 
+    signal.signal(signal.SIGCHLD, lambda unused_signum, unused_frame: None)
+    signal.signal(signal.SIGALRM, alarm_handler)
 
 _SUCCESS = object()
 _FAILURE = object()
 _RUNNING = object()
 _KILLED = object()
 
-
 _COLORS = {
-    'red': [ 31, 0 ],
-    'green': [ 32, 0 ],
-    'yellow': [ 33, 0 ],
-    'lightgray': [ 37, 0],
-    'gray': [ 30, 1 ],
-    'purple': [ 35, 0 ],
-    'cyan': [ 36, 0 ]
-    }
-
+    'red': [31, 0],
+    'green': [32, 0],
+    'yellow': [33, 0],
+    'lightgray': [37, 0],
+    'gray': [30, 1],
+    'purple': [35, 0],
+    'cyan': [36, 0]
+}
 
 _BEGINNING_OF_LINE = '\x1b[0G'
 _CLEAR_LINE = '\x1b[2K'
 
-
 _TAG_COLOR = {
     'FAILED': 'red',
     'FLAKE': 'purple',
@@ -111,392 +105,437 @@
     'SUCCESS': 'green',
     'IDLE': 'gray',
     'SKIPPED': 'cyan'
-    }
+}
 
 _FORMAT = '%(asctime)-15s %(message)s'
 logging.basicConfig(level=logging.INFO, format=_FORMAT)
 
 
 def eintr_be_gone(fn):
-  """Run fn until it doesn't stop because of EINTR"""
-  while True:
-    try:
-      return fn()
-    except IOError, e:
-      if e.errno != errno.EINTR:
-        raise
-
+    """Run fn until it doesn't stop because of EINTR"""
+    while True:
+        try:
+            return fn()
+        except IOError, e:
+            if e.errno != errno.EINTR:
+                raise
 
 
 def message(tag, msg, explanatory_text=None, do_newline=False):
-  if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
-    return
-  message.old_tag = tag
-  message.old_msg = msg
-  while True:
-    try:
-      if platform_string() == 'windows' or not sys.stdout.isatty():
-        if explanatory_text:
-          logging.info(explanatory_text)
-        logging.info('%s: %s', tag, msg)
-      else:
-        sys.stdout.write('%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' % (
-            _BEGINNING_OF_LINE,
-            _CLEAR_LINE,
-            '\n%s' % explanatory_text if explanatory_text is not None else '',
-            _COLORS[_TAG_COLOR[tag]][1],
-            _COLORS[_TAG_COLOR[tag]][0],
-            tag,
-            msg,
-            '\n' if do_newline or explanatory_text is not None else ''))
-      sys.stdout.flush()
-      return
-    except IOError, e:
-      if e.errno != errno.EINTR:
-        raise
+    if message.old_tag == tag and message.old_msg == msg and not explanatory_text:
+        return
+    message.old_tag = tag
+    message.old_msg = msg
+    while True:
+        try:
+            if platform_string() == 'windows' or not sys.stdout.isatty():
+                if explanatory_text:
+                    logging.info(explanatory_text)
+                logging.info('%s: %s', tag, msg)
+            else:
+                sys.stdout.write(
+                    '%s%s%s\x1b[%d;%dm%s\x1b[0m: %s%s' %
+                    (_BEGINNING_OF_LINE, _CLEAR_LINE, '\n%s' % explanatory_text
+                     if explanatory_text is not None else '',
+                     _COLORS[_TAG_COLOR[tag]][1], _COLORS[_TAG_COLOR[tag]][0],
+                     tag, msg, '\n'
+                     if do_newline or explanatory_text is not None else ''))
+            sys.stdout.flush()
+            return
+        except IOError, e:
+            if e.errno != errno.EINTR:
+                raise
+
 
 message.old_tag = ''
 message.old_msg = ''
 
+
 def which(filename):
-  if '/' in filename:
-    return filename
-  for path in os.environ['PATH'].split(os.pathsep):
-    if os.path.exists(os.path.join(path, filename)):
-      return os.path.join(path, filename)
-  raise Exception('%s not found' % filename)
+    if '/' in filename:
+        return filename
+    for path in os.environ['PATH'].split(os.pathsep):
+        if os.path.exists(os.path.join(path, filename)):
+            return os.path.join(path, filename)
+    raise Exception('%s not found' % filename)
 
 
 class JobSpec(object):
-  """Specifies what to run for a job."""
+    """Specifies what to run for a job."""
 
-  def __init__(self, cmdline, shortname=None, environ=None,
-               cwd=None, shell=False, timeout_seconds=5*60, flake_retries=0,
-               timeout_retries=0, kill_handler=None, cpu_cost=1.0,
-               verbose_success=False):
-    """
+    def __init__(self,
+                 cmdline,
+                 shortname=None,
+                 environ=None,
+                 cwd=None,
+                 shell=False,
+                 timeout_seconds=5 * 60,
+                 flake_retries=0,
+                 timeout_retries=0,
+                 kill_handler=None,
+                 cpu_cost=1.0,
+                 verbose_success=False):
+        """
     Arguments:
       cmdline: a list of arguments to pass as the command line
       environ: a dictionary of environment variables to set in the child process
       kill_handler: a handler that will be called whenever job.kill() is invoked
       cpu_cost: number of cores per second this job needs
     """
-    if environ is None:
-      environ = {}
-    self.cmdline = cmdline
-    self.environ = environ
-    self.shortname = cmdline[0] if shortname is None else shortname
-    self.cwd = cwd
-    self.shell = shell
-    self.timeout_seconds = timeout_seconds
-    self.flake_retries = flake_retries
-    self.timeout_retries = timeout_retries
-    self.kill_handler = kill_handler
-    self.cpu_cost = cpu_cost
-    self.verbose_success = verbose_success
+        if environ is None:
+            environ = {}
+        self.cmdline = cmdline
+        self.environ = environ
+        self.shortname = cmdline[0] if shortname is None else shortname
+        self.cwd = cwd
+        self.shell = shell
+        self.timeout_seconds = timeout_seconds
+        self.flake_retries = flake_retries
+        self.timeout_retries = timeout_retries
+        self.kill_handler = kill_handler
+        self.cpu_cost = cpu_cost
+        self.verbose_success = verbose_success
 
-  def identity(self):
-    return '%r %r' % (self.cmdline, self.environ)
+    def identity(self):
+        return '%r %r' % (self.cmdline, self.environ)
 
-  def __hash__(self):
-    return hash(self.identity())
+    def __hash__(self):
+        return hash(self.identity())
 
-  def __cmp__(self, other):
-    return self.identity() == other.identity()
+    def __cmp__(self, other):
+        return self.identity() == other.identity()
 
-  def __repr__(self):
-    return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname, self.cmdline)
+    def __repr__(self):
+        return 'JobSpec(shortname=%s, cmdline=%s)' % (self.shortname,
+                                                      self.cmdline)
 
-  def __str__(self):
-    return '%s: %s %s' % (self.shortname,
-                          ' '.join('%s=%s' % kv for kv in self.environ.items()),
-                          ' '.join(self.cmdline))
+    def __str__(self):
+        return '%s: %s %s' % (self.shortname, ' '.join(
+            '%s=%s' % kv for kv in self.environ.items()),
+                              ' '.join(self.cmdline))
 
 
 class JobResult(object):
-  def __init__(self):
-    self.state = 'UNKNOWN'
-    self.returncode = -1
-    self.elapsed_time = 0
-    self.num_failures = 0
-    self.retries = 0
-    self.message = ''
-    self.cpu_estimated = 1
-    self.cpu_measured = 1
+
+    def __init__(self):
+        self.state = 'UNKNOWN'
+        self.returncode = -1
+        self.elapsed_time = 0
+        self.num_failures = 0
+        self.retries = 0
+        self.message = ''
+        self.cpu_estimated = 1
+        self.cpu_measured = 1
 
 
 def read_from_start(f):
-  f.seek(0)
-  return f.read()
+    f.seek(0)
+    return f.read()
 
 
 class Job(object):
-  """Manages one job."""
+    """Manages one job."""
 
-  def __init__(self, spec, newline_on_success, travis, add_env,
-               quiet_success=False):
-    self._spec = spec
-    self._newline_on_success = newline_on_success
-    self._travis = travis
-    self._add_env = add_env.copy()
-    self._retries = 0
-    self._timeout_retries = 0
-    self._suppress_failure_message = False
-    self._quiet_success = quiet_success
-    if not self._quiet_success:
-      message('START', spec.shortname, do_newline=self._travis)
-    self.result = JobResult()
-    self.start()
-
-  def GetSpec(self):
-    return self._spec
-
-  def start(self):
-    self._tempfile = tempfile.TemporaryFile()
-    env = dict(os.environ)
-    env.update(self._spec.environ)
-    env.update(self._add_env)
-    env = sanitized_environment(env)
-    self._start = time.time()
-    cmdline = self._spec.cmdline
-    # The Unix time command is finicky when used with MSBuild, so we don't use it
-    # with jobs that run MSBuild.
-    global measure_cpu_costs
-    if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
-      cmdline = ['time', '-p'] + cmdline
-    else:
-      measure_cpu_costs = False
-    try_start = lambda: subprocess.Popen(args=cmdline,
-                                         stderr=subprocess.STDOUT,
-                                         stdout=self._tempfile,
-                                         cwd=self._spec.cwd,
-                                         shell=self._spec.shell,
-                                         env=env)
-    delay = 0.3
-    for i in range(0, 4):
-      try:
-        self._process = try_start()
-        break
-      except OSError:
-        message('WARNING', 'Failed to start %s, retrying in %f seconds' % (self._spec.shortname, delay))
-        time.sleep(delay)
-        delay *= 2
-    else:
-      self._process = try_start()
-    self._state = _RUNNING
-
-  def state(self):
-    """Poll current state of the job. Prints messages at completion."""
-    def stdout(self=self):
-      stdout = read_from_start(self._tempfile)
-      self.result.message = stdout[-_MAX_RESULT_SIZE:]
-      return stdout
-    if self._state == _RUNNING and self._process.poll() is not None:
-      elapsed = time.time() - self._start
-      self.result.elapsed_time = elapsed
-      if self._process.returncode != 0:
-        if self._retries < self._spec.flake_retries:
-          message('FLAKE', '%s [ret=%d, pid=%d]' % (
-            self._spec.shortname, self._process.returncode, self._process.pid),
-            stdout(), do_newline=True)
-          self._retries += 1
-          self.result.num_failures += 1
-          self.result.retries = self._timeout_retries + self._retries
-          # NOTE: job is restarted regardless of jobset's max_time setting
-          self.start()
-        else:
-          self._state = _FAILURE
-          if not self._suppress_failure_message:
-            message('FAILED', '%s [ret=%d, pid=%d, time=%.1fsec]' % (
-                self._spec.shortname, self._process.returncode, self._process.pid, elapsed),
-                stdout(), do_newline=True)
-          self.result.state = 'FAILED'
-          self.result.num_failures += 1
-          self.result.returncode = self._process.returncode
-      else:
-        self._state = _SUCCESS
-        measurement = ''
-        if measure_cpu_costs:
-          m = re.search(r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)', stdout())
-          real = float(m.group(1))
-          user = float(m.group(2))
-          sys = float(m.group(3))
-          if real > 0.5:
-            cores = (user + sys) / real
-            self.result.cpu_measured = float('%.01f' % cores)
-            self.result.cpu_estimated = float('%.01f' % self._spec.cpu_cost)
-            measurement = '; cpu_cost=%.01f; estimated=%.01f' % (self.result.cpu_measured, self.result.cpu_estimated)
+    def __init__(self,
+                 spec,
+                 newline_on_success,
+                 travis,
+                 add_env,
+                 quiet_success=False):
+        self._spec = spec
+        self._newline_on_success = newline_on_success
+        self._travis = travis
+        self._add_env = add_env.copy()
+        self._retries = 0
+        self._timeout_retries = 0
+        self._suppress_failure_message = False
+        self._quiet_success = quiet_success
         if not self._quiet_success:
-          message('PASSED', '%s [time=%.1fsec, retries=%d:%d%s]' % (
-              self._spec.shortname, elapsed, self._retries, self._timeout_retries, measurement),
-              stdout() if self._spec.verbose_success else None,
-              do_newline=self._newline_on_success or self._travis)
-        self.result.state = 'PASSED'
-    elif (self._state == _RUNNING and
-          self._spec.timeout_seconds is not None and
-          time.time() - self._start > self._spec.timeout_seconds):
-      elapsed = time.time() - self._start
-      self.result.elapsed_time = elapsed
-      if self._timeout_retries < self._spec.timeout_retries:
-        message('TIMEOUT_FLAKE', '%s [pid=%d]' % (self._spec.shortname, self._process.pid), stdout(), do_newline=True)
-        self._timeout_retries += 1
-        self.result.num_failures += 1
-        self.result.retries = self._timeout_retries + self._retries
-        if self._spec.kill_handler:
-          self._spec.kill_handler(self)
-        self._process.terminate()
-        # NOTE: job is restarted regardless of jobset's max_time setting
+            message('START', spec.shortname, do_newline=self._travis)
+        self.result = JobResult()
         self.start()
-      else:
-        message('TIMEOUT', '%s [pid=%d, time=%.1fsec]' % (self._spec.shortname, self._process.pid, elapsed), stdout(), do_newline=True)
-        self.kill()
-        self.result.state = 'TIMEOUT'
-        self.result.num_failures += 1
-    return self._state
 
-  def kill(self):
-    if self._state == _RUNNING:
-      self._state = _KILLED
-      if self._spec.kill_handler:
-        self._spec.kill_handler(self)
-      self._process.terminate()
+    def GetSpec(self):
+        return self._spec
 
-  def suppress_failure_message(self):
-    self._suppress_failure_message = True
+    def start(self):
+        self._tempfile = tempfile.TemporaryFile()
+        env = dict(os.environ)
+        env.update(self._spec.environ)
+        env.update(self._add_env)
+        env = sanitized_environment(env)
+        self._start = time.time()
+        cmdline = self._spec.cmdline
+        # The Unix time command is finicky when used with MSBuild, so we don't use it
+        # with jobs that run MSBuild.
+        global measure_cpu_costs
+        if measure_cpu_costs and not 'vsprojects\\build' in cmdline[0]:
+            cmdline = ['time', '-p'] + cmdline
+        else:
+            measure_cpu_costs = False
+        try_start = lambda: subprocess.Popen(args=cmdline,
+                                             stderr=subprocess.STDOUT,
+                                             stdout=self._tempfile,
+                                             cwd=self._spec.cwd,
+                                             shell=self._spec.shell,
+                                             env=env)
+        delay = 0.3
+        for i in range(0, 4):
+            try:
+                self._process = try_start()
+                break
+            except OSError:
+                message('WARNING',
+                        'Failed to start %s, retrying in %f seconds' %
+                        (self._spec.shortname, delay))
+                time.sleep(delay)
+                delay *= 2
+        else:
+            self._process = try_start()
+        self._state = _RUNNING
+
+    def state(self):
+        """Poll current state of the job. Prints messages at completion."""
+
+        def stdout(self=self):
+            stdout = read_from_start(self._tempfile)
+            self.result.message = stdout[-_MAX_RESULT_SIZE:]
+            return stdout
+
+        if self._state == _RUNNING and self._process.poll() is not None:
+            elapsed = time.time() - self._start
+            self.result.elapsed_time = elapsed
+            if self._process.returncode != 0:
+                if self._retries < self._spec.flake_retries:
+                    message(
+                        'FLAKE',
+                        '%s [ret=%d, pid=%d]' %
+                        (self._spec.shortname, self._process.returncode,
+                         self._process.pid),
+                        stdout(),
+                        do_newline=True)
+                    self._retries += 1
+                    self.result.num_failures += 1
+                    self.result.retries = self._timeout_retries + self._retries
+                    # NOTE: job is restarted regardless of jobset's max_time setting
+                    self.start()
+                else:
+                    self._state = _FAILURE
+                    if not self._suppress_failure_message:
+                        message(
+                            'FAILED',
+                            '%s [ret=%d, pid=%d, time=%.1fsec]' %
+                            (self._spec.shortname, self._process.returncode,
+                             self._process.pid, elapsed),
+                            stdout(),
+                            do_newline=True)
+                    self.result.state = 'FAILED'
+                    self.result.num_failures += 1
+                    self.result.returncode = self._process.returncode
+            else:
+                self._state = _SUCCESS
+                measurement = ''
+                if measure_cpu_costs:
+                    m = re.search(
+                        r'real\s+([0-9.]+)\nuser\s+([0-9.]+)\nsys\s+([0-9.]+)',
+                        stdout())
+                    real = float(m.group(1))
+                    user = float(m.group(2))
+                    sys = float(m.group(3))
+                    if real > 0.5:
+                        cores = (user + sys) / real
+                        self.result.cpu_measured = float('%.01f' % cores)
+                        self.result.cpu_estimated = float(
+                            '%.01f' % self._spec.cpu_cost)
+                        measurement = '; cpu_cost=%.01f; estimated=%.01f' % (
+                            self.result.cpu_measured, self.result.cpu_estimated)
+                if not self._quiet_success:
+                    message(
+                        'PASSED',
+                        '%s [time=%.1fsec, retries=%d:%d%s]' %
+                        (self._spec.shortname, elapsed, self._retries,
+                         self._timeout_retries, measurement),
+                        stdout() if self._spec.verbose_success else None,
+                        do_newline=self._newline_on_success or self._travis)
+                self.result.state = 'PASSED'
+        elif (self._state == _RUNNING and
+              self._spec.timeout_seconds is not None and
+              time.time() - self._start > self._spec.timeout_seconds):
+            elapsed = time.time() - self._start
+            self.result.elapsed_time = elapsed
+            if self._timeout_retries < self._spec.timeout_retries:
+                message(
+                    'TIMEOUT_FLAKE',
+                    '%s [pid=%d]' % (self._spec.shortname, self._process.pid),
+                    stdout(),
+                    do_newline=True)
+                self._timeout_retries += 1
+                self.result.num_failures += 1
+                self.result.retries = self._timeout_retries + self._retries
+                if self._spec.kill_handler:
+                    self._spec.kill_handler(self)
+                self._process.terminate()
+                # NOTE: job is restarted regardless of jobset's max_time setting
+                self.start()
+            else:
+                message(
+                    'TIMEOUT',
+                    '%s [pid=%d, time=%.1fsec]' % (self._spec.shortname,
+                                                   self._process.pid, elapsed),
+                    stdout(),
+                    do_newline=True)
+                self.kill()
+                self.result.state = 'TIMEOUT'
+                self.result.num_failures += 1
+        return self._state
+
+    def kill(self):
+        if self._state == _RUNNING:
+            self._state = _KILLED
+            if self._spec.kill_handler:
+                self._spec.kill_handler(self)
+            self._process.terminate()
+
+    def suppress_failure_message(self):
+        self._suppress_failure_message = True
 
 
 class Jobset(object):
-  """Manages one run of jobs."""
+    """Manages one run of jobs."""
 
-  def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic, newline_on_success, travis,
-               stop_on_failure, add_env, quiet_success, max_time):
-    self._running = set()
-    self._check_cancelled = check_cancelled
-    self._cancelled = False
-    self._failures = 0
-    self._completed = 0
-    self._maxjobs = maxjobs
-    self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
-    self._newline_on_success = newline_on_success
-    self._travis = travis
-    self._stop_on_failure = stop_on_failure
-    self._add_env = add_env
-    self._quiet_success = quiet_success
-    self._max_time = max_time
-    self.resultset = {}
-    self._remaining = None
-    self._start_time = time.time()
+    def __init__(self, check_cancelled, maxjobs, maxjobs_cpu_agnostic,
+                 newline_on_success, travis, stop_on_failure, add_env,
+                 quiet_success, max_time):
+        self._running = set()
+        self._check_cancelled = check_cancelled
+        self._cancelled = False
+        self._failures = 0
+        self._completed = 0
+        self._maxjobs = maxjobs
+        self._maxjobs_cpu_agnostic = maxjobs_cpu_agnostic
+        self._newline_on_success = newline_on_success
+        self._travis = travis
+        self._stop_on_failure = stop_on_failure
+        self._add_env = add_env
+        self._quiet_success = quiet_success
+        self._max_time = max_time
+        self.resultset = {}
+        self._remaining = None
+        self._start_time = time.time()
 
-  def set_remaining(self, remaining):
-    self._remaining = remaining
+    def set_remaining(self, remaining):
+        self._remaining = remaining
 
-  def get_num_failures(self):
-    return self._failures
+    def get_num_failures(self):
+        return self._failures
 
-  def cpu_cost(self):
-    c = 0
-    for job in self._running:
-      c += job._spec.cpu_cost
-    return c
+    def cpu_cost(self):
+        c = 0
+        for job in self._running:
+            c += job._spec.cpu_cost
+        return c
 
-  def start(self, spec):
-    """Start a job. Return True on success, False on failure."""
-    while True:
-      if self._max_time > 0 and time.time() - self._start_time > self._max_time:
-        skipped_job_result = JobResult()
-        skipped_job_result.state = 'SKIPPED'
-        message('SKIPPED', spec.shortname, do_newline=True)
-        self.resultset[spec.shortname] = [skipped_job_result]
+    def start(self, spec):
+        """Start a job. Return True on success, False on failure."""
+        while True:
+            if self._max_time > 0 and time.time(
+            ) - self._start_time > self._max_time:
+                skipped_job_result = JobResult()
+                skipped_job_result.state = 'SKIPPED'
+                message('SKIPPED', spec.shortname, do_newline=True)
+                self.resultset[spec.shortname] = [skipped_job_result]
+                return True
+            if self.cancelled(): return False
+            current_cpu_cost = self.cpu_cost()
+            if current_cpu_cost == 0: break
+            if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
+                if len(self._running) < self._maxjobs_cpu_agnostic:
+                    break
+            self.reap(spec.shortname, spec.cpu_cost)
+        if self.cancelled(): return False
+        job = Job(spec, self._newline_on_success, self._travis, self._add_env,
+                  self._quiet_success)
+        self._running.add(job)
+        if job.GetSpec().shortname not in self.resultset:
+            self.resultset[job.GetSpec().shortname] = []
         return True
-      if self.cancelled(): return False
-      current_cpu_cost = self.cpu_cost()
-      if current_cpu_cost == 0: break
-      if current_cpu_cost + spec.cpu_cost <= self._maxjobs:
-        if len(self._running) < self._maxjobs_cpu_agnostic:
-          break
-      self.reap(spec.shortname, spec.cpu_cost)
-    if self.cancelled(): return False
-    job = Job(spec,
-              self._newline_on_success,
-              self._travis,
-              self._add_env,
-              self._quiet_success)
-    self._running.add(job)
-    if job.GetSpec().shortname not in self.resultset:
-      self.resultset[job.GetSpec().shortname] = []
-    return True
 
-  def reap(self, waiting_for=None, waiting_for_cost=None):
-    """Collect the dead jobs."""
-    while self._running:
-      dead = set()
-      for job in self._running:
-        st = eintr_be_gone(lambda: job.state())
-        if st == _RUNNING: continue
-        if st == _FAILURE or st == _KILLED:
-          self._failures += 1
-          if self._stop_on_failure:
-            self._cancelled = True
+    def reap(self, waiting_for=None, waiting_for_cost=None):
+        """Collect the dead jobs."""
+        while self._running:
+            dead = set()
             for job in self._running:
-              job.kill()
-        dead.add(job)
-        break
-      for job in dead:
-        self._completed += 1
-        if not self._quiet_success or job.result.state != 'PASSED':
-          self.resultset[job.GetSpec().shortname].append(job.result)
-        self._running.remove(job)
-      if dead: return
-      if not self._travis and platform_string() != 'windows':
-        rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
-        if self._remaining is not None and self._completed > 0:
-          now = time.time()
-          sofar = now - self._start_time
-          remaining = sofar / self._completed * (self._remaining + len(self._running))
-          rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
-        if waiting_for is not None:
-          wstr = ' next: %s @ %.2f cpu' % (waiting_for, waiting_for_cost)
-        else:
-          wstr = ''
-        message('WAITING', '%s%d jobs running, %d complete, %d failed (load %.2f)%s' % (
-            rstr, len(self._running), self._completed, self._failures, self.cpu_cost(), wstr))
-      if platform_string() == 'windows':
-        time.sleep(0.1)
-      else:
-        signal.alarm(10)
-        signal.pause()
+                st = eintr_be_gone(lambda: job.state())
+                if st == _RUNNING: continue
+                if st == _FAILURE or st == _KILLED:
+                    self._failures += 1
+                    if self._stop_on_failure:
+                        self._cancelled = True
+                        for job in self._running:
+                            job.kill()
+                dead.add(job)
+                break
+            for job in dead:
+                self._completed += 1
+                if not self._quiet_success or job.result.state != 'PASSED':
+                    self.resultset[job.GetSpec().shortname].append(job.result)
+                self._running.remove(job)
+            if dead: return
+            if not self._travis and platform_string() != 'windows':
+                rstr = '' if self._remaining is None else '%d queued, ' % self._remaining
+                if self._remaining is not None and self._completed > 0:
+                    now = time.time()
+                    sofar = now - self._start_time
+                    remaining = sofar / self._completed * (
+                        self._remaining + len(self._running))
+                    rstr = 'ETA %.1f sec; %s' % (remaining, rstr)
+                if waiting_for is not None:
+                    wstr = ' next: %s @ %.2f cpu' % (waiting_for,
+                                                     waiting_for_cost)
+                else:
+                    wstr = ''
+                message(
+                    'WAITING',
+                    '%s%d jobs running, %d complete, %d failed (load %.2f)%s' %
+                    (rstr, len(self._running), self._completed, self._failures,
+                     self.cpu_cost(), wstr))
+            if platform_string() == 'windows':
+                time.sleep(0.1)
+            else:
+                signal.alarm(10)
+                signal.pause()
 
-  def cancelled(self):
-    """Poll for cancellation."""
-    if self._cancelled: return True
-    if not self._check_cancelled(): return False
-    for job in self._running:
-      job.kill()
-    self._cancelled = True
-    return True
+    def cancelled(self):
+        """Poll for cancellation."""
+        if self._cancelled: return True
+        if not self._check_cancelled(): return False
+        for job in self._running:
+            job.kill()
+        self._cancelled = True
+        return True
 
-  def finish(self):
-    while self._running:
-      if self.cancelled(): pass  # poll cancellation
-      self.reap()
-    if platform_string() != 'windows':
-      signal.alarm(0)
-    return not self.cancelled() and self._failures == 0
+    def finish(self):
+        while self._running:
+            if self.cancelled(): pass  # poll cancellation
+            self.reap()
+        if platform_string() != 'windows':
+            signal.alarm(0)
+        return not self.cancelled() and self._failures == 0
 
 
 def _never_cancelled():
-  return False
+    return False
 
 
 def tag_remaining(xs):
-  staging = []
-  for x in xs:
-    staging.append(x)
-    if len(staging) > 5000:
-      yield (staging.pop(0), None)
-  n = len(staging)
-  for i, x in enumerate(staging):
-    yield (x, n - i - 1)
+    staging = []
+    for x in xs:
+        staging.append(x)
+        if len(staging) > 5000:
+            yield (staging.pop(0), None)
+    n = len(staging)
+    for i, x in enumerate(staging):
+        yield (x, n - i - 1)
 
 
 def run(cmdlines,
@@ -511,23 +550,23 @@
         skip_jobs=False,
         quiet_success=False,
         max_time=-1):
-  if skip_jobs:
-    resultset = {}
-    skipped_job_result = JobResult()
-    skipped_job_result.state = 'SKIPPED'
-    for job in cmdlines:
-      message('SKIPPED', job.shortname, do_newline=True)
-      resultset[job.shortname] = [skipped_job_result]
-    return 0, resultset
-  js = Jobset(check_cancelled,
-              maxjobs if maxjobs is not None else _DEFAULT_MAX_JOBS,
-              maxjobs_cpu_agnostic if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
-              newline_on_success, travis, stop_on_failure, add_env,
-              quiet_success, max_time)
-  for cmdline, remaining in tag_remaining(cmdlines):
-    if not js.start(cmdline):
-      break
-    if remaining is not None:
-      js.set_remaining(remaining)
-  js.finish()
-  return js.get_num_failures(), js.resultset
+    if skip_jobs:
+        resultset = {}
+        skipped_job_result = JobResult()
+        skipped_job_result.state = 'SKIPPED'
+        for job in cmdlines:
+            message('SKIPPED', job.shortname, do_newline=True)
+            resultset[job.shortname] = [skipped_job_result]
+        return 0, resultset
+    js = Jobset(check_cancelled, maxjobs if maxjobs is not None else
+                _DEFAULT_MAX_JOBS, maxjobs_cpu_agnostic
+                if maxjobs_cpu_agnostic is not None else _DEFAULT_MAX_JOBS,
+                newline_on_success, travis, stop_on_failure, add_env,
+                quiet_success, max_time)
+    for cmdline, remaining in tag_remaining(cmdlines):
+        if not js.start(cmdline):
+            break
+        if remaining is not None:
+            js.set_remaining(remaining)
+    js.finish()
+    return js.get_num_failures(), js.resultset
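
Aside on the scheduling loop in the file above: Jobset.start() admits a new job only while the summed cpu_cost of the running jobs stays within the jobs budget and the plain count of running jobs stays under a separate CPU-agnostic cap, reaping finished jobs while it waits. A toy sketch of that admission rule under assumed limits; Job, max_cpu, and max_jobs below are stand-ins, not the real class or flags:

import collections

Job = collections.namedtuple('Job', ['name', 'cpu_cost'])

def can_admit(running, candidate, max_cpu=8.0, max_jobs=16):
    # Admit only if the added cpu_cost keeps us within the CPU budget and the
    # number of concurrently running jobs stays under the hard cap.
    current_cost = sum(job.cpu_cost for job in running)
    return (current_cost + candidate.cpu_cost <= max_cpu and
            len(running) < max_jobs)

running = [Job('build', 4.0), Job('unit_tests', 3.0)]
print(can_admit(running, Job('lint', 0.5)))    # True: 7.5 <= 8.0
print(can_admit(running, Job('stress', 2.0)))  # False: 9.0 > 8.0

The real Jobset additionally admits a job whenever nothing is running at all, so a single job whose declared cost exceeds the budget can still make progress.
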
diff --git a/tools/run_tests/python_utils/port_server.py b/tools/run_tests/python_utils/port_server.py
index 4fb5ca0..83e09c0 100755
--- a/tools/run_tests/python_utils/port_server.py
+++ b/tools/run_tests/python_utils/port_server.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Manage TCP ports for unit tests; started by run_tests.py"""
 
 import argparse
@@ -27,17 +26,14 @@
 import threading
 import platform
 
-
 # increment this number whenever making a change to ensure that
 # the changes are picked up by running CI servers
 # note that all changes must be backwards compatible
 _MY_VERSION = 20
 
-
 if len(sys.argv) == 2 and sys.argv[1] == 'dump_version':
-  print _MY_VERSION
-  sys.exit(0)
-
+    print _MY_VERSION
+    sys.exit(0)
 
 argp = argparse.ArgumentParser(description='Server for httpcli_test')
 argp.add_argument('-p', '--port', default=12345, type=int)
@@ -45,11 +41,11 @@
 args = argp.parse_args()
 
 if args.logfile is not None:
-  sys.stdin.close()
-  sys.stderr.close()
-  sys.stdout.close()
-  sys.stderr = open(args.logfile, 'w')
-  sys.stdout = sys.stderr
+    sys.stdin.close()
+    sys.stderr.close()
+    sys.stdout.close()
+    sys.stderr = open(args.logfile, 'w')
+    sys.stdout = sys.stderr
 
 print 'port server running on port %d' % args.port
 
@@ -57,67 +53,85 @@
 in_use = {}
 mu = threading.Lock()
 
+# Cronet restricts the following ports from being used (see
+# https://cs.chromium.org/chromium/src/net/base/port_util.cc). When one of these
+# ports is used in a Cronet test, the test fails (see issue #12149), so these
+# ports must be excluded from the pool.
+cronet_restricted_ports = [
+    1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87,
+    95, 101, 102, 103, 104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139,
+    143, 179, 389, 465, 512, 513, 514, 515, 526, 530, 531, 532, 540, 556, 563,
+    587, 601, 636, 993, 995, 2049, 3659, 4045, 6000, 6665, 6666, 6667, 6668,
+    6669, 6697
+]
+
+
 def can_connect(port):
-  # this test is only really useful on unices where SO_REUSE_PORT is available
-  # so on Windows, where this test is expensive, skip it
-  if platform.system() == 'Windows': return False
-  s = socket.socket()
-  try:
-    s.connect(('localhost', port))
-    return True
-  except socket.error, e:
-    return False
-  finally:
-    s.close()
+    # this check is only really useful on Unix systems, where SO_REUSEPORT is
+    # available; on Windows, where the check is expensive, skip it
+    if platform.system() == 'Windows': return False
+    s = socket.socket()
+    try:
+        s.connect(('localhost', port))
+        return True
+    except socket.error, e:
+        return False
+    finally:
+        s.close()
+
 
 def can_bind(port, proto):
-  s = socket.socket(proto, socket.SOCK_STREAM)
-  s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-  try:
-    s.bind(('localhost', port))
-    return True
-  except socket.error, e:
-    return False
-  finally:
-    s.close()
+    s = socket.socket(proto, socket.SOCK_STREAM)
+    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+    try:
+        s.bind(('localhost', port))
+        return True
+    except socket.error, e:
+        return False
+    finally:
+        s.close()
 
 
 def refill_pool(max_timeout, req):
-  """Scan for ports not marked for being in use"""
-  chk = list(range(1025, 32766))
-  random.shuffle(chk)
-  for i in chk:
-    if len(pool) > 100: break
-    if i in in_use:
-      age = time.time() - in_use[i]
-      if age < max_timeout:
-        continue
-      req.log_message("kill old request %d" % i)
-      del in_use[i]
-    if can_bind(i, socket.AF_INET) and can_bind(i, socket.AF_INET6) and not can_connect(i):
-      req.log_message("found available port %d" % i)
-      pool.append(i)
+    """Scan for ports not marked for being in use"""
+    chk = [
+        port for port in list(range(1025, 32766))
+        if port not in cronet_restricted_ports
+    ]
+    random.shuffle(chk)
+    for i in chk:
+        if len(pool) > 100: break
+        if i in in_use:
+            age = time.time() - in_use[i]
+            if age < max_timeout:
+                continue
+            req.log_message("kill old request %d" % i)
+            del in_use[i]
+        if can_bind(i, socket.AF_INET) and can_bind(
+                i, socket.AF_INET6) and not can_connect(i):
+            req.log_message("found available port %d" % i)
+            pool.append(i)
 
 
 def allocate_port(req):
-  global pool
-  global in_use
-  global mu
-  mu.acquire()
-  max_timeout = 600
-  while not pool:
-    refill_pool(max_timeout, req)
-    if not pool:
-      req.log_message("failed to find ports: retrying soon")
-      mu.release()
-      time.sleep(1)
-      mu.acquire()
-      max_timeout /= 2
-  port = pool[0]
-  pool = pool[1:]
-  in_use[port] = time.time()
-  mu.release()
-  return port
+    global pool
+    global in_use
+    global mu
+    mu.acquire()
+    max_timeout = 600
+    while not pool:
+        refill_pool(max_timeout, req)
+        if not pool:
+            req.log_message("failed to find ports: retrying soon")
+            mu.release()
+            time.sleep(1)
+            mu.acquire()
+            max_timeout /= 2
+    port = pool[0]
+    pool = pool[1:]
+    in_use[port] = time.time()
+    mu.release()
+    return port
 
 
 keep_running = True
@@ -125,61 +139,68 @@
 
 class Handler(BaseHTTPRequestHandler):
 
-  def setup(self):
-    # If the client is unreachable for 5 seconds, close the connection
-    self.timeout = 5
-    BaseHTTPRequestHandler.setup(self)
+    def setup(self):
+        # If the client is unreachable for 5 seconds, close the connection
+        self.timeout = 5
+        BaseHTTPRequestHandler.setup(self)
 
-  def do_GET(self):
-    global keep_running
-    global mu
-    if self.path == '/get':
-      # allocate a new port, it will stay bound for ten minutes and until
-      # it's unused
-      self.send_response(200)
-      self.send_header('Content-Type', 'text/plain')
-      self.end_headers()
-      p = allocate_port(self)
-      self.log_message('allocated port %d' % p)
-      self.wfile.write('%d' % p)
-    elif self.path[0:6] == '/drop/':
-      self.send_response(200)
-      self.send_header('Content-Type', 'text/plain')
-      self.end_headers()
-      p = int(self.path[6:])
-      mu.acquire()
-      if p in in_use:
-        del in_use[p]
-        pool.append(p)
-        k = 'known'
-      else:
-        k = 'unknown'
-      mu.release()
-      self.log_message('drop %s port %d' % (k, p))
-    elif self.path == '/version_number':
-      # fetch a version string and the current process pid
-      self.send_response(200)
-      self.send_header('Content-Type', 'text/plain')
-      self.end_headers()
-      self.wfile.write(_MY_VERSION)
-    elif self.path == '/dump':
-      # yaml module is not installed on Macs and Windows machines by default
-      # so we import it lazily (/dump action is only used for debugging)
-      import yaml
-      self.send_response(200)
-      self.send_header('Content-Type', 'text/plain')
-      self.end_headers()
-      mu.acquire()
-      now = time.time()
-      out = yaml.dump({'pool': pool, 'in_use': dict((k, now - v) for k, v in in_use.items())})
-      mu.release()
-      self.wfile.write(out)
-    elif self.path == '/quitquitquit':
-      self.send_response(200)
-      self.end_headers()
-      self.server.shutdown()
+    def do_GET(self):
+        global keep_running
+        global mu
+        if self.path == '/get':
+            # allocate a new port; it stays reserved for up to ten minutes or
+            # until it is dropped
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/plain')
+            self.end_headers()
+            p = allocate_port(self)
+            self.log_message('allocated port %d' % p)
+            self.wfile.write('%d' % p)
+        elif self.path[0:6] == '/drop/':
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/plain')
+            self.end_headers()
+            p = int(self.path[6:])
+            mu.acquire()
+            if p in in_use:
+                del in_use[p]
+                pool.append(p)
+                k = 'known'
+            else:
+                k = 'unknown'
+            mu.release()
+            self.log_message('drop %s port %d' % (k, p))
+        elif self.path == '/version_number':
+            # fetch a version string and the current process pid
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/plain')
+            self.end_headers()
+            self.wfile.write(_MY_VERSION)
+        elif self.path == '/dump':
+            # yaml module is not installed on Macs and Windows machines by default
+            # so we import it lazily (/dump action is only used for debugging)
+            import yaml
+            self.send_response(200)
+            self.send_header('Content-Type', 'text/plain')
+            self.end_headers()
+            mu.acquire()
+            now = time.time()
+            out = yaml.dump({
+                'pool':
+                pool,
+                'in_use':
+                dict((k, now - v) for k, v in in_use.items())
+            })
+            mu.release()
+            self.wfile.write(out)
+        elif self.path == '/quitquitquit':
+            self.send_response(200)
+            self.end_headers()
+            self.server.shutdown()
+
 
 class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
-  """Handle requests in a separate thread"""
+    """Handle requests in a separate thread"""
+
 
 ThreadedHTTPServer(('', args.port), Handler).serve_forever()
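
Aside on the pool-refill change in the file above: before a port enters the pool, port_server.py now skips the Cronet-restricted ports and requires that the port binds on both IPv4 and IPv6 while nothing is already accepting connections on it. A standalone sketch of that availability check, written for Python 3 (the script itself is Python 2) with an abbreviated restricted-port set:

import socket

# Abbreviated stand-in for the full Cronet-restricted port list above.
CRONET_RESTRICTED = {1, 7, 9, 6665, 6666, 6667, 6668, 6669, 6697}

def can_bind(port, family):
    s = socket.socket(family, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        s.bind(('localhost', port))
        return True
    except OSError:
        return False
    finally:
        s.close()

def can_connect(port):
    # If something already accepts connections on the port, it is not free.
    s = socket.socket()
    try:
        s.connect(('localhost', port))
        return True
    except OSError:
        return False
    finally:
        s.close()

def is_allocatable(port):
    return (port not in CRONET_RESTRICTED and
            can_bind(port, socket.AF_INET) and
            can_bind(port, socket.AF_INET6) and
            not can_connect(port))

print(is_allocatable(50051))
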
diff --git a/tools/run_tests/python_utils/report_utils.py b/tools/run_tests/python_utils/report_utils.py
index a386780..e4fddb8 100644
--- a/tools/run_tests/python_utils/report_utils.py
+++ b/tools/run_tests/python_utils/report_utils.py
@@ -11,17 +11,16 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Generate XML and HTML test reports."""
 
 from __future__ import print_function
 
 try:
-  from mako.runtime import Context
-  from mako.template import Template
-  from mako import exceptions
+    from mako.runtime import Context
+    from mako.template import Template
+    from mako import exceptions
 except (ImportError):
-  pass  # Mako not installed but it is ok.
+    pass  # Mako not installed but it is ok.
 import datetime
 import os
 import string
@@ -30,111 +29,127 @@
 
 
 def _filter_msg(msg, output_format):
-  """Filters out nonprintable and illegal characters from the message."""
-  if output_format in ['XML', 'HTML']:
-    # keep whitespaces but remove formfeed and vertical tab characters
-    # that make XML report unparseable.
-    filtered_msg = filter(
-        lambda x: x in string.printable and x != '\f' and x != '\v',
-        msg.decode('UTF-8', 'ignore'))
-    if output_format == 'HTML':
-      filtered_msg = filtered_msg.replace('"', '&quot;')
-    return filtered_msg
-  else:
-    return msg
+    """Filters out nonprintable and illegal characters from the message."""
+    if output_format in ['XML', 'HTML']:
+        # keep whitespace but remove formfeed and vertical tab characters
+        # that make the XML report unparseable.
+        filtered_msg = filter(
+            lambda x: x in string.printable and x != '\f' and x != '\v',
+            msg.decode('UTF-8', 'ignore'))
+        if output_format == 'HTML':
+            filtered_msg = filtered_msg.replace('"', '&quot;')
+        return filtered_msg
+    else:
+        return msg
 
 
 def new_junit_xml_tree():
-  return ET.ElementTree(ET.Element('testsuites'))
+    return ET.ElementTree(ET.Element('testsuites'))
 
-def render_junit_xml_report(resultset, report_file, suite_package='grpc',
+
+def render_junit_xml_report(resultset,
+                            report_file,
+                            suite_package='grpc',
                             suite_name='tests'):
-  """Generate JUnit-like XML report."""
-  tree = new_junit_xml_tree()
-  append_junit_xml_results(tree, resultset, suite_package, suite_name, '1')
-  create_xml_report_file(tree, report_file)
+    """Generate JUnit-like XML report."""
+    tree = new_junit_xml_tree()
+    append_junit_xml_results(tree, resultset, suite_package, suite_name, '1')
+    create_xml_report_file(tree, report_file)
+
 
 def create_xml_report_file(tree, report_file):
-  """Generate JUnit-like report file from xml tree ."""
-  # ensure the report directory exists
-  report_dir = os.path.dirname(os.path.abspath(report_file))
-  if not os.path.exists(report_dir):
-    os.makedirs(report_dir)
-  tree.write(report_file, encoding='UTF-8')
+    """Generate JUnit-like report file from xml tree ."""
+    # ensure the report directory exists
+    report_dir = os.path.dirname(os.path.abspath(report_file))
+    if not os.path.exists(report_dir):
+        os.makedirs(report_dir)
+    tree.write(report_file, encoding='UTF-8')
+
 
 def append_junit_xml_results(tree, resultset, suite_package, suite_name, id):
-  """Append a JUnit-like XML report tree with test results as a new suite."""
-  testsuite = ET.SubElement(tree.getroot(), 'testsuite',
-                            id=id, package=suite_package, name=suite_name,
-                            timestamp=datetime.datetime.now().isoformat())
-  failure_count  = 0
-  error_count = 0
-  for shortname, results in six.iteritems(resultset):
-    for result in results:
-      xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
-      if result.elapsed_time:
-        xml_test.set('time', str(result.elapsed_time))
-      filtered_msg =  _filter_msg(result.message, 'XML')
-      if result.state == 'FAILED':
-        ET.SubElement(xml_test, 'failure', message='Failure').text = filtered_msg
-        failure_count += 1
-      elif result.state == 'TIMEOUT':
-        ET.SubElement(xml_test, 'error', message='Timeout').text = filtered_msg
-        error_count += 1
-      elif result.state == 'SKIPPED':
-        ET.SubElement(xml_test, 'skipped', message='Skipped')
-  testsuite.set('failures', str(failure_count))
-  testsuite.set('errors', str(error_count))
+    """Append a JUnit-like XML report tree with test results as a new suite."""
+    testsuite = ET.SubElement(
+        tree.getroot(),
+        'testsuite',
+        id=id,
+        package=suite_package,
+        name=suite_name,
+        timestamp=datetime.datetime.now().isoformat())
+    failure_count = 0
+    error_count = 0
+    for shortname, results in six.iteritems(resultset):
+        for result in results:
+            xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
+            if result.elapsed_time:
+                xml_test.set('time', str(result.elapsed_time))
+            filtered_msg = _filter_msg(result.message, 'XML')
+            if result.state == 'FAILED':
+                ET.SubElement(
+                    xml_test, 'failure', message='Failure').text = filtered_msg
+                failure_count += 1
+            elif result.state == 'TIMEOUT':
+                ET.SubElement(
+                    xml_test, 'error', message='Timeout').text = filtered_msg
+                error_count += 1
+            elif result.state == 'SKIPPED':
+                ET.SubElement(xml_test, 'skipped', message='Skipped')
+    testsuite.set('failures', str(failure_count))
+    testsuite.set('errors', str(error_count))
 
-def render_interop_html_report(
-  client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
-  http2_server_cases, resultset,
-  num_failures, cloud_to_prod, prod_servers, http2_interop):
-  """Generate HTML report for interop tests."""
-  template_file = 'tools/run_tests/interop/interop_html_report.template'
-  try:
-    mytemplate = Template(filename=template_file, format_exceptions=True)
-  except NameError:
-    print('Mako template is not installed. Skipping HTML report generation.')
-    return
-  except IOError as e:
-    print('Failed to find the template %s: %s' % (template_file, e))
-    return
 
-  sorted_test_cases = sorted(test_cases)
-  sorted_auth_test_cases = sorted(auth_test_cases)
-  sorted_http2_cases = sorted(http2_cases)
-  sorted_http2_server_cases = sorted(http2_server_cases)
-  sorted_client_langs = sorted(client_langs)
-  sorted_server_langs = sorted(server_langs)
-  sorted_prod_servers = sorted(prod_servers)
+def render_interop_html_report(client_langs, server_langs, test_cases,
+                               auth_test_cases, http2_cases, http2_server_cases,
+                               resultset, num_failures, cloud_to_prod,
+                               prod_servers, http2_interop):
+    """Generate HTML report for interop tests."""
+    template_file = 'tools/run_tests/interop/interop_html_report.template'
+    try:
+        mytemplate = Template(filename=template_file, format_exceptions=True)
+    except NameError:
+        print(
+            'Mako template is not installed. Skipping HTML report generation.')
+        return
+    except IOError as e:
+        print('Failed to find the template %s: %s' % (template_file, e))
+        return
 
-  args = {'client_langs': sorted_client_langs,
-          'server_langs': sorted_server_langs,
-          'test_cases': sorted_test_cases,
-          'auth_test_cases': sorted_auth_test_cases,
-          'http2_cases': sorted_http2_cases,
-          'http2_server_cases': sorted_http2_server_cases,
-          'resultset': resultset,
-          'num_failures': num_failures,
-          'cloud_to_prod': cloud_to_prod,
-          'prod_servers': sorted_prod_servers,
-          'http2_interop': http2_interop}
+    sorted_test_cases = sorted(test_cases)
+    sorted_auth_test_cases = sorted(auth_test_cases)
+    sorted_http2_cases = sorted(http2_cases)
+    sorted_http2_server_cases = sorted(http2_server_cases)
+    sorted_client_langs = sorted(client_langs)
+    sorted_server_langs = sorted(server_langs)
+    sorted_prod_servers = sorted(prod_servers)
 
-  html_report_out_dir = 'reports'
-  if not os.path.exists(html_report_out_dir):
-    os.mkdir(html_report_out_dir)
-  html_file_path = os.path.join(html_report_out_dir, 'index.html')
-  try:
-    with open(html_file_path, 'w') as output_file:
-      mytemplate.render_context(Context(output_file, **args))
-  except:
-    print(exceptions.text_error_template().render())
-    raise
+    args = {
+        'client_langs': sorted_client_langs,
+        'server_langs': sorted_server_langs,
+        'test_cases': sorted_test_cases,
+        'auth_test_cases': sorted_auth_test_cases,
+        'http2_cases': sorted_http2_cases,
+        'http2_server_cases': sorted_http2_server_cases,
+        'resultset': resultset,
+        'num_failures': num_failures,
+        'cloud_to_prod': cloud_to_prod,
+        'prod_servers': sorted_prod_servers,
+        'http2_interop': http2_interop
+    }
+
+    html_report_out_dir = 'reports'
+    if not os.path.exists(html_report_out_dir):
+        os.mkdir(html_report_out_dir)
+    html_file_path = os.path.join(html_report_out_dir, 'index.html')
+    try:
+        with open(html_file_path, 'w') as output_file:
+            mytemplate.render_context(Context(output_file, **args))
+    except:
+        print(exceptions.text_error_template().render())
+        raise
+
 
 def render_perf_profiling_results(output_filepath, profile_names):
-  with open(output_filepath, 'w') as output_file:
-    output_file.write('<ul>\n')
-    for name in profile_names:
-      output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name))
-    output_file.write('</ul>\n')
+    with open(output_filepath, 'w') as output_file:
+        output_file.write('<ul>\n')
+        for name in profile_names:
+            output_file.write('<li><a href=%s>%s</a></li>\n' % (name, name))
+        output_file.write('</ul>\n')
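The report helpers above build the JUnit-style XML with nothing but xml.etree.ElementTree, so the pattern is easy to exercise outside the test runner. Below is a minimal, self-contained sketch of that pattern under my own assumptions: the function name write_minimal_junit_report and the (name, state) result tuples are illustrative only, not part of report_utils' API.

    import datetime
    import os
    import xml.etree.ElementTree as ET

    def write_minimal_junit_report(results, report_file):
        # results: iterable of (test_name, state) pairs, where state is one of
        # PASSED, FAILED, TIMEOUT, or SKIPPED -- mirroring the states handled above.
        root = ET.Element('testsuites')
        suite = ET.SubElement(
            root,
            'testsuite',
            id='1',
            package='grpc',
            name='tests',
            timestamp=datetime.datetime.now().isoformat())
        failure_count = 0
        error_count = 0
        for name, state in results:
            case = ET.SubElement(suite, 'testcase', name=name)
            if state == 'FAILED':
                ET.SubElement(case, 'failure', message='Failure')
                failure_count += 1
            elif state == 'TIMEOUT':
                ET.SubElement(case, 'error', message='Timeout')
                error_count += 1
            elif state == 'SKIPPED':
                ET.SubElement(case, 'skipped', message='Skipped')
        suite.set('failures', str(failure_count))
        suite.set('errors', str(error_count))
        # Ensure the report directory exists, then write the tree, as
        # create_xml_report_file does above.
        report_dir = os.path.dirname(os.path.abspath(report_file))
        if not os.path.exists(report_dir):
            os.makedirs(report_dir)
        ET.ElementTree(root).write(report_file, encoding='UTF-8')

    write_minimal_junit_report(
        [('foo_test', 'PASSED'), ('bar_test', 'FAILED')],
        'reports/minimal/sponge_log.xml')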
diff --git a/tools/run_tests/python_utils/start_port_server.py b/tools/run_tests/python_utils/start_port_server.py
index 786103c..37995ac 100644
--- a/tools/run_tests/python_utils/start_port_server.py
+++ b/tools/run_tests/python_utils/start_port_server.py
@@ -22,10 +22,10 @@
 import tempfile
 import time
 
-
 # must be synchronized with test/core/utils/port_server_client.h
 _PORT_SERVER_PORT = 32766
 
+
 def start_port_server():
     # check if a compatible port server is running
     # if incompatible (version mismatch) ==> start a new one
@@ -33,9 +33,8 @@
     # otherwise, leave it up
     try:
         version = int(
-            urllib.urlopen(
-                'http://localhost:%d/version_number' %
-                _PORT_SERVER_PORT).read())
+            urllib.urlopen('http://localhost:%d/version_number' %
+                           _PORT_SERVER_PORT).read())
         logging.info('detected port server running version %d', version)
         running = True
     except Exception as e:
@@ -44,16 +43,16 @@
     if running:
         current_version = int(
             subprocess.check_output([
-                sys.executable, os.path.abspath(
-                    'tools/run_tests/python_utils/port_server.py'),
+                sys.executable,
+                os.path.abspath('tools/run_tests/python_utils/port_server.py'),
                 'dump_version'
             ]))
         logging.info('my port server is version %d', current_version)
         running = (version >= current_version)
         if not running:
             logging.info('port_server version mismatch: killing the old one')
-            urllib.urlopen('http://localhost:%d/quitquitquit' %
-                           _PORT_SERVER_PORT).read()
+            urllib.urlopen(
+                'http://localhost:%d/quitquitquit' % _PORT_SERVER_PORT).read()
             time.sleep(1)
     if not running:
         fd, logfile = tempfile.mkstemp()
@@ -62,7 +61,8 @@
         args = [
             sys.executable,
             os.path.abspath('tools/run_tests/python_utils/port_server.py'),
-            '-p', '%d' % _PORT_SERVER_PORT, '-l', logfile
+            '-p',
+            '%d' % _PORT_SERVER_PORT, '-l', logfile
         ]
         env = dict(os.environ)
         env['BUILD_ID'] = 'pleaseDontKillMeJenkins'
diff --git a/tools/run_tests/python_utils/upload_test_results.py b/tools/run_tests/python_utils/upload_test_results.py
index ea97bc0..a2dd1c6 100644
--- a/tools/run_tests/python_utils/upload_test_results.py
+++ b/tools/run_tests/python_utils/upload_test_results.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Helper to upload Jenkins test results to BQ"""
 
 from __future__ import print_function
@@ -23,8 +22,8 @@
 import time
 import uuid
 
-gcp_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 import big_query_utils
 
@@ -35,55 +34,60 @@
 _PARTITION_TYPE = 'DAY'
 _PROJECT_ID = 'grpc-testing'
 _RESULTS_SCHEMA = [
-  ('job_name', 'STRING', 'Name of Jenkins job'),
-  ('build_id', 'INTEGER', 'Build ID of Jenkins job'),
-  ('build_url', 'STRING', 'URL of Jenkins job'),
-  ('test_name', 'STRING', 'Individual test name'),
-  ('language', 'STRING', 'Language of test'),
-  ('platform', 'STRING', 'Platform used for test'),
-  ('config', 'STRING', 'Config used for test'),
-  ('compiler', 'STRING', 'Compiler used for test'),
-  ('iomgr_platform', 'STRING', 'Iomgr used for test'),
-  ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
-  ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
-  ('elapsed_time', 'FLOAT', 'How long test took to run'),
-  ('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
-  ('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
-  ('return_code', 'INTEGER', 'Exit code of test'),
+    ('job_name', 'STRING', 'Name of Jenkins job'),
+    ('build_id', 'INTEGER', 'Build ID of Jenkins job'),
+    ('build_url', 'STRING', 'URL of Jenkins job'),
+    ('test_name', 'STRING', 'Individual test name'),
+    ('language', 'STRING', 'Language of test'),
+    ('platform', 'STRING', 'Platform used for test'),
+    ('config', 'STRING', 'Config used for test'),
+    ('compiler', 'STRING', 'Compiler used for test'),
+    ('iomgr_platform', 'STRING', 'Iomgr used for test'),
+    ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
+    ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+    ('elapsed_time', 'FLOAT', 'How long test took to run'),
+    ('cpu_estimated', 'FLOAT', 'Estimated CPU usage of test'),
+    ('cpu_measured', 'FLOAT', 'Actual CPU usage of test'),
+    ('return_code', 'INTEGER', 'Exit code of test'),
 ]
 _INTEROP_RESULTS_SCHEMA = [
-  ('job_name', 'STRING', 'Name of Jenkins/Kokoro job'),
-  ('build_id', 'INTEGER', 'Build ID of Jenkins/Kokoro job'),
-  ('build_url', 'STRING', 'URL of Jenkins/Kokoro job'),
-  ('test_name', 'STRING', 'Unique test name combining client, server, and test_name'),
-  ('suite', 'STRING', 'Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth'),
-  ('client', 'STRING', 'Client language'),
-  ('server', 'STRING', 'Server host name'),
-  ('test_case', 'STRING', 'Name of test case'),
-  ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
-  ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
-  ('elapsed_time', 'FLOAT', 'How long test took to run'),
+    ('job_name', 'STRING', 'Name of Jenkins/Kokoro job'),
+    ('build_id', 'INTEGER', 'Build ID of Jenkins/Kokoro job'),
+    ('build_url', 'STRING', 'URL of Jenkins/Kokoro job'),
+    ('test_name', 'STRING',
+     'Unique test name combining client, server, and test_name'),
+    ('suite', 'STRING',
+     'Test suite: cloud_to_cloud, cloud_to_prod, or cloud_to_prod_auth'),
+    ('client', 'STRING', 'Client language'),
+    ('server', 'STRING', 'Server host name'),
+    ('test_case', 'STRING', 'Name of test case'),
+    ('result', 'STRING', 'Test result: PASSED, TIMEOUT, FAILED, or SKIPPED'),
+    ('timestamp', 'TIMESTAMP', 'Timestamp of test run'),
+    ('elapsed_time', 'FLOAT', 'How long test took to run'),
 ]
 
 
 def _get_build_metadata(test_results):
-  """Add Jenkins/Kokoro build metadata to test_results based on environment
+    """Add Jenkins/Kokoro build metadata to test_results based on environment
   variables set by Jenkins/Kokoro.
   """
-  build_id = os.getenv('BUILD_ID') or os.getenv('KOKORO_BUILD_NUMBER')
-  build_url = os.getenv('BUILD_URL') or os.getenv('KOKORO_BUILD_URL')
-  job_name = os.getenv('JOB_BASE_NAME') or os.getenv('KOKORO_JOB_NAME')
+    build_id = os.getenv('BUILD_ID') or os.getenv('KOKORO_BUILD_NUMBER')
+    build_url = os.getenv('BUILD_URL')
+    if os.getenv('KOKORO_BUILD_ID'):
+        build_url = 'https://sponge.corp.google.com/invocation?id=%s' % os.getenv(
+            'KOKORO_BUILD_ID')
+    job_name = os.getenv('JOB_BASE_NAME') or os.getenv('KOKORO_JOB_NAME')
 
-  if build_id:
-    test_results['build_id'] = build_id
-  if build_url:
-    test_results['build_url'] = build_url
-  if job_name:
-    test_results['job_name'] = job_name
+    if build_id:
+        test_results['build_id'] = build_id
+    if build_url:
+        test_results['build_url'] = build_url
+    if job_name:
+        test_results['job_name'] = job_name
 
 
 def upload_results_to_bq(resultset, bq_table, args, platform):
-  """Upload test results to a BQ table.
+    """Upload test results to a BQ table.
 
   Args:
       resultset: dictionary generated by jobset.run
@@ -91,77 +95,97 @@
       args: args in run_tests.py, generated by argparse
       platform: string name of platform tests were run on
   """
-  bq = big_query_utils.create_big_query()
-  big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _RESULTS_SCHEMA, _DESCRIPTION,
-                                           partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
+    bq = big_query_utils.create_big_query()
+    big_query_utils.create_partitioned_table(
+        bq,
+        _PROJECT_ID,
+        _DATASET_ID,
+        bq_table,
+        _RESULTS_SCHEMA,
+        _DESCRIPTION,
+        partition_type=_PARTITION_TYPE,
+        expiration_ms=_EXPIRATION_MS)
 
-  for shortname, results in six.iteritems(resultset):
-    for result in results:
-      test_results = {}
-      _get_build_metadata(test_results)
-      test_results['compiler'] = args.compiler
-      test_results['config'] = args.config
-      test_results['cpu_estimated'] = result.cpu_estimated
-      test_results['cpu_measured'] = result.cpu_measured
-      test_results['elapsed_time'] = '%.2f' % result.elapsed_time
-      test_results['iomgr_platform'] = args.iomgr_platform
-      # args.language is a list, but will always have one element in the contexts
-      # this function is used.
-      test_results['language'] = args.language[0]
-      test_results['platform'] = platform
-      test_results['result'] = result.state
-      test_results['return_code'] = result.returncode
-      test_results['test_name'] = shortname
-      test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+    for shortname, results in six.iteritems(resultset):
+        for result in results:
+            test_results = {}
+            _get_build_metadata(test_results)
+            test_results['compiler'] = args.compiler
+            test_results['config'] = args.config
+            test_results['cpu_estimated'] = result.cpu_estimated
+            test_results['cpu_measured'] = result.cpu_measured
+            test_results['elapsed_time'] = '%.2f' % result.elapsed_time
+            test_results['iomgr_platform'] = args.iomgr_platform
+            # args.language is a list, but will always have one element in the
+            # contexts in which this function is used.
+            test_results['language'] = args.language[0]
+            test_results['platform'] = platform
+            test_results['result'] = result.state
+            test_results['return_code'] = result.returncode
+            test_results['test_name'] = shortname
+            test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
 
-      row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
+            row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
 
-      # TODO(jtattermusch): rows are inserted one by one, very inefficient
-      max_retries = 3
-      for attempt in range(max_retries):
-        if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
-          break
-        else:
-          if attempt < max_retries - 1:
-            print('Error uploading result to bigquery, will retry.')
-          else:
-            print('Error uploading result to bigquery, all attempts failed.')
-            sys.exit(1)
+            # TODO(jtattermusch): rows are inserted one by one, very inefficient
+            max_retries = 3
+            for attempt in range(max_retries):
+                if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+                                               bq_table, [row]):
+                    break
+                else:
+                    if attempt < max_retries - 1:
+                        print('Error uploading result to bigquery, will retry.')
+                    else:
+                        print(
+                            'Error uploading result to bigquery, all attempts failed.'
+                        )
+                        sys.exit(1)
 
 
 def upload_interop_results_to_bq(resultset, bq_table, args):
-  """Upload interop test results to a BQ table.
+    """Upload interop test results to a BQ table.
 
   Args:
       resultset: dictionary generated by jobset.run
       bq_table: string name of table to create/upload results to in BQ
       args: args in run_interop_tests.py, generated by argparse
   """
-  bq = big_query_utils.create_big_query()
-  big_query_utils.create_partitioned_table(bq, _PROJECT_ID, _DATASET_ID, bq_table, _INTEROP_RESULTS_SCHEMA, _DESCRIPTION,
-                                           partition_type=_PARTITION_TYPE, expiration_ms= _EXPIRATION_MS)
+    bq = big_query_utils.create_big_query()
+    big_query_utils.create_partitioned_table(
+        bq,
+        _PROJECT_ID,
+        _DATASET_ID,
+        bq_table,
+        _INTEROP_RESULTS_SCHEMA,
+        _DESCRIPTION,
+        partition_type=_PARTITION_TYPE,
+        expiration_ms=_EXPIRATION_MS)
 
-  for shortname, results in six.iteritems(resultset):
-    for result in results:
-      test_results = {}
-      _get_build_metadata(test_results)
-      test_results['elapsed_time'] = '%.2f' % result.elapsed_time
-      test_results['result'] = result.state
-      test_results['test_name'] = shortname
-      test_results['suite'] = shortname.split(':')[0]
-      test_results['client'] = shortname.split(':')[1]
-      test_results['server'] = shortname.split(':')[2]
-      test_results['test_case'] = shortname.split(':')[3]
-      test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
-      row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
-      # TODO(jtattermusch): rows are inserted one by one, very inefficient
-      max_retries = 3
-      for attempt in range(max_retries):
-        if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, bq_table, [row]):
-          break
-        else:
-          if attempt < max_retries - 1:
-            print('Error uploading result to bigquery, will retry.')
-          else:
-            print('Error uploading result to bigquery, all attempts failed.')
-            sys.exit(1)
+    for shortname, results in six.iteritems(resultset):
+        for result in results:
+            test_results = {}
+            _get_build_metadata(test_results)
+            test_results['elapsed_time'] = '%.2f' % result.elapsed_time
+            test_results['result'] = result.state
+            test_results['test_name'] = shortname
+            test_results['suite'] = shortname.split(':')[0]
+            test_results['client'] = shortname.split(':')[1]
+            test_results['server'] = shortname.split(':')[2]
+            test_results['test_case'] = shortname.split(':')[3]
+            test_results['timestamp'] = time.strftime('%Y-%m-%d %H:%M:%S')
+            row = big_query_utils.make_row(str(uuid.uuid4()), test_results)
+            # TODO(jtattermusch): rows are inserted one by one, very inefficient
+            max_retries = 3
+            for attempt in range(max_retries):
+                if big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+                                               bq_table, [row]):
+                    break
+                else:
+                    if attempt < max_retries - 1:
+                        print('Error uploading result to bigquery, will retry.')
+                    else:
+                        print(
+                            'Error uploading result to bigquery, all attempts failed.'
+                        )
+                        sys.exit(1)
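Both upload functions above share the same bounded-retry loop around big_query_utils.insert_rows. Here is a small, self-contained sketch of that retry pattern, assuming nothing beyond the standard library; insert_rows is a stand-in callable for the real BigQuery helper, and flaky_insert is purely illustrative.

    from __future__ import print_function
    import sys

    def insert_row_with_retries(insert_rows, row, max_retries=3):
        # insert_rows: callable taking a list of rows and returning True on
        # success (the role big_query_utils.insert_rows plays in the real script).
        for attempt in range(max_retries):
            if insert_rows([row]):
                return
            if attempt < max_retries - 1:
                print('Error uploading result to bigquery, will retry.')
            else:
                print('Error uploading result to bigquery, all attempts failed.')
                sys.exit(1)

    # Example: an insert that succeeds on the second attempt.
    attempts = {'count': 0}

    def flaky_insert(rows):
        attempts['count'] += 1
        return attempts['count'] >= 2

    insert_row_with_retries(flaky_insert, {'test_name': 'foo_test'})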
diff --git a/tools/run_tests/python_utils/watch_dirs.py b/tools/run_tests/python_utils/watch_dirs.py
index 7bd085e..d2ad303 100755
--- a/tools/run_tests/python_utils/watch_dirs.py
+++ b/tools/run_tests/python_utils/watch_dirs.py
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Helper to watch a (set) of directories for modifications."""
 
 import os
@@ -19,42 +18,42 @@
 
 
 class DirWatcher(object):
-  """Helper to watch a (set) of directories for modifications."""
+    """Helper to watch a (set) of directories for modifications."""
 
-  def __init__(self, paths):
-    if isinstance(paths, basestring):
-      paths = [paths]
-    self._done = False
-    self.paths = list(paths)
-    self.lastrun = time.time()
-    self._cache = self._calculate()
+    def __init__(self, paths):
+        if isinstance(paths, basestring):
+            paths = [paths]
+        self._done = False
+        self.paths = list(paths)
+        self.lastrun = time.time()
+        self._cache = self._calculate()
 
-  def _calculate(self):
-    """Walk over all subscribed paths, check most recent mtime."""
-    most_recent_change = None
-    for path in self.paths:
-      if not os.path.exists(path):
-        continue
-      if not os.path.isdir(path):
-        continue
-      for root, _, files in os.walk(path):
-        for f in files:
-          if f and f[0] == '.': continue
-          try:
-            st = os.stat(os.path.join(root, f))
-          except OSError as e:
-            if e.errno == os.errno.ENOENT:
-              continue
-            raise
-          if most_recent_change is None:
-            most_recent_change = st.st_mtime
-          else:
-            most_recent_change = max(most_recent_change, st.st_mtime)
-    return most_recent_change
+    def _calculate(self):
+        """Walk over all subscribed paths, check most recent mtime."""
+        most_recent_change = None
+        for path in self.paths:
+            if not os.path.exists(path):
+                continue
+            if not os.path.isdir(path):
+                continue
+            for root, _, files in os.walk(path):
+                for f in files:
+                    if f and f[0] == '.': continue
+                    try:
+                        st = os.stat(os.path.join(root, f))
+                    except OSError as e:
+                        if e.errno == os.errno.ENOENT:
+                            continue
+                        raise
+                    if most_recent_change is None:
+                        most_recent_change = st.st_mtime
+                    else:
+                        most_recent_change = max(most_recent_change,
+                                                 st.st_mtime)
+        return most_recent_change
 
-  def most_recent_change(self):
-    if time.time() - self.lastrun > 1:
-      self._cache = self._calculate()
-      self.lastrun = time.time()
-    return self._cache
-
+    def most_recent_change(self):
+        if time.time() - self.lastrun > 1:
+            self._cache = self._calculate()
+            self.lastrun = time.time()
+        return self._cache
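DirWatcher is a simple mtime poller: _calculate() walks every watched directory and records the newest modification time, and most_recent_change() re-walks at most once per second. A hypothetical usage sketch, assuming watch_dirs.py is importable from the caller (the real callers add tools/run_tests/python_utils to sys.path):

    import time
    from watch_dirs import DirWatcher  # assumed import path

    watcher = DirWatcher(['src/core', 'src/python'])
    baseline = watcher.most_recent_change()
    while True:
        time.sleep(2)
        latest = watcher.most_recent_change()
        if latest is not None and (baseline is None or latest > baseline):
            print('change detected under watched paths')
            baseline = latest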
diff --git a/tools/run_tests/run_build_statistics.py b/tools/run_tests/run_build_statistics.py
index 1e957b6..4055332 100755
--- a/tools/run_tests/run_build_statistics.py
+++ b/tools/run_tests/run_build_statistics.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Tool to get build statistics from Jenkins and upload to BigQuery."""
 
 from __future__ import print_function
@@ -27,43 +26,42 @@
 import sys
 import urllib
 
-
-gcp_utils_dir = os.path.abspath(os.path.join(
-    os.path.dirname(__file__), '../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 import big_query_utils
 
-
 _PROJECT_ID = 'grpc-testing'
 _HAS_MATRIX = True
-_BUILDS = {'gRPC_interop_master': not _HAS_MATRIX,
-           'gRPC_master_linux': not _HAS_MATRIX,
-           'gRPC_master_macos': not _HAS_MATRIX,
-           'gRPC_master_windows': not _HAS_MATRIX,
-           'gRPC_performance_master': not _HAS_MATRIX,
-           'gRPC_portability_master_linux': not _HAS_MATRIX,
-           'gRPC_portability_master_windows': not _HAS_MATRIX,
-           'gRPC_master_asanitizer_c': not _HAS_MATRIX,
-           'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
-           'gRPC_master_msan_c': not _HAS_MATRIX,
-           'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
-           'gRPC_master_tsan_cpp': not _HAS_MATRIX,
-           'gRPC_interop_pull_requests': not _HAS_MATRIX,
-           'gRPC_performance_pull_requests': not _HAS_MATRIX,
-           'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
-           'gRPC_portability_pr_win': not _HAS_MATRIX,
-           'gRPC_pull_requests_linux': not _HAS_MATRIX,
-           'gRPC_pull_requests_macos': not _HAS_MATRIX,
-           'gRPC_pr_win': not _HAS_MATRIX,
-           'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
-           'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
-           'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
-           'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
-           'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
+_BUILDS = {
+    'gRPC_interop_master': not _HAS_MATRIX,
+    'gRPC_master_linux': not _HAS_MATRIX,
+    'gRPC_master_macos': not _HAS_MATRIX,
+    'gRPC_master_windows': not _HAS_MATRIX,
+    'gRPC_performance_master': not _HAS_MATRIX,
+    'gRPC_portability_master_linux': not _HAS_MATRIX,
+    'gRPC_portability_master_windows': not _HAS_MATRIX,
+    'gRPC_master_asanitizer_c': not _HAS_MATRIX,
+    'gRPC_master_asanitizer_cpp': not _HAS_MATRIX,
+    'gRPC_master_msan_c': not _HAS_MATRIX,
+    'gRPC_master_tsanitizer_c': not _HAS_MATRIX,
+    'gRPC_master_tsan_cpp': not _HAS_MATRIX,
+    'gRPC_interop_pull_requests': not _HAS_MATRIX,
+    'gRPC_performance_pull_requests': not _HAS_MATRIX,
+    'gRPC_portability_pull_requests_linux': not _HAS_MATRIX,
+    'gRPC_portability_pr_win': not _HAS_MATRIX,
+    'gRPC_pull_requests_linux': not _HAS_MATRIX,
+    'gRPC_pull_requests_macos': not _HAS_MATRIX,
+    'gRPC_pr_win': not _HAS_MATRIX,
+    'gRPC_pull_requests_asan_c': not _HAS_MATRIX,
+    'gRPC_pull_requests_asan_cpp': not _HAS_MATRIX,
+    'gRPC_pull_requests_msan_c': not _HAS_MATRIX,
+    'gRPC_pull_requests_tsan_c': not _HAS_MATRIX,
+    'gRPC_pull_requests_tsan_cpp': not _HAS_MATRIX,
 }
 _URL_BASE = 'https://grpc-testing.appspot.com/job'
 
-# This is a dynamic list where known and active issues should be added. 
+# This is a dynamic list where known and active issues should be added.
 # Fixed ones should be removed.
 # Also try not to add multiple messages from the same failure.
 _KNOWN_ERRORS = [
@@ -99,147 +97,156 @@
 
 
 def _scrape_for_known_errors(html):
-  error_list = []
-  for known_error in _KNOWN_ERRORS:
-    errors = re.findall(known_error, html)
-    this_error_count = len(errors)
-    if this_error_count > 0: 
-      error_list.append({'description': known_error,
-                         'count': this_error_count})
-      print('====> %d failures due to %s' % (this_error_count, known_error))
-  return error_list
+    error_list = []
+    for known_error in _KNOWN_ERRORS:
+        errors = re.findall(known_error, html)
+        this_error_count = len(errors)
+        if this_error_count > 0:
+            error_list.append({
+                'description': known_error,
+                'count': this_error_count
+            })
+            print('====> %d failures due to %s' % (this_error_count,
+                                                   known_error))
+    return error_list
 
 
 def _no_report_files_found(html):
-  return _NO_REPORT_FILES_FOUND_ERROR in html
+    return _NO_REPORT_FILES_FOUND_ERROR in html
 
 
 def _get_last_processed_buildnumber(build_name):
-  query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (
-      _PROJECT_ID, _DATASET_ID, build_name)
-  query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
-  page = bq.jobs().getQueryResults(
-      pageToken=None,
-      **query_job['jobReference']).execute(num_retries=3)
-  if page['rows'][0]['f'][0]['v']:
-    return int(page['rows'][0]['f'][0]['v'])
-  return 0
+    query = 'SELECT max(build_number) FROM [%s:%s.%s];' % (_PROJECT_ID,
+                                                           _DATASET_ID,
+                                                           build_name)
+    query_job = big_query_utils.sync_query_job(bq, _PROJECT_ID, query)
+    page = bq.jobs().getQueryResults(
+        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+    if page['rows'][0]['f'][0]['v']:
+        return int(page['rows'][0]['f'][0]['v'])
+    return 0
 
 
 def _process_matrix(build, url_base):
-  matrix_list = []
-  for matrix in build.get_matrix_runs():
-    matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*', 
-                          matrix.name).groups()[0]
-    matrix_tuple = matrix_str.split(',')
-    json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
-        url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
-    console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
-        url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
-    matrix_dict = {'name': matrix_str,
-                   'duration': matrix.get_duration().total_seconds()}
-    matrix_dict.update(_process_build(json_url, console_url))
-    matrix_list.append(matrix_dict)
+    matrix_list = []
+    for matrix in build.get_matrix_runs():
+        matrix_str = re.match('.*\\xc2\\xbb ((?:[^,]+,?)+) #.*',
+                              matrix.name).groups()[0]
+        matrix_tuple = matrix_str.split(',')
+        json_url = '%s/config=%s,language=%s,platform=%s/testReport/api/json' % (
+            url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+        console_url = '%s/config=%s,language=%s,platform=%s/consoleFull' % (
+            url_base, matrix_tuple[0], matrix_tuple[1], matrix_tuple[2])
+        matrix_dict = {
+            'name': matrix_str,
+            'duration': matrix.get_duration().total_seconds()
+        }
+        matrix_dict.update(_process_build(json_url, console_url))
+        matrix_list.append(matrix_dict)
 
-  return matrix_list 
+    return matrix_list
 
 
 def _process_build(json_url, console_url):
-  build_result = {}
-  error_list = []
-  try:
-    html = urllib.urlopen(json_url).read()
-    test_result = json.loads(html)
-    print('====> Parsing result from %s' % json_url)
-    failure_count = test_result['failCount']
-    build_result['pass_count'] = test_result['passCount']
-    build_result['failure_count'] = failure_count
-    # This means Jenkins failure occurred.
-    build_result['no_report_files_found'] = _no_report_files_found(html)
-    # Only check errors if Jenkins failure occurred.
-    if build_result['no_report_files_found']:
-      error_list = _scrape_for_known_errors(html)
-  except Exception as e:
-    print('====> Got exception for %s: %s.' % (json_url, str(e)))   
-    print('====> Parsing errors from %s.' % console_url)
-    html = urllib.urlopen(console_url).read()
-    build_result['pass_count'] = 0  
-    build_result['failure_count'] = 1
-    # In this case, the string doesn't exist in the result html but the fact 
-    # that we fail to parse the result html indicates Jenkins failure and hence 
-    # no report files were generated.
-    build_result['no_report_files_found'] = True
-    error_list = _scrape_for_known_errors(html)
+    build_result = {}
+    error_list = []
+    try:
+        html = urllib.urlopen(json_url).read()
+        test_result = json.loads(html)
+        print('====> Parsing result from %s' % json_url)
+        failure_count = test_result['failCount']
+        build_result['pass_count'] = test_result['passCount']
+        build_result['failure_count'] = failure_count
+        # This means Jenkins failure occurred.
+        build_result['no_report_files_found'] = _no_report_files_found(html)
+        # Only check errors if Jenkins failure occurred.
+        if build_result['no_report_files_found']:
+            error_list = _scrape_for_known_errors(html)
+    except Exception as e:
+        print('====> Got exception for %s: %s.' % (json_url, str(e)))
+        print('====> Parsing errors from %s.' % console_url)
+        html = urllib.urlopen(console_url).read()
+        build_result['pass_count'] = 0
+        build_result['failure_count'] = 1
+        # In this case, the string doesn't exist in the result html, but the fact
+        # that we fail to parse it indicates a Jenkins failure and hence that no
+        # report files were generated.
+        build_result['no_report_files_found'] = True
+        error_list = _scrape_for_known_errors(html)
 
-  if error_list:
-    build_result['error'] = error_list
-  elif build_result['no_report_files_found']:
-    build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
-  else:
-    build_result['error'] = [{'description': '', 'count': 0}]
+    if error_list:
+        build_result['error'] = error_list
+    elif build_result['no_report_files_found']:
+        build_result['error'] = [{'description': _UNKNOWN_ERROR, 'count': 1}]
+    else:
+        build_result['error'] = [{'description': '', 'count': 0}]
 
-  return build_result 
+    return build_result
 
 
 # parse command line
 argp = argparse.ArgumentParser(description='Get build statistics.')
 argp.add_argument('-u', '--username', default='jenkins')
-argp.add_argument('-b', '--builds', 
-                  choices=['all'] + sorted(_BUILDS.keys()),
-                  nargs='+',
-                  default=['all'])
+argp.add_argument(
+    '-b',
+    '--builds',
+    choices=['all'] + sorted(_BUILDS.keys()),
+    nargs='+',
+    default=['all'])
 args = argp.parse_args()
 
 J = Jenkins('https://grpc-testing.appspot.com', args.username, 'apiToken')
 bq = big_query_utils.create_big_query()
 
 for build_name in _BUILDS.keys() if 'all' in args.builds else args.builds:
-  print('====> Build: %s' % build_name)
-  # Since get_last_completed_build() always fails due to malformatted string
-  # error, we use get_build_metadata() instead.
-  job = None
-  try:
-    job = J[build_name]
-  except Exception as e:
-    print('====> Failed to get build %s: %s.' % (build_name, str(e)))
-    continue
-  last_processed_build_number = _get_last_processed_buildnumber(build_name)
-  last_complete_build_number = job.get_last_completed_buildnumber()
-  # To avoid processing all builds for a project never looked at. In this case,
-  # only examine 10 latest builds.
-  starting_build_number = max(last_processed_build_number+1, 
-                              last_complete_build_number-9)
-  for build_number in xrange(starting_build_number, 
-                             last_complete_build_number+1):
-    print('====> Processing %s build %d.' % (build_name, build_number))
-    build = None
+    print('====> Build: %s' % build_name)
+    # Since get_last_completed_build() always fails with a malformed-string
+    # error, we use get_build_metadata() instead.
+    job = None
     try:
-      build = job.get_build_metadata(build_number)
-      print('====> Build status: %s.' % build.get_status())
-      if build.get_status() == 'ABORTED':
+        job = J[build_name]
+    except Exception as e:
+        print('====> Failed to get build %s: %s.' % (build_name, str(e)))
         continue
-      # If any build is still running, stop processing this job. Next time, we
-      # start from where it was left so that all builds are processed 
-      # sequentially.
-      if build.is_running():
-        print('====> Build %d is still running.' % build_number)
-        break
-    except KeyError:
-      print('====> Build %s is missing. Skip.' % build_number)
-      continue
-    build_result = {'build_number': build_number, 
-                    'timestamp': str(build.get_timestamp())}
-    url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
-    if _BUILDS[build_name]:  # The build has matrix, such as gRPC_master.
-      build_result['matrix'] = _process_matrix(build, url_base)
-    else:
-      json_url = '%s/testReport/api/json' % url_base
-      console_url = '%s/consoleFull' % url_base
-      build_result['duration'] = build.get_duration().total_seconds()
-      build_stat = _process_build(json_url, console_url)
-      build_result.update(build_stat)
-    rows = [big_query_utils.make_row(build_number, build_result)]
-    if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID, build_name, 
-                                       rows):
-      print('====> Error uploading result to bigquery.')
-      sys.exit(1)
+    last_processed_build_number = _get_last_processed_buildnumber(build_name)
+    last_complete_build_number = job.get_last_completed_buildnumber()
+    # Avoid processing all builds for a project we have never looked at; in that
+    # case, only examine the 10 latest builds.
+    starting_build_number = max(last_processed_build_number + 1,
+                                last_complete_build_number - 9)
+    for build_number in xrange(starting_build_number,
+                               last_complete_build_number + 1):
+        print('====> Processing %s build %d.' % (build_name, build_number))
+        build = None
+        try:
+            build = job.get_build_metadata(build_number)
+            print('====> Build status: %s.' % build.get_status())
+            if build.get_status() == 'ABORTED':
+                continue
+            # If any build is still running, stop processing this job. Next time,
+            # we start from where we left off so that all builds are processed
+            # sequentially.
+            if build.is_running():
+                print('====> Build %d is still running.' % build_number)
+                break
+        except KeyError:
+            print('====> Build %s is missing. Skip.' % build_number)
+            continue
+        build_result = {
+            'build_number': build_number,
+            'timestamp': str(build.get_timestamp())
+        }
+        url_base = json_url = '%s/%s/%d' % (_URL_BASE, build_name, build_number)
+        if _BUILDS[build_name]:  # The build has matrix, such as gRPC_master.
+            build_result['matrix'] = _process_matrix(build, url_base)
+        else:
+            json_url = '%s/testReport/api/json' % url_base
+            console_url = '%s/consoleFull' % url_base
+            build_result['duration'] = build.get_duration().total_seconds()
+            build_stat = _process_build(json_url, console_url)
+            build_result.update(build_stat)
+        rows = [big_query_utils.make_row(build_number, build_result)]
+        if not big_query_utils.insert_rows(bq, _PROJECT_ID, _DATASET_ID,
+                                           build_name, rows):
+            print('====> Error uploading result to bigquery.')
+            sys.exit(1)
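One detail of the main loop above that is easy to miss: starting_build_number caps how far back the script looks, so a project that has never been processed only gets its 10 latest builds, while an up-to-date project only gets the builds added since the last run. A quick standalone check of that windowing logic (the helper name is mine):

    def build_range(last_processed_build_number, last_complete_build_number):
        starting_build_number = max(last_processed_build_number + 1,
                                    last_complete_build_number - 9)
        return list(range(starting_build_number, last_complete_build_number + 1))

    assert build_range(0, 25) == list(range(16, 26))  # fresh project: 10 latest builds
    assert build_range(23, 25) == [24, 25]            # caught-up project: only new builds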
diff --git a/tools/run_tests/run_interop_tests.py b/tools/run_tests/run_interop_tests.py
index 8f46ea9..44a6ec2 100755
--- a/tools/run_tests/run_interop_tests.py
+++ b/tools/run_tests/run_interop_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run interop (cross-language) tests in parallel."""
 
 from __future__ import print_function
@@ -37,9 +36,9 @@
 import python_utils.report_utils as report_utils
 # It's ok to not import because this is only necessary to upload results to BQ.
 try:
-  from python_utils.upload_test_results import upload_interop_results_to_bq
+    from python_utils.upload_test_results import upload_interop_results_to_bq
 except ImportError as e:
-  print(e)
+    print(e)
 
 # Docker doesn't clean up after itself, so we do it on exit.
 atexit.register(lambda: subprocess.call(['stty', 'echo']))
@@ -47,22 +46,24 @@
 ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(ROOT)
 
-_DEFAULT_SERVER_PORT=8080
+_DEFAULT_SERVER_PORT = 8080
 
-_SKIP_CLIENT_COMPRESSION = ['client_compressed_unary',
-                            'client_compressed_streaming']
+_SKIP_CLIENT_COMPRESSION = [
+    'client_compressed_unary', 'client_compressed_streaming'
+]
 
-_SKIP_SERVER_COMPRESSION = ['server_compressed_unary',
-                            'server_compressed_streaming']
+_SKIP_SERVER_COMPRESSION = [
+    'server_compressed_unary', 'server_compressed_streaming'
+]
 
 _SKIP_COMPRESSION = _SKIP_CLIENT_COMPRESSION + _SKIP_SERVER_COMPRESSION
 
-_SKIP_ADVANCED = ['status_code_and_message',
-                  'custom_metadata',
-                  'unimplemented_method',
-                  'unimplemented_service']
+_SKIP_ADVANCED = [
+    'status_code_and_message', 'custom_metadata', 'unimplemented_method',
+    'unimplemented_service'
+]
 
-_TEST_TIMEOUT = 3*60
+_TEST_TIMEOUT = 3 * 60
 
 # disable this test on core-based languages,
 # see https://github.com/grpc/grpc/issues/9779
@@ -77,977 +78,1059 @@
 
 class CXXLanguage:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.http2_cwd = None
-    self.safename = 'cxx'
+    def __init__(self):
+        self.client_cwd = None
+        self.server_cwd = None
+        self.http2_cwd = None
+        self.safename = 'cxx'
 
-  def client_cmd(self, args):
-    return ['bins/opt/interop_client'] + args
+    def client_cmd(self, args):
+        return ['bins/opt/interop_client'] + args
 
-  def client_cmd_http2interop(self, args):
-    return ['bins/opt/http2_client'] + args
+    def client_cmd_http2interop(self, args):
+        return ['bins/opt/http2_client'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['bins/opt/interop_server'] + args
+    def server_cmd(self, args):
+        return ['bins/opt/interop_server'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return []
+    def unimplemented_test_cases_server(self):
+        return []
 
-  def __str__(self):
-    return 'c++'
+    def __str__(self):
+        return 'c++'
 
 
 class CSharpLanguage:
 
-  def __init__(self):
-    self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
-    self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/net45'
+        self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/net45'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
+    def client_cmd(self, args):
+        return ['mono', 'Grpc.IntegrationTesting.Client.exe'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
+    def server_cmd(self, args):
+        return ['mono', 'Grpc.IntegrationTesting.Server.exe'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'csharp'
+    def __str__(self):
+        return 'csharp'
 
 
 class CSharpCoreCLRLanguage:
 
-  def __init__(self):
-    self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
-    self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = 'src/csharp/Grpc.IntegrationTesting.Client/bin/Debug/netcoreapp1.0'
+        self.server_cwd = 'src/csharp/Grpc.IntegrationTesting.Server/bin/Debug/netcoreapp1.0'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
+    def client_cmd(self, args):
+        return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Client.dll'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
+    def server_cmd(self, args):
+        return ['dotnet', 'exec', 'Grpc.IntegrationTesting.Server.dll'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'csharpcoreclr'
+    def __str__(self):
+        return 'csharpcoreclr'
 
 
 class JavaLanguage:
 
-  def __init__(self):
-    self.client_cwd = '../grpc-java'
-    self.server_cwd = '../grpc-java'
-    self.http2_cwd = '../grpc-java'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = '../grpc-java'
+        self.server_cwd = '../grpc-java'
+        self.http2_cwd = '../grpc-java'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['./run-test-client.sh'] + args
+    def client_cmd(self, args):
+        return ['./run-test-client.sh'] + args
 
-  def client_cmd_http2interop(self, args):
-    return ['./interop-testing/build/install/grpc-interop-testing/bin/http2-client'] + args
+    def client_cmd_http2interop(self, args):
+        return [
+            './interop-testing/build/install/grpc-interop-testing/bin/http2-client'
+        ] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['./run-test-server.sh'] + args
+    def server_cmd(self, args):
+        return ['./run-test-server.sh'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return []
+    def unimplemented_test_cases(self):
+        return []
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'java'
+    def __str__(self):
+        return 'java'
 
 
 class JavaOkHttpClient:
 
-  def __init__(self):
-    self.client_cwd = '../grpc-java'
-    self.safename = 'java'
+    def __init__(self):
+        self.client_cwd = '../grpc-java'
+        self.safename = 'java'
 
-  def client_cmd(self, args):
-    return ['./run-test-client.sh', '--use_okhttp=true'] + args
+    def client_cmd(self, args):
+        return ['./run-test-client.sh', '--use_okhttp=true'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_DATA_FRAME_PADDING
 
-  def __str__(self):
-    return 'javaokhttp'
+    def __str__(self):
+        return 'javaokhttp'
 
 
 class GoLanguage:
 
-  def __init__(self):
-    # TODO: this relies on running inside docker
-    self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
-    self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
-    self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
-    self.safename = str(self)
+    def __init__(self):
+        # TODO: this relies on running inside docker
+        self.client_cwd = '/go/src/google.golang.org/grpc/interop/client'
+        self.server_cwd = '/go/src/google.golang.org/grpc/interop/server'
+        self.http2_cwd = '/go/src/google.golang.org/grpc/interop/http2'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['go', 'run', 'client.go'] + args
+    def client_cmd(self, args):
+        return ['go', 'run', 'client.go'] + args
 
-  def client_cmd_http2interop(self, args):
-    return ['go', 'run', 'negative_http2_client.go'] + args
+    def client_cmd_http2interop(self, args):
+        return ['go', 'run', 'negative_http2_client.go'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['go', 'run', 'server.go'] + args
+    def server_cmd(self, args):
+        return ['go', 'run', 'server.go'] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'go'
+    def __str__(self):
+        return 'go'
+
 
 class Http2Server:
-  """Represents the HTTP/2 Interop Test server
+    """Represents the HTTP/2 Interop Test server
 
   This pretends to be a language in order to be built and run, but really it
   isn't.
   """
-  def __init__(self):
-    self.server_cwd = None
-    self.safename = str(self)
 
-  def server_cmd(self, args):
-    return ['python test/http2_test/http2_test_server.py']
+    def __init__(self):
+        self.server_cwd = None
+        self.safename = str(self)
 
-  def cloud_to_prod_env(self):
-    return {}
+    def server_cmd(self, args):
+        return ['python test/http2_test/http2_test_server.py']
 
-  def global_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases_server(self):
-    return _TEST_CASES
+    def unimplemented_test_cases(self):
+        return _TEST_CASES + _SKIP_DATA_FRAME_PADDING
 
-  def __str__(self):
-    return 'http2'
+    def unimplemented_test_cases_server(self):
+        return _TEST_CASES
+
+    def __str__(self):
+        return 'http2'
+
 
 class Http2Client:
-  """Represents the HTTP/2 Interop Test
+    """Represents the HTTP/2 Interop Test
 
   This pretends to be a language in order to be built and run, but really it
   isn't.
   """
-  def __init__(self):
-    self.client_cwd = None
-    self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
+    def __init__(self):
+        self.client_cwd = None
+        self.safename = str(self)
 
-  def cloud_to_prod_env(self):
-    return {}
+    def client_cmd(self, args):
+        return ['tools/http2_interop/http2_interop.test', '-test.v'] + args
 
-  def global_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _TEST_CASES
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases_server(self):
-    return _TEST_CASES
+    def unimplemented_test_cases(self):
+        return _TEST_CASES
 
-  def __str__(self):
-    return 'http2'
+    def unimplemented_test_cases_server(self):
+        return _TEST_CASES
+
+    def __str__(self):
+        return 'http2'
+
 
 class NodeLanguage:
 
-  def __init__(self):
-    self.client_cwd = '../grpc-node'
-    self.server_cwd = '../grpc-node'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = '../grpc-node'
+        self.server_cwd = '../grpc-node'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+    def client_cmd(self, args):
+        return [
+            'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
             'node', '--require', './test/fixtures/native_native',
-            'test/interop/interop_client.js'] + args
+            'test/interop/interop_client.js'
+        ] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
+    def server_cmd(self, args):
+        return [
+            'packages/grpc-native-core/deps/grpc/tools/run_tests/interop/with_nvm.sh',
             'node', '--require', './test/fixtures/native_native',
-            'test/interop/interop_server.js'] + args
+            'test/interop/interop_server.js'
+        ] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'node'
+    def __str__(self):
+        return 'node'
 
 
 class PHPLanguage:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['src/php/bin/interop_client.sh'] + args
+    def client_cmd(self, args):
+        return ['src/php/bin/interop_client.sh'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return []
+    def unimplemented_test_cases_server(self):
+        return []
 
-  def __str__(self):
-    return 'php'
+    def __str__(self):
+        return 'php'
 
 
 class PHP7Language:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['src/php/bin/interop_client.sh'] + args
+    def client_cmd(self, args):
+        return ['src/php/bin/interop_client.sh'] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return []
+    def unimplemented_test_cases_server(self):
+        return []
 
-  def __str__(self):
-    return 'php7'
+    def __str__(self):
+        return 'php7'
+
 
 class ObjcLanguage:
 
-  def __init__(self):
-    self.client_cwd = 'src/objective-c/tests'
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = 'src/objective-c/tests'
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    # from args, extract the server port and craft xcodebuild command out of it
-    for arg in args:
-      port = re.search('--server_port=(\d+)', arg)
-      if port:
-        portnum = port.group(1)
-        cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test'%portnum
-        return [cmdline]
+    def client_cmd(self, args):
+        # from args, extract the server port and craft xcodebuild command out of it
+        for arg in args:
+            port = re.search(r'--server_port=(\d+)', arg)
+            if port:
+                portnum = port.group(1)
+                cmdline = 'pod install && xcodebuild -workspace Tests.xcworkspace -scheme InteropTestsLocalSSL -destination name="iPhone 6" HOST_PORT_LOCALSSL=localhost:%s test' % portnum
+                return [cmdline]
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    # ObjC test runs all cases with the same command. It ignores the testcase
-    # cmdline argument. Here we return all but one test cases as unimplemented,
-    # and depend upon ObjC test's behavior that it runs all cases even when
-    # we tell it to run just one.
-    return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        # ObjC test runs all cases with the same command. It ignores the testcase
+        # cmdline argument. Here we return all but one test case as unimplemented,
+        # and depend upon ObjC test's behavior that it runs all cases even when
+        # we tell it to run just one.
+        return _TEST_CASES[1:] + _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'objc'
+    def __str__(self):
+        return 'objc'
+
 
 class RubyLanguage:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.server_cwd = None
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return ['tools/run_tests/interop/with_rvm.sh',
-            'ruby', 'src/ruby/pb/test/client.rb'] + args
+    def client_cmd(self, args):
+        return [
+            'tools/run_tests/interop/with_rvm.sh', 'ruby',
+            'src/ruby/pb/test/client.rb'
+        ] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return ['tools/run_tests/interop/with_rvm.sh',
-            'ruby', 'src/ruby/pb/test/server.rb'] + args
+    def server_cmd(self, args):
+        return [
+            'tools/run_tests/interop/with_rvm.sh', 'ruby',
+            'src/ruby/pb/test/server.rb'
+        ] + args
 
-  def global_env(self):
-    return {}
+    def global_env(self):
+        return {}
 
-  def unimplemented_test_cases(self):
-    return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_SERVER_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'ruby'
+    def __str__(self):
+        return 'ruby'
+
 
 class PythonLanguage:
 
-  def __init__(self):
-    self.client_cwd = None
-    self.server_cwd = None
-    self.http2_cwd = None
-    self.safename = str(self)
+    def __init__(self):
+        self.client_cwd = None
+        self.server_cwd = None
+        self.http2_cwd = None
+        self.safename = str(self)
 
-  def client_cmd(self, args):
-    return [
-        'py27/bin/python',
-        'src/python/grpcio_tests/setup.py',
-        'run_interop',
-        '--client',
-        '--args="{}"'.format(' '.join(args))
-    ]
+    def client_cmd(self, args):
+        return [
+            'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+            'run_interop', '--client', '--args="{}"'.format(' '.join(args))
+        ]
 
-  def client_cmd_http2interop(self, args):
-    return [ 'py27/bin/python',
-              'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
-           ] + args
+    def client_cmd_http2interop(self, args):
+        return [
+            'py27/bin/python',
+            'src/python/grpcio_tests/tests/http2/negative_http2_client.py',
+        ] + args
 
-  def cloud_to_prod_env(self):
-    return {}
+    def cloud_to_prod_env(self):
+        return {}
 
-  def server_cmd(self, args):
-    return [
-        'py27/bin/python',
-        'src/python/grpcio_tests/setup.py',
-        'run_interop',
-        '--server',
-        '--args="{}"'.format(' '.join(args))
-    ]
+    def server_cmd(self, args):
+        return [
+            'py27/bin/python', 'src/python/grpcio_tests/setup.py',
+            'run_interop', '--server', '--args="{}"'.format(' '.join(args))
+        ]
 
-  def global_env(self):
-    return {'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
-            'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)}
+    def global_env(self):
+        return {
+            'LD_LIBRARY_PATH': '{}/libs/opt'.format(DOCKER_WORKDIR_ROOT),
+            'PYTHONPATH': '{}/src/python/gens'.format(DOCKER_WORKDIR_ROOT)
+        }
 
-  def unimplemented_test_cases(self):
-    return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
+    def unimplemented_test_cases(self):
+        return _SKIP_COMPRESSION + _SKIP_DATA_FRAME_PADDING
 
-  def unimplemented_test_cases_server(self):
-    return _SKIP_COMPRESSION
+    def unimplemented_test_cases_server(self):
+        return _SKIP_COMPRESSION
 
-  def __str__(self):
-    return 'python'
+    def __str__(self):
+        return 'python'
 
 
 _LANGUAGES = {
-    'c++' : CXXLanguage(),
-    'csharp' : CSharpLanguage(),
-    'csharpcoreclr' : CSharpCoreCLRLanguage(),
-    'go' : GoLanguage(),
-    'java' : JavaLanguage(),
-    'javaokhttp' : JavaOkHttpClient(),
-    'node' : NodeLanguage(),
-    'php' :  PHPLanguage(),
-    'php7' :  PHP7Language(),
-    'objc' : ObjcLanguage(),
-    'ruby' : RubyLanguage(),
-    'python' : PythonLanguage(),
+    'c++': CXXLanguage(),
+    'csharp': CSharpLanguage(),
+    'csharpcoreclr': CSharpCoreCLRLanguage(),
+    'go': GoLanguage(),
+    'java': JavaLanguage(),
+    'javaokhttp': JavaOkHttpClient(),
+    'node': NodeLanguage(),
+    'php': PHPLanguage(),
+    'php7': PHP7Language(),
+    'objc': ObjcLanguage(),
+    'ruby': RubyLanguage(),
+    'python': PythonLanguage(),
 }
 
 # languages supported as cloud_to_cloud servers
-_SERVERS = ['c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python']
+_SERVERS = [
+    'c++', 'node', 'csharp', 'csharpcoreclr', 'java', 'go', 'ruby', 'python'
+]
 
-_TEST_CASES = ['large_unary', 'empty_unary', 'ping_pong',
-               'empty_stream', 'client_streaming', 'server_streaming',
-               'cancel_after_begin', 'cancel_after_first_response',
-               'timeout_on_sleeping_server', 'custom_metadata',
-               'status_code_and_message', 'unimplemented_method',
-               'client_compressed_unary', 'server_compressed_unary',
-               'client_compressed_streaming', 'server_compressed_streaming',
-               'unimplemented_service']
+_TEST_CASES = [
+    'large_unary', 'empty_unary', 'ping_pong', 'empty_stream',
+    'client_streaming', 'server_streaming', 'cancel_after_begin',
+    'cancel_after_first_response', 'timeout_on_sleeping_server',
+    'custom_metadata', 'status_code_and_message', 'unimplemented_method',
+    'client_compressed_unary', 'server_compressed_unary',
+    'client_compressed_streaming', 'server_compressed_streaming',
+    'unimplemented_service'
+]
 
-_AUTH_TEST_CASES = ['compute_engine_creds', 'jwt_token_creds',
-                    'oauth2_auth_token', 'per_rpc_creds']
+_AUTH_TEST_CASES = [
+    'compute_engine_creds', 'jwt_token_creds', 'oauth2_auth_token',
+    'per_rpc_creds'
+]
 
 _HTTP2_TEST_CASES = ['tls', 'framing']
 
-_HTTP2_SERVER_TEST_CASES = ['rst_after_header', 'rst_after_data', 'rst_during_data',
-                               'goaway', 'ping', 'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test']
+_HTTP2_SERVER_TEST_CASES = [
+    'rst_after_header', 'rst_after_data', 'rst_during_data', 'goaway', 'ping',
+    'max_streams', 'data_frame_padding', 'no_df_padding_sanity_test'
+]
 
-_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = { 'data_frame_padding': 'large_unary', 'no_df_padding_sanity_test': 'large_unary' }
+_GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES = {
+    'data_frame_padding': 'large_unary',
+    'no_df_padding_sanity_test': 'large_unary'
+}
 
-_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys()
+_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES.keys(
+)
 
-_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = ['java', 'go', 'python', 'c++']
+_LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES = [
+    'java', 'go', 'python', 'c++'
+]
 
 DOCKER_WORKDIR_ROOT = '/var/local/git/grpc'
 
+
 def docker_run_cmdline(cmdline, image, docker_args=[], cwd=None, environ=None):
-  """Wraps given cmdline array to create 'docker run' cmdline from it."""
-  docker_cmdline = ['docker', 'run', '-i', '--rm=true']
+    """Wraps given cmdline array to create 'docker run' cmdline from it."""
+    docker_cmdline = ['docker', 'run', '-i', '--rm=true']
 
-  # turn environ into -e docker args
-  if environ:
-    for k,v in environ.items():
-      docker_cmdline += ['-e', '%s=%s' % (k,v)]
+    # turn environ into -e docker args
+    if environ:
+        for k, v in environ.items():
+            docker_cmdline += ['-e', '%s=%s' % (k, v)]
 
-  # set working directory
-  workdir = DOCKER_WORKDIR_ROOT
-  if cwd:
-    workdir = os.path.join(workdir, cwd)
-  docker_cmdline += ['-w', workdir]
+    # set working directory
+    workdir = DOCKER_WORKDIR_ROOT
+    if cwd:
+        workdir = os.path.join(workdir, cwd)
+    docker_cmdline += ['-w', workdir]
 
-  docker_cmdline += docker_args + [image] + cmdline
-  return docker_cmdline
+    docker_cmdline += docker_args + [image] + cmdline
+    return docker_cmdline
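
# Illustration only: the command list that docker_run_cmdline above assembles
# for one hypothetical client invocation. The image tag, container name and
# wrapped command below are made-up placeholders; only the ordering
# (env flags, workdir, docker_args, image, command) mirrors the code above.
illustrative_docker_cmdline = [
    'docker', 'run', '-i', '--rm=true',
    # each environ entry becomes a -e flag
    '-e', 'LD_LIBRARY_PATH=/var/local/git/grpc/libs/opt',
    # cwd=None, so the working directory stays at DOCKER_WORKDIR_ROOT
    '-w', '/var/local/git/grpc',
    # docker_args, then the image, then the wrapped command
    '--net=host', '--name=interop_client_python_0',
    'grpc_interop_python:placeholder-tag',
    'bash', '-c', 'py27/bin/python src/python/grpcio_tests/setup.py run_interop --client --args=""',
]
print(' '.join(illustrative_docker_cmdline))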
 
 
 def manual_cmdline(docker_cmdline, docker_image):
-  """Returns docker cmdline adjusted for manual invocation."""
-  print_cmdline = []
-  for item in docker_cmdline:
-    if item.startswith('--name='):
-      continue
-    if item == docker_image:
-      item = "$docker_image"
-    item = item.replace('"', '\\"')
-    # add quotes when necessary
-    if any(character.isspace() for character in item):
-      item = "\"%s\"" % item
-    print_cmdline.append(item)
-  return ' '.join(print_cmdline)
+    """Returns docker cmdline adjusted for manual invocation."""
+    print_cmdline = []
+    for item in docker_cmdline:
+        if item.startswith('--name='):
+            continue
+        if item == docker_image:
+            item = "$docker_image"
+        item = item.replace('"', '\\"')
+        # add quotes when necessary
+        if any(character.isspace() for character in item):
+            item = "\"%s\"" % item
+        print_cmdline.append(item)
+    return ' '.join(print_cmdline)
 
 
 def write_cmdlog_maybe(cmdlog, filename):
-  """Returns docker cmdline adjusted for manual invocation."""
-  if cmdlog:
-    with open(filename, 'w') as logfile:
-      logfile.write('#!/bin/bash\n')
-      logfile.writelines("%s\n" % line for line in cmdlog)
-    print('Command log written to file %s' % filename)
+    """Returns docker cmdline adjusted for manual invocation."""
+    if cmdlog:
+        with open(filename, 'w') as logfile:
+            logfile.write('#!/bin/bash\n')
+            logfile.writelines("%s\n" % line for line in cmdlog)
+        print('Command log written to file %s' % filename)
 
 
 def bash_cmdline(cmdline):
-  """Creates bash -c cmdline from args list."""
-  # Use login shell:
-  # * makes error messages clearer if executables are missing
-  return ['bash', '-c', ' '.join(cmdline)]
+    """Creates bash -c cmdline from args list."""
+    # Use login shell:
+    # * makes error messages clearer if executables are missing
+    return ['bash', '-c', ' '.join(cmdline)]
 
 
 def auth_options(language, test_case):
-  """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
+    """Returns (cmdline, env) tuple with cloud_to_prod_auth test options."""
 
-  language = str(language)
-  cmdargs = []
-  env = {}
+    language = str(language)
+    cmdargs = []
+    env = {}
 
-  # TODO(jtattermusch): this file path only works inside docker
-  key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
-  oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
-  key_file_arg = '--service_account_key_file=%s' % key_filepath
-  default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
+    # TODO(jtattermusch): this file path only works inside docker
+    key_filepath = '/root/service_account/GrpcTesting-726eb1347f15.json'
+    oauth_scope_arg = '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'
+    key_file_arg = '--service_account_key_file=%s' % key_filepath
+    default_account_arg = '--default_service_account=830293263384-compute@developer.gserviceaccount.com'
 
-  if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
-    if language in ['csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python', 'ruby']:
-      env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
-    else:
-      cmdargs += [key_file_arg]
+    if test_case in ['jwt_token_creds', 'per_rpc_creds', 'oauth2_auth_token']:
+        if language in [
+                'csharp', 'csharpcoreclr', 'node', 'php', 'php7', 'python',
+                'ruby'
+        ]:
+            env['GOOGLE_APPLICATION_CREDENTIALS'] = key_filepath
+        else:
+            cmdargs += [key_file_arg]
 
-  if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
-    cmdargs += [oauth_scope_arg]
+    if test_case in ['per_rpc_creds', 'oauth2_auth_token']:
+        cmdargs += [oauth_scope_arg]
 
-  if test_case == 'oauth2_auth_token' and language == 'c++':
-    # C++ oauth2 test uses GCE creds and thus needs to know the default account
-    cmdargs += [default_account_arg]
+    if test_case == 'oauth2_auth_token' and language == 'c++':
+        # C++ oauth2 test uses GCE creds and thus needs to know the default account
+        cmdargs += [default_account_arg]
 
-  if test_case == 'compute_engine_creds':
-    cmdargs += [oauth_scope_arg, default_account_arg]
+    if test_case == 'compute_engine_creds':
+        cmdargs += [oauth_scope_arg, default_account_arg]
 
-  return (cmdargs, env)
+    return (cmdargs, env)
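
# Illustration only: the (cmdargs, env) pairs auth_options above is expected to
# produce for two sample inputs, written out by hand so the mapping is easier
# to follow. The key file path and account are the same constants used above;
# the language/test-case choices are arbitrary examples.
_KEY_FILEPATH = '/root/service_account/GrpcTesting-726eb1347f15.json'

# ('python', 'oauth2_auth_token'): python is in the GOOGLE_APPLICATION_CREDENTIALS
# group, so the key file goes into the environment and only the oauth scope is
# passed as a flag.
expected_python_oauth2 = (
    ['--oauth_scope=https://www.googleapis.com/auth/xapi.zoo'],
    {'GOOGLE_APPLICATION_CREDENTIALS': _KEY_FILEPATH},
)

# ('c++', 'oauth2_auth_token'): c++ takes the key file as a flag and, because
# its oauth2 test uses GCE credentials, also gets the default service account.
expected_cpp_oauth2 = (
    [
        '--service_account_key_file=%s' % _KEY_FILEPATH,
        '--oauth_scope=https://www.googleapis.com/auth/xapi.zoo',
        '--default_service_account=830293263384-compute@developer.gserviceaccount.com',
    ],
    {},
)
print(expected_python_oauth2)
print(expected_cpp_oauth2)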
 
 
 def _job_kill_handler(job):
-  if job._spec.container_name:
-    dockerjob.docker_kill(job._spec.container_name)
-    # When the job times out and we decide to kill it,
-    # we need to wait a before restarting the job
-    # to prevent "container name already in use" error.
-    # TODO(jtattermusch): figure out a cleaner way to to this.
-    time.sleep(2)
+    if job._spec.container_name:
+        dockerjob.docker_kill(job._spec.container_name)
+        # When the job times out and we decide to kill it,
+        # we need to wait a while before restarting the job
+        # to prevent "container name already in use" error.
+        # TODO(jtattermusch): figure out a cleaner way to do this.
+        time.sleep(2)
 
 
-def cloud_to_prod_jobspec(language, test_case, server_host_name,
-                          server_host_detail, docker_image=None, auth=False,
+def cloud_to_prod_jobspec(language,
+                          test_case,
+                          server_host_name,
+                          server_host_detail,
+                          docker_image=None,
+                          auth=False,
                           manual_cmd_log=None):
-  """Creates jobspec for cloud-to-prod interop test"""
-  container_name = None
-  cmdargs = [
-      '--server_host=%s' % server_host_detail[0],
-      '--server_host_override=%s' % server_host_detail[1],
-      '--server_port=443',
-      '--use_tls=true',
-      '--test_case=%s' % test_case]
-  environ = dict(language.cloud_to_prod_env(), **language.global_env())
-  if auth:
-    auth_cmdargs, auth_env = auth_options(language, test_case)
-    cmdargs += auth_cmdargs
-    environ.update(auth_env)
-  cmdline = bash_cmdline(language.client_cmd(cmdargs))
-  cwd = language.client_cwd
-
-  if docker_image:
-    container_name = dockerjob.random_name('interop_client_%s' %
-                                           language.safename)
-    cmdline = docker_run_cmdline(cmdline,
-                                 image=docker_image,
-                                 cwd=cwd,
-                                 environ=environ,
-                                 docker_args=['--net=host',
-                                              '--name=%s' % container_name])
-    if manual_cmd_log is not None:
-      if manual_cmd_log == []:
-        manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
-      manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
-    cwd = None
-    environ = None
-
-  suite_name='cloud_to_prod_auth' if auth else 'cloud_to_prod'
-  test_job = jobset.JobSpec(
-          cmdline=cmdline,
-          cwd=cwd,
-          environ=environ,
-          shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
-                                     test_case),
-          timeout_seconds=_TEST_TIMEOUT,
-          flake_retries=4 if args.allow_flakes else 0,
-          timeout_retries=2 if args.allow_flakes else 0,
-          kill_handler=_job_kill_handler)
-  if docker_image:
-    test_job.container_name = container_name
-  return test_job
-
-
-def cloud_to_cloud_jobspec(language, test_case, server_name, server_host,
-                           server_port, docker_image=None, insecure=False,
-                           manual_cmd_log=None):
-  """Creates jobspec for cloud-to-cloud interop test"""
-  interop_only_options = [
-      '--server_host_override=foo.test.google.fr',
-      '--use_tls=%s' % ('false' if insecure else 'true'),
-      '--use_test_ca=true',
-  ]
-
-  client_test_case = test_case
-  if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
-    client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[test_case]
-  if client_test_case in language.unimplemented_test_cases():
-    print('asking client %s to run unimplemented test case %s' % (repr(language), client_test_case))
-    sys.exit(1)
-
-  common_options = [
-      '--test_case=%s' % client_test_case,
-      '--server_host=%s' % server_host,
-      '--server_port=%s' % server_port,
-  ]
-
-  if test_case in _HTTP2_SERVER_TEST_CASES:
-    if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
-      client_options = interop_only_options + common_options
-      cmdline = bash_cmdline(language.client_cmd(client_options))
-      cwd = language.client_cwd
-    else:
-      cmdline = bash_cmdline(language.client_cmd_http2interop(common_options))
-      cwd = language.http2_cwd
-  else:
-    cmdline = bash_cmdline(language.client_cmd(common_options+interop_only_options))
+    """Creates jobspec for cloud-to-prod interop test"""
+    container_name = None
+    cmdargs = [
+        '--server_host=%s' % server_host_detail[0],
+        '--server_host_override=%s' % server_host_detail[1],
+        '--server_port=443', '--use_tls=true',
+        '--test_case=%s' % test_case
+    ]
+    environ = dict(language.cloud_to_prod_env(), **language.global_env())
+    if auth:
+        auth_cmdargs, auth_env = auth_options(language, test_case)
+        cmdargs += auth_cmdargs
+        environ.update(auth_env)
+    cmdline = bash_cmdline(language.client_cmd(cmdargs))
     cwd = language.client_cwd
 
-  environ = language.global_env()
-  if docker_image and language.safename != 'objc':
-    # we can't run client in docker for objc.
-    container_name = dockerjob.random_name('interop_client_%s' % language.safename)
-    cmdline = docker_run_cmdline(cmdline,
-                                 image=docker_image,
-                                 environ=environ,
-                                 cwd=cwd,
-                                 docker_args=['--net=host',
-                                              '--name=%s' % container_name])
-    if manual_cmd_log is not None:
-      if manual_cmd_log == []:
-        manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
-      manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
-    cwd = None
+    if docker_image:
+        container_name = dockerjob.random_name(
+            'interop_client_%s' % language.safename)
+        cmdline = docker_run_cmdline(
+            cmdline,
+            image=docker_image,
+            cwd=cwd,
+            environ=environ,
+            docker_args=['--net=host',
+                         '--name=%s' % container_name])
+        if manual_cmd_log is not None:
+            if manual_cmd_log == []:
+                manual_cmd_log.append(
+                    'echo "Testing ${docker_image:=%s}"' % docker_image)
+            manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+        cwd = None
+        environ = None
 
-  test_job = jobset.JobSpec(
-          cmdline=cmdline,
-          cwd=cwd,
-          environ=environ,
-          shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
-                                                        test_case),
-          timeout_seconds=_TEST_TIMEOUT,
-          flake_retries=4 if args.allow_flakes else 0,
-          timeout_retries=2 if args.allow_flakes else 0,
-          kill_handler=_job_kill_handler)
-  if docker_image:
-    test_job.container_name = container_name
-  return test_job
+    suite_name = 'cloud_to_prod_auth' if auth else 'cloud_to_prod'
+    test_job = jobset.JobSpec(
+        cmdline=cmdline,
+        cwd=cwd,
+        environ=environ,
+        shortname='%s:%s:%s:%s' % (suite_name, language, server_host_name,
+                                   test_case),
+        timeout_seconds=_TEST_TIMEOUT,
+        flake_retries=4 if args.allow_flakes else 0,
+        timeout_retries=2 if args.allow_flakes else 0,
+        kill_handler=_job_kill_handler)
+    if docker_image:
+        test_job.container_name = container_name
+    return test_job
+
+
+def cloud_to_cloud_jobspec(language,
+                           test_case,
+                           server_name,
+                           server_host,
+                           server_port,
+                           docker_image=None,
+                           insecure=False,
+                           manual_cmd_log=None):
+    """Creates jobspec for cloud-to-cloud interop test"""
+    interop_only_options = [
+        '--server_host_override=foo.test.google.fr',
+        '--use_tls=%s' % ('false' if insecure else 'true'),
+        '--use_test_ca=true',
+    ]
+
+    client_test_case = test_case
+    if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+        client_test_case = _GRPC_CLIENT_TEST_CASES_FOR_HTTP2_SERVER_TEST_CASES[
+            test_case]
+    if client_test_case in language.unimplemented_test_cases():
+        print('asking client %s to run unimplemented test case %s' %
+              (repr(language), client_test_case))
+        sys.exit(1)
+
+    common_options = [
+        '--test_case=%s' % client_test_case,
+        '--server_host=%s' % server_host,
+        '--server_port=%s' % server_port,
+    ]
+
+    if test_case in _HTTP2_SERVER_TEST_CASES:
+        if test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+            client_options = interop_only_options + common_options
+            cmdline = bash_cmdline(language.client_cmd(client_options))
+            cwd = language.client_cwd
+        else:
+            cmdline = bash_cmdline(
+                language.client_cmd_http2interop(common_options))
+            cwd = language.http2_cwd
+    else:
+        cmdline = bash_cmdline(
+            language.client_cmd(common_options + interop_only_options))
+        cwd = language.client_cwd
+
+    environ = language.global_env()
+    if docker_image and language.safename != 'objc':
+        # we can't run client in docker for objc.
+        container_name = dockerjob.random_name(
+            'interop_client_%s' % language.safename)
+        cmdline = docker_run_cmdline(
+            cmdline,
+            image=docker_image,
+            environ=environ,
+            cwd=cwd,
+            docker_args=['--net=host',
+                         '--name=%s' % container_name])
+        if manual_cmd_log is not None:
+            if manual_cmd_log == []:
+                manual_cmd_log.append(
+                    'echo "Testing ${docker_image:=%s}"' % docker_image)
+            manual_cmd_log.append(manual_cmdline(cmdline, docker_image))
+        cwd = None
+
+    test_job = jobset.JobSpec(
+        cmdline=cmdline,
+        cwd=cwd,
+        environ=environ,
+        shortname='cloud_to_cloud:%s:%s_server:%s' % (language, server_name,
+                                                      test_case),
+        timeout_seconds=_TEST_TIMEOUT,
+        flake_retries=4 if args.allow_flakes else 0,
+        timeout_retries=2 if args.allow_flakes else 0,
+        kill_handler=_job_kill_handler)
+    if docker_image:
+        test_job.container_name = container_name
+    return test_job
 
 
 def server_jobspec(language, docker_image, insecure=False, manual_cmd_log=None):
-  """Create jobspec for running a server"""
-  container_name = dockerjob.random_name('interop_server_%s' % language.safename)
-  cmdline = bash_cmdline(
-      language.server_cmd(['--port=%s' % _DEFAULT_SERVER_PORT,
-                           '--use_tls=%s' % ('false' if insecure else 'true')]))
-  environ = language.global_env()
-  docker_args = ['--name=%s' % container_name]
-  if language.safename == 'http2':
-    # we are running the http2 interop server. Open next N ports beginning
-    # with the server port. These ports are used for http2 interop test
-    # (one test case per port).
-    docker_args += list(
-        itertools.chain.from_iterable(('-p', str(_DEFAULT_SERVER_PORT + i))
-                                      for i in range(
-                                          len(_HTTP2_SERVER_TEST_CASES))))
-    # Enable docker's healthcheck mechanism.
-    # This runs a Python script inside the container every second. The script
-    # pings the http2 server to verify it is ready. The 'health-retries' flag
-    # specifies the number of consecutive failures before docker will report
-    # the container's status as 'unhealthy'. Prior to the first 'health_retries'
-    # failures or the first success, the status will be 'starting'. 'docker ps'
-    # or 'docker inspect' can be used to see the health of the container on the
-    # command line.
-    docker_args += [
-        '--health-cmd=python test/http2_test/http2_server_health_check.py '
-        '--server_host=%s --server_port=%d'
-        % ('localhost', _DEFAULT_SERVER_PORT),
-        '--health-interval=1s',
-        '--health-retries=5',
-        '--health-timeout=10s',
-    ]
+    """Create jobspec for running a server"""
+    container_name = dockerjob.random_name(
+        'interop_server_%s' % language.safename)
+    cmdline = bash_cmdline(
+        language.server_cmd([
+            '--port=%s' % _DEFAULT_SERVER_PORT,
+            '--use_tls=%s' % ('false' if insecure else 'true')
+        ]))
+    environ = language.global_env()
+    docker_args = ['--name=%s' % container_name]
+    if language.safename == 'http2':
+        # we are running the http2 interop server. Open next N ports beginning
+        # with the server port. These ports are used for http2 interop test
+        # (one test case per port).
+        docker_args += list(
+            itertools.chain.from_iterable(
+                ('-p', str(_DEFAULT_SERVER_PORT + i))
+                for i in range(len(_HTTP2_SERVER_TEST_CASES))))
+        # Enable docker's healthcheck mechanism.
+        # This runs a Python script inside the container every second. The script
+        # pings the http2 server to verify it is ready. The 'health-retries' flag
+        # specifies the number of consecutive failures before docker will report
+        # the container's status as 'unhealthy'. Prior to the first 'health_retries'
+        # failures or the first success, the status will be 'starting'. 'docker ps'
+        # or 'docker inspect' can be used to see the health of the container on the
+        # command line.
+        docker_args += [
+            '--health-cmd=python test/http2_test/http2_server_health_check.py '
+            '--server_host=%s --server_port=%d' % ('localhost',
+                                                   _DEFAULT_SERVER_PORT),
+            '--health-interval=1s',
+            '--health-retries=5',
+            '--health-timeout=10s',
+        ]
 
-  else:
-    docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
+    else:
+        docker_args += ['-p', str(_DEFAULT_SERVER_PORT)]
 
-  docker_cmdline = docker_run_cmdline(cmdline,
-                                      image=docker_image,
-                                      cwd=language.server_cwd,
-                                      environ=environ,
-                                      docker_args=docker_args)
-  if manual_cmd_log is not None:
-      if manual_cmd_log == []:
-        manual_cmd_log.append('echo "Testing ${docker_image:=%s}"' % docker_image)
-      manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
-  server_job = jobset.JobSpec(
-          cmdline=docker_cmdline,
-          environ=environ,
-          shortname='interop_server_%s' % language,
-          timeout_seconds=30*60)
-  server_job.container_name = container_name
-  return server_job
+    docker_cmdline = docker_run_cmdline(
+        cmdline,
+        image=docker_image,
+        cwd=language.server_cwd,
+        environ=environ,
+        docker_args=docker_args)
+    if manual_cmd_log is not None:
+        if manual_cmd_log == []:
+            manual_cmd_log.append(
+                'echo "Testing ${docker_image:=%s}"' % docker_image)
+        manual_cmd_log.append(manual_cmdline(docker_cmdline, docker_image))
+    server_job = jobset.JobSpec(
+        cmdline=docker_cmdline,
+        environ=environ,
+        shortname='interop_server_%s' % language,
+        timeout_seconds=30 * 60)
+    server_job.container_name = container_name
+    return server_job
 
 
 def build_interop_image_jobspec(language, tag=None):
-  """Creates jobspec for building interop docker image for a language"""
-  if not tag:
-    tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
-  env = {'INTEROP_IMAGE': tag,
-         'BASE_NAME': 'grpc_interop_%s' % language.safename}
-  if not args.travis:
-    env['TTY_FLAG'] = '-t'
-  # This env variable is used to get around the github rate limit
-  # error when running the PHP `composer install` command
-  host_file = '%s/.composer/auth.json' % os.environ['HOME']
-  if language.safename == 'php' and os.path.exists(host_file):
-    env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
-      '-v %s:/root/.composer/auth.json:ro' % host_file
-  build_job = jobset.JobSpec(
-          cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
-          environ=env,
-          shortname='build_docker_%s' % (language),
-          timeout_seconds=30*60)
-  build_job.tag = tag
-  return build_job
+    """Creates jobspec for building interop docker image for a language"""
+    if not tag:
+        tag = 'grpc_interop_%s:%s' % (language.safename, uuid.uuid4())
+    env = {
+        'INTEROP_IMAGE': tag,
+        'BASE_NAME': 'grpc_interop_%s' % language.safename
+    }
+    if not args.travis:
+        env['TTY_FLAG'] = '-t'
+    # This env variable is used to get around the github rate limit
+    # error when running the PHP `composer install` command
+    host_file = '%s/.composer/auth.json' % os.environ['HOME']
+    if language.safename == 'php' and os.path.exists(host_file):
+        env['BUILD_INTEROP_DOCKER_EXTRA_ARGS'] = \
+          '-v %s:/root/.composer/auth.json:ro' % host_file
+    build_job = jobset.JobSpec(
+        cmdline=['tools/run_tests/dockerize/build_interop_image.sh'],
+        environ=env,
+        shortname='build_docker_%s' % (language),
+        timeout_seconds=30 * 60)
+    build_job.tag = tag
+    return build_job
 
 
 def aggregate_http2_results(stdout):
-  match = re.search(r'\{"cases[^\]]*\]\}', stdout)
-  if not match:
-    return None
+    match = re.search(r'\{"cases[^\]]*\]\}', stdout)
+    if not match:
+        return None
 
-  results = json.loads(match.group(0))
-  skipped = 0
-  passed = 0
-  failed = 0
-  failed_cases = []
-  for case in results['cases']:
-    if case.get('skipped', False):
-      skipped += 1
-    else:
-      if case.get('passed', False):
-        passed += 1
-      else:
-        failed += 1
-        failed_cases.append(case.get('name', "NONAME"))
-  return {
-    'passed': passed,
-    'failed': failed,
-    'skipped': skipped,
-    'failed_cases': ', '.join(failed_cases),
-    'percent': 1.0 * passed / (passed + failed)
-  }
+    results = json.loads(match.group(0))
+    skipped = 0
+    passed = 0
+    failed = 0
+    failed_cases = []
+    for case in results['cases']:
+        if case.get('skipped', False):
+            skipped += 1
+        else:
+            if case.get('passed', False):
+                passed += 1
+            else:
+                failed += 1
+                failed_cases.append(case.get('name', "NONAME"))
+    return {
+        'passed': passed,
+        'failed': failed,
+        'skipped': skipped,
+        'failed_cases': ', '.join(failed_cases),
+        'percent': 1.0 * passed / (passed + failed)
+    }
+
 
 # A dictionary of prod servers to test.
 # Format: server_name: (server_host, server_host_override, errors_allowed)
 # TODO(adelez): implement logic for errors_allowed where if the indicated tests
 # fail, they don't impact the overall test result.
 prod_servers = {
-    'default': ('216.239.32.254',
-                'grpc-test.sandbox.googleapis.com', False),
-    'gateway_v2': ('216.239.32.254',
-                   'grpc-test2.sandbox.googleapis.com', True),
+    'default': ('216.239.32.254', 'grpc-test.sandbox.googleapis.com', False),
+    'gateway_v2': ('216.239.32.254', 'grpc-test2.sandbox.googleapis.com', True),
     'cloud_gateway': ('216.239.32.255', 'grpc-test.sandbox.googleapis.com',
                       False),
     'cloud_gateway_v2': ('216.239.32.255', 'grpc-test2.sandbox.googleapis.com',
                          True),
-    'gateway_v4': ('216.239.32.254',
-                   'grpc-test4.sandbox.googleapis.com', True),
+    'gateway_v4': ('216.239.32.254', 'grpc-test4.sandbox.googleapis.com', True),
     'cloud_gateway_v4': ('216.239.32.255', 'grpc-test4.sandbox.googleapis.com',
                          True),
 }
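
# Illustration only: how one prod_servers entry is consumed by
# cloud_to_prod_jobspec above. For the 'default' entry and an arbitrary test
# case, the generated client flags would look like this.
default_entry = ('216.239.32.254', 'grpc-test.sandbox.googleapis.com', False)
example_test_case = 'large_unary'
example_cmdargs = [
    '--server_host=%s' % default_entry[0],
    '--server_host_override=%s' % default_entry[1],
    '--server_port=443', '--use_tls=true',
    '--test_case=%s' % example_test_case,
]
print(example_cmdargs)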
 
 argp = argparse.ArgumentParser(description='Run interop tests.')
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(_LANGUAGES),
-                  nargs='+',
-                  default=['all'],
-                  help='Clients to run. Objc client can be only run on OSX.')
+argp.add_argument(
+    '-l',
+    '--language',
+    choices=['all'] + sorted(_LANGUAGES),
+    nargs='+',
+    default=['all'],
+    help='Clients to run. Objc client can be only run on OSX.')
 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('--cloud_to_prod',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run cloud_to_prod tests.')
-argp.add_argument('--cloud_to_prod_auth',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run cloud_to_prod_auth tests.')
-argp.add_argument('--prod_servers',
-                  choices=prod_servers.keys(),
-                  default=['default'],
-                  nargs='+',
-                  help=('The servers to run cloud_to_prod and '
-                        'cloud_to_prod_auth tests against.'))
-argp.add_argument('-s', '--server',
-                  choices=['all'] + sorted(_SERVERS),
-                  nargs='+',
-                  help='Run cloud_to_cloud servers in a separate docker ' +
-                       'image. Servers can only be started automatically if ' +
-                       '--use_docker option is enabled.',
-                  default=[])
-argp.add_argument('--override_server',
-                  action='append',
-                  type=lambda kv: kv.split('='),
-                  help='Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
-                  default=[])
-argp.add_argument('-t', '--travis',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('-v', '--verbose',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('--use_docker',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run all the interop tests under docker. That provides ' +
-                  'additional isolation and prevents the need to install ' +
-                  'language specific prerequisites. Only available on Linux.')
-argp.add_argument('--allow_flakes',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
-argp.add_argument('--manual_run',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Prepare things for running interop tests manually. ' +
-                  'Preserve docker images after building them and skip '
-                  'actually running the tests. Only print commands to run by ' +
-                  'hand.')
-argp.add_argument('--http2_interop',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
-argp.add_argument('--http2_server_interop',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Enable HTTP/2 server edge case testing. (Includes positive and negative tests')
-argp.add_argument('--insecure',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Whether to use secure channel.')
-argp.add_argument('--internal_ci',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help=('Put reports into subdirectories to improve '
-                        'presentation of results by Internal CI.'))
-argp.add_argument('--bq_result_table',
-                  default='',
-                  type=str,
-                  nargs='?',
-                  help='Upload test results to a specified BQ table.')
+argp.add_argument(
+    '--cloud_to_prod',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Run cloud_to_prod tests.')
+argp.add_argument(
+    '--cloud_to_prod_auth',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Run cloud_to_prod_auth tests.')
+argp.add_argument(
+    '--prod_servers',
+    choices=prod_servers.keys(),
+    default=['default'],
+    nargs='+',
+    help=('The servers to run cloud_to_prod and '
+          'cloud_to_prod_auth tests against.'))
+argp.add_argument(
+    '-s',
+    '--server',
+    choices=['all'] + sorted(_SERVERS),
+    nargs='+',
+    help='Run cloud_to_cloud servers in a separate docker ' +
+    'image. Servers can only be started automatically if ' +
+    '--use_docker option is enabled.',
+    default=[])
+argp.add_argument(
+    '--override_server',
+    action='append',
+    type=lambda kv: kv.split('='),
+    help=
+    'Use servername=HOST:PORT to explicitly specify a server. E.g. csharp=localhost:50000',
+    default=[])
+argp.add_argument(
+    '-t', '--travis', default=False, action='store_const', const=True)
+argp.add_argument(
+    '-v', '--verbose', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--use_docker',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Run all the interop tests under docker. That provides ' +
+    'additional isolation and prevents the need to install ' +
+    'language specific prerequisites. Only available on Linux.')
+argp.add_argument(
+    '--allow_flakes',
+    default=False,
+    action='store_const',
+    const=True,
+    help=
+    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+)
+argp.add_argument(
+    '--manual_run',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Prepare things for running interop tests manually. ' +
+    'Preserve docker images after building them and skip '
+    'actually running the tests. Only print commands to run by ' + 'hand.')
+argp.add_argument(
+    '--http2_interop',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Enable HTTP/2 client edge case testing. (Bad client, good server)')
+argp.add_argument(
+    '--http2_server_interop',
+    default=False,
+    action='store_const',
+    const=True,
+    help=
+    'Enable HTTP/2 server edge case testing. (Includes positive and negative tests)'
+)
+argp.add_argument(
+    '--insecure',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Whether to use an insecure channel (disables TLS).')
+argp.add_argument(
+    '--internal_ci',
+    default=False,
+    action='store_const',
+    const=True,
+    help=('Put reports into subdirectories to improve '
+          'presentation of results by Internal CI.'))
+argp.add_argument(
+    '--bq_result_table',
+    default='',
+    type=str,
+    nargs='?',
+    help='Upload test results to a specified BQ table.')
 args = argp.parse_args()
 
-servers = set(s for s in itertools.chain.from_iterable(_SERVERS
-                                                       if x == 'all' else [x]
-                                                       for x in args.server))
+servers = set(
+    s
+    for s in itertools.chain.from_iterable(
+        _SERVERS if x == 'all' else [x] for x in args.server))
 
 if args.use_docker:
-  if not args.travis:
-    print('Seen --use_docker flag, will run interop tests under docker.')
-    print('')
-    print('IMPORTANT: The changes you are testing need to be locally committed')
-    print('because only the committed changes in the current branch will be')
-    print('copied to the docker environment.')
-    time.sleep(5)
+    if not args.travis:
+        print('Seen --use_docker flag, will run interop tests under docker.')
+        print('')
+        print(
+            'IMPORTANT: The changes you are testing need to be locally committed'
+        )
+        print(
+            'because only the committed changes in the current branch will be')
+        print('copied to the docker environment.')
+        time.sleep(5)
 
 if args.manual_run and not args.use_docker:
-  print('--manual_run is only supported with --use_docker option enabled.')
-  sys.exit(1)
+    print('--manual_run is only supported with --use_docker option enabled.')
+    sys.exit(1)
 
 if not args.use_docker and servers:
-  print('Running interop servers is only supported with --use_docker option enabled.')
-  sys.exit(1)
-
+    print(
+        'Running interop servers is only supported with --use_docker option enabled.'
+    )
+    sys.exit(1)
 
 # we want to include everything but objc in 'all'
 # because objc won't run on non-mac platforms
 all_but_objc = set(six.iterkeys(_LANGUAGES)) - set(['objc'])
 languages = set(_LANGUAGES[l]
                 for l in itertools.chain.from_iterable(
-                    all_but_objc if x == 'all' else [x]
-                    for x in args.language))
+                    all_but_objc if x == 'all' else [x] for x in args.language))
 
 languages_http2_clients_for_http2_server_interop = set()
 if args.http2_server_interop:
-  languages_http2_clients_for_http2_server_interop = set(
-      _LANGUAGES[l] for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
-      if 'all' in args.language or l in args.language)
+    languages_http2_clients_for_http2_server_interop = set(
+        _LANGUAGES[l]
+        for l in _LANGUAGES_WITH_HTTP2_CLIENTS_FOR_HTTP2_SERVER_TEST_CASES
+        if 'all' in args.language or l in args.language)
 
 http2Interop = Http2Client() if args.http2_interop else None
 http2InteropServer = Http2Server() if args.http2_server_interop else None
 
-docker_images={}
+docker_images = {}
 if args.use_docker:
-  # languages for which to build docker images
-  languages_to_build = set(
-      _LANGUAGES[k] for k in set([str(l) for l in languages] + [s for s in servers]))
-  languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
+    # languages for which to build docker images
+    languages_to_build = set(
+        _LANGUAGES[k]
+        for k in set([str(l) for l in languages] + [s for s in servers]))
+    languages_to_build = languages_to_build | languages_http2_clients_for_http2_server_interop
 
-  if args.http2_interop:
-    languages_to_build.add(http2Interop)
+    if args.http2_interop:
+        languages_to_build.add(http2Interop)
 
-  if args.http2_server_interop:
-    languages_to_build.add(http2InteropServer)
+    if args.http2_server_interop:
+        languages_to_build.add(http2InteropServer)
 
-  build_jobs = []
-  for l in languages_to_build:
-    if str(l) == 'objc':
-      # we don't need to build a docker image for objc
-      continue
-    job = build_interop_image_jobspec(l)
-    docker_images[str(l)] = job.tag
-    build_jobs.append(job)
+    build_jobs = []
+    for l in languages_to_build:
+        if str(l) == 'objc':
+            # we don't need to build a docker image for objc
+            continue
+        job = build_interop_image_jobspec(l)
+        docker_images[str(l)] = job.tag
+        build_jobs.append(job)
 
-  if build_jobs:
-    jobset.message('START', 'Building interop docker images.', do_newline=True)
-    if args.verbose:
-      print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
+    if build_jobs:
+        jobset.message(
+            'START', 'Building interop docker images.', do_newline=True)
+        if args.verbose:
+            print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs))
 
-    num_failures, _ = jobset.run(
-        build_jobs, newline_on_success=True, maxjobs=args.jobs)
-    if num_failures == 0:
-      jobset.message('SUCCESS', 'All docker images built successfully.',
-                     do_newline=True)
-    else:
-      jobset.message('FAILED', 'Failed to build interop docker images.',
-                     do_newline=True)
-      for image in six.itervalues(docker_images):
-        dockerjob.remove_image(image, skip_nonexistent=True)
-      sys.exit(1)
+        num_failures, _ = jobset.run(
+            build_jobs, newline_on_success=True, maxjobs=args.jobs)
+        if num_failures == 0:
+            jobset.message(
+                'SUCCESS',
+                'All docker images built successfully.',
+                do_newline=True)
+        else:
+            jobset.message(
+                'FAILED',
+                'Failed to build interop docker images.',
+                do_newline=True)
+            for image in six.itervalues(docker_images):
+                dockerjob.remove_image(image, skip_nonexistent=True)
+            sys.exit(1)
 
 server_manual_cmd_log = [] if args.manual_run else None
 client_manual_cmd_log = [] if args.manual_run else None
@@ -1056,214 +1139,236 @@
 server_jobs = {}
 server_addresses = {}
 try:
-  for s in servers:
-    lang = str(s)
-    spec = server_jobspec(_LANGUAGES[lang], docker_images.get(lang),
-                          args.insecure, manual_cmd_log=server_manual_cmd_log)
-    if not args.manual_run:
-      job = dockerjob.DockerJob(spec)
-      server_jobs[lang] = job
-      server_addresses[lang] = ('localhost', job.mapped_port(_DEFAULT_SERVER_PORT))
-    else:
-      # don't run the server, set server port to a placeholder value
-      server_addresses[lang] = ('localhost', '${SERVER_PORT}')
-
-  http2_server_job = None
-  if args.http2_server_interop:
-    # launch a HTTP2 server emulator that creates edge cases
-    lang = str(http2InteropServer)
-    spec = server_jobspec(http2InteropServer, docker_images.get(lang),
-                          manual_cmd_log=server_manual_cmd_log)
-    if not args.manual_run:
-      http2_server_job = dockerjob.DockerJob(spec)
-      server_jobs[lang] = http2_server_job
-    else:
-      # don't run the server, set server port to a placeholder value
-      server_addresses[lang] = ('localhost', '${SERVER_PORT}')
-
-  jobs = []
-  if args.cloud_to_prod:
-    if args.insecure:
-      print('TLS is always enabled for cloud_to_prod scenarios.')
-    for server_host_name in args.prod_servers:
-      for language in languages:
-        for test_case in _TEST_CASES:
-          if not test_case in language.unimplemented_test_cases():
-            if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
-              test_job = cloud_to_prod_jobspec(
-                  language, test_case, server_host_name,
-                  prod_servers[server_host_name],
-                  docker_image=docker_images.get(str(language)),
-                  manual_cmd_log=client_manual_cmd_log)
-              jobs.append(test_job)
-
-      if args.http2_interop:
-        for test_case in _HTTP2_TEST_CASES:
-          test_job = cloud_to_prod_jobspec(
-              http2Interop, test_case, server_host_name,
-              prod_servers[server_host_name],
-              docker_image=docker_images.get(str(http2Interop)),
-              manual_cmd_log=client_manual_cmd_log)
-          jobs.append(test_job)
-
-  if args.cloud_to_prod_auth:
-    if args.insecure:
-      print('TLS is always enabled for cloud_to_prod scenarios.')
-    for server_host_name in args.prod_servers:
-      for language in languages:
-        for test_case in _AUTH_TEST_CASES:
-          if not test_case in language.unimplemented_test_cases():
-            test_job = cloud_to_prod_jobspec(
-                language, test_case, server_host_name,
-                prod_servers[server_host_name],
-                docker_image=docker_images.get(str(language)), auth=True,
-                manual_cmd_log=client_manual_cmd_log)
-            jobs.append(test_job)
-
-  for server in args.override_server:
-    server_name = server[0]
-    (server_host, server_port) = server[1].split(':')
-    server_addresses[server_name] = (server_host, server_port)
-
-  for server_name, server_address in server_addresses.items():
-    (server_host, server_port) = server_address
-    server_language = _LANGUAGES.get(server_name, None)
-    skip_server = []  # test cases unimplemented by server
-    if server_language:
-      skip_server = server_language.unimplemented_test_cases_server()
-    for language in languages:
-      for test_case in _TEST_CASES:
-        if not test_case in language.unimplemented_test_cases():
-          if not test_case in skip_server:
-            test_job = cloud_to_cloud_jobspec(language,
-                                              test_case,
-                                              server_name,
-                                              server_host,
-                                              server_port,
-                                              docker_image=docker_images.get(str(language)),
-                                              insecure=args.insecure,
-                                              manual_cmd_log=client_manual_cmd_log)
-            jobs.append(test_job)
-
-    if args.http2_interop:
-      for test_case in _HTTP2_TEST_CASES:
-        if server_name == "go":
-          # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
-          continue
-        test_job = cloud_to_cloud_jobspec(http2Interop,
-                                          test_case,
-                                          server_name,
-                                          server_host,
-                                          server_port,
-                                          docker_image=docker_images.get(str(http2Interop)),
-                                          insecure=args.insecure,
-                                          manual_cmd_log=client_manual_cmd_log)
-        jobs.append(test_job)
-
-  if args.http2_server_interop:
-    if not args.manual_run:
-      http2_server_job.wait_for_healthy(timeout_seconds=600)
-    for language in languages_http2_clients_for_http2_server_interop:
-      for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(_HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
-        offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
-        server_port = _DEFAULT_SERVER_PORT+offset
+    for s in servers:
+        lang = str(s)
+        spec = server_jobspec(
+            _LANGUAGES[lang],
+            docker_images.get(lang),
+            args.insecure,
+            manual_cmd_log=server_manual_cmd_log)
         if not args.manual_run:
-          server_port = http2_server_job.mapped_port(server_port)
-        test_job = cloud_to_cloud_jobspec(language,
-                                          test_case,
-                                          str(http2InteropServer),
-                                          'localhost',
-                                          server_port,
-                                          docker_image=docker_images.get(str(language)),
-                                          manual_cmd_log=client_manual_cmd_log)
-        jobs.append(test_job)
-    for language in languages:
-      # HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
-      # HTTP_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
-      # than specialized http2 clients, reusing existing test implementations.
-      # For example, in the "data_frame_padding" test, use language's gRPC
-      # interop clients and make them think that theyre running "large_unary"
-      # test case. This avoids implementing a new test case in each language.
-      for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
-        if test_case not in language.unimplemented_test_cases():
-          offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
-          server_port = _DEFAULT_SERVER_PORT+offset
-          if not args.manual_run:
-            server_port = http2_server_job.mapped_port(server_port)
-          if not args.insecure:
-            print(('Creating grpc cient to http2 server test case with insecure connection, even though'
-                   ' args.insecure is False. Http2 test server only supports insecure connections.'))
-          test_job = cloud_to_cloud_jobspec(language,
-                                            test_case,
-                                            str(http2InteropServer),
-                                            'localhost',
-                                            server_port,
-                                            docker_image=docker_images.get(str(language)),
-                                            insecure=True,
-                                            manual_cmd_log=client_manual_cmd_log)
-          jobs.append(test_job)
+            job = dockerjob.DockerJob(spec)
+            server_jobs[lang] = job
+            server_addresses[lang] = ('localhost',
+                                      job.mapped_port(_DEFAULT_SERVER_PORT))
+        else:
+            # don't run the server, set server port to a placeholder value
+            server_addresses[lang] = ('localhost', '${SERVER_PORT}')
 
-  if not jobs:
-    print('No jobs to run.')
-    for image in six.itervalues(docker_images):
-      dockerjob.remove_image(image, skip_nonexistent=True)
-    sys.exit(1)
+    http2_server_job = None
+    if args.http2_server_interop:
+        # launch a HTTP2 server emulator that creates edge cases
+        lang = str(http2InteropServer)
+        spec = server_jobspec(
+            http2InteropServer,
+            docker_images.get(lang),
+            manual_cmd_log=server_manual_cmd_log)
+        if not args.manual_run:
+            http2_server_job = dockerjob.DockerJob(spec)
+            server_jobs[lang] = http2_server_job
+        else:
+            # don't run the server, set server port to a placeholder value
+            server_addresses[lang] = ('localhost', '${SERVER_PORT}')
 
-  if args.manual_run:
-    print('All tests will skipped --manual_run option is active.')
+    jobs = []
+    if args.cloud_to_prod:
+        if args.insecure:
+            print('TLS is always enabled for cloud_to_prod scenarios.')
+        for server_host_name in args.prod_servers:
+            for language in languages:
+                for test_case in _TEST_CASES:
+                    if not test_case in language.unimplemented_test_cases():
+                        if not test_case in _SKIP_ADVANCED + _SKIP_COMPRESSION:
+                            test_job = cloud_to_prod_jobspec(
+                                language,
+                                test_case,
+                                server_host_name,
+                                prod_servers[server_host_name],
+                                docker_image=docker_images.get(str(language)),
+                                manual_cmd_log=client_manual_cmd_log)
+                            jobs.append(test_job)
 
-  if args.verbose:
-    print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
+            if args.http2_interop:
+                for test_case in _HTTP2_TEST_CASES:
+                    test_job = cloud_to_prod_jobspec(
+                        http2Interop,
+                        test_case,
+                        server_host_name,
+                        prod_servers[server_host_name],
+                        docker_image=docker_images.get(str(http2Interop)),
+                        manual_cmd_log=client_manual_cmd_log)
+                    jobs.append(test_job)
 
-  num_failures, resultset = jobset.run(jobs, newline_on_success=True,
-                                       maxjobs=args.jobs,
-                                       skip_jobs=args.manual_run)
-  if args.bq_result_table and resultset:
-    upload_interop_results_to_bq(resultset, args.bq_result_table, args)
-  if num_failures:
-    jobset.message('FAILED', 'Some tests failed', do_newline=True)
-  else:
-    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+    if args.cloud_to_prod_auth:
+        if args.insecure:
+            print('TLS is always enabled for cloud_to_prod scenarios.')
+        for server_host_name in args.prod_servers:
+            for language in languages:
+                for test_case in _AUTH_TEST_CASES:
+                    if not test_case in language.unimplemented_test_cases():
+                        test_job = cloud_to_prod_jobspec(
+                            language,
+                            test_case,
+                            server_host_name,
+                            prod_servers[server_host_name],
+                            docker_image=docker_images.get(str(language)),
+                            auth=True,
+                            manual_cmd_log=client_manual_cmd_log)
+                        jobs.append(test_job)
 
-  write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
-  write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
+    for server in args.override_server:
+        server_name = server[0]
+        (server_host, server_port) = server[1].split(':')
+        server_addresses[server_name] = (server_host, server_port)
 
-  xml_report_name = _XML_REPORT
-  if args.internal_ci:
-    xml_report_name = _INTERNAL_CL_XML_REPORT
-  report_utils.render_junit_xml_report(resultset, xml_report_name)
+    for server_name, server_address in server_addresses.items():
+        (server_host, server_port) = server_address
+        server_language = _LANGUAGES.get(server_name, None)
+        skip_server = []  # test cases unimplemented by server
+        if server_language:
+            skip_server = server_language.unimplemented_test_cases_server()
+        for language in languages:
+            for test_case in _TEST_CASES:
+                if not test_case in language.unimplemented_test_cases():
+                    if not test_case in skip_server:
+                        test_job = cloud_to_cloud_jobspec(
+                            language,
+                            test_case,
+                            server_name,
+                            server_host,
+                            server_port,
+                            docker_image=docker_images.get(str(language)),
+                            insecure=args.insecure,
+                            manual_cmd_log=client_manual_cmd_log)
+                        jobs.append(test_job)
 
-  for name, job in resultset.items():
-    if "http2" in name:
-      job[0].http2results = aggregate_http2_results(job[0].message)
+        if args.http2_interop:
+            for test_case in _HTTP2_TEST_CASES:
+                if server_name == "go":
+                    # TODO(carl-mastrangelo): Reenable after https://github.com/grpc/grpc-go/issues/434
+                    continue
+                test_job = cloud_to_cloud_jobspec(
+                    http2Interop,
+                    test_case,
+                    server_name,
+                    server_host,
+                    server_port,
+                    docker_image=docker_images.get(str(http2Interop)),
+                    insecure=args.insecure,
+                    manual_cmd_log=client_manual_cmd_log)
+                jobs.append(test_job)
 
-  http2_server_test_cases = (
-      _HTTP2_SERVER_TEST_CASES if args.http2_server_interop else [])
+    if args.http2_server_interop:
+        if not args.manual_run:
+            http2_server_job.wait_for_healthy(timeout_seconds=600)
+        for language in languages_http2_clients_for_http2_server_interop:
+            for test_case in set(_HTTP2_SERVER_TEST_CASES) - set(
+                    _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS):
+                offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+                server_port = _DEFAULT_SERVER_PORT + offset
+                if not args.manual_run:
+                    server_port = http2_server_job.mapped_port(server_port)
+                test_job = cloud_to_cloud_jobspec(
+                    language,
+                    test_case,
+                    str(http2InteropServer),
+                    'localhost',
+                    server_port,
+                    docker_image=docker_images.get(str(language)),
+                    manual_cmd_log=client_manual_cmd_log)
+                jobs.append(test_job)
+        for language in languages:
+            # HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS is a subset of
+            # HTTP2_SERVER_TEST_CASES, in which clients use their gRPC interop clients rather
+            # than specialized http2 clients, reusing existing test implementations.
+            # For example, in the "data_frame_padding" test, use language's gRPC
+            # interop clients and make them think that they're running the "large_unary"
+            # test case. This avoids implementing a new test case in each language.
+            for test_case in _HTTP2_SERVER_TEST_CASES_THAT_USE_GRPC_CLIENTS:
+                if test_case not in language.unimplemented_test_cases():
+                    offset = sorted(_HTTP2_SERVER_TEST_CASES).index(test_case)
+                    server_port = _DEFAULT_SERVER_PORT + offset
+                    if not args.manual_run:
+                        server_port = http2_server_job.mapped_port(server_port)
+                    if not args.insecure:
+                        print((
+                            'Creating grpc client to http2 server test case with insecure connection, even though'
+                            ' args.insecure is False. Http2 test server only supports insecure connections.'
+                        ))
+                    test_job = cloud_to_cloud_jobspec(
+                        language,
+                        test_case,
+                        str(http2InteropServer),
+                        'localhost',
+                        server_port,
+                        docker_image=docker_images.get(str(language)),
+                        insecure=True,
+                        manual_cmd_log=client_manual_cmd_log)
+                    jobs.append(test_job)
 
-  report_utils.render_interop_html_report(
-      set([str(l) for l in languages]), servers, _TEST_CASES, _AUTH_TEST_CASES,
-      _HTTP2_TEST_CASES, http2_server_test_cases, resultset, num_failures,
-      args.cloud_to_prod_auth or args.cloud_to_prod, args.prod_servers,
-      args.http2_interop)
+    if not jobs:
+        print('No jobs to run.')
+        for image in six.itervalues(docker_images):
+            dockerjob.remove_image(image, skip_nonexistent=True)
+        sys.exit(1)
 
-  if num_failures:
-    sys.exit(1)
-  else:
-    sys.exit(0)
-except Exception as e:
-  print('exception occurred:')
-  traceback.print_exc(file=sys.stdout)
-finally:
-  # Check if servers are still running.
-  for server, job in server_jobs.items():
-    if not job.is_running():
-      print('Server "%s" has exited prematurely.' % server)
+    if args.manual_run:
+        print('All tests will be skipped because the --manual_run option is active.')
 
-  dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
+    if args.verbose:
+        print('Jobs to run: \n%s\n' % '\n'.join(str(job) for job in jobs))
 
-  for image in six.itervalues(docker_images):
-    if not args.manual_run:
-      print('Removing docker image %s' % image)
-      dockerjob.remove_image(image)
+    num_failures, resultset = jobset.run(
+        jobs,
+        newline_on_success=True,
+        maxjobs=args.jobs,
+        skip_jobs=args.manual_run)
+    if args.bq_result_table and resultset:
+        upload_interop_results_to_bq(resultset, args.bq_result_table, args)
+    if num_failures:
+        jobset.message('FAILED', 'Some tests failed', do_newline=True)
     else:
-      print('Preserving docker image: %s' % image)
+        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+
+    write_cmdlog_maybe(server_manual_cmd_log, 'interop_server_cmds.sh')
+    write_cmdlog_maybe(client_manual_cmd_log, 'interop_client_cmds.sh')
+
+    xml_report_name = _XML_REPORT
+    if args.internal_ci:
+        xml_report_name = _INTERNAL_CL_XML_REPORT
+    report_utils.render_junit_xml_report(resultset, xml_report_name)
+
+    for name, job in resultset.items():
+        if "http2" in name:
+            job[0].http2results = aggregate_http2_results(job[0].message)
+
+    http2_server_test_cases = (_HTTP2_SERVER_TEST_CASES
+                               if args.http2_server_interop else [])
+
+    report_utils.render_interop_html_report(
+        set([str(l) for l in languages]), servers, _TEST_CASES,
+        _AUTH_TEST_CASES, _HTTP2_TEST_CASES, http2_server_test_cases, resultset,
+        num_failures, args.cloud_to_prod_auth or args.cloud_to_prod,
+        args.prod_servers, args.http2_interop)
+
+    if num_failures:
+        sys.exit(1)
+    else:
+        sys.exit(0)
+except Exception as e:
+    print('exception occurred:')
+    traceback.print_exc(file=sys.stdout)
+finally:
+    # Check if servers are still running.
+    for server, job in server_jobs.items():
+        if not job.is_running():
+            print('Server "%s" has exited prematurely.' % server)
+
+    dockerjob.finish_jobs([j for j in six.itervalues(server_jobs)])
+
+    for image in six.itervalues(docker_images):
+        if not args.manual_run:
+            print('Removing docker image %s' % image)
+            dockerjob.remove_image(image)
+        else:
+            print('Preserving docker image: %s' % image)
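
The run_interop_tests.py hunk above assembles its job list by crossing every client language with every server address and skipping test cases that a side has declared unimplemented. A minimal, self-contained sketch of that matrix-building pattern (Language, SERVERS, Jobspec, and build_jobs below are illustrative stand-ins, not the script's real types or API):

# Illustrative sketch only: cross client languages with server addresses,
# dropping test cases the client does not implement.
import collections

Jobspec = collections.namedtuple('Jobspec', 'client server test_case')


class Language(object):
    """Stand-in for the script's per-language config objects."""

    def __init__(self, name, unimplemented):
        self.name = name
        self._unimplemented = set(unimplemented)

    def unimplemented_test_cases(self):
        return self._unimplemented

    def __str__(self):
        return self.name


_TEST_CASES = ['empty_unary', 'large_unary', 'ping_pong']
LANGUAGES = [Language('c++', []), Language('go', ['ping_pong'])]
SERVERS = {'c++': ('localhost', 8080), 'go': ('localhost', 8081)}


def build_jobs(languages, servers, test_cases):
    """Build one job per (server, client language, test case) combination,
    skipping cases the client language does not implement."""
    jobs = []
    for server_name, (host, port) in servers.items():
        for language in languages:
            for test_case in test_cases:
                if test_case in language.unimplemented_test_cases():
                    continue
                jobs.append(
                    Jobspec(str(language), '%s:%d' % (host, port), test_case))
    return jobs


if __name__ == '__main__':
    for job in build_jobs(LANGUAGES, SERVERS, _TEST_CASES):
        print(job)

The real script additionally consults the server language's unimplemented_test_cases_server() list and threads docker images, insecure/auth flags, and manual-run command logs through cloud_to_cloud_jobspec.
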
diff --git a/tools/run_tests/run_microbenchmark.py b/tools/run_tests/run_microbenchmark.py
index c136af5..4e4d05c 100755
--- a/tools/run_tests/run_microbenchmark.py
+++ b/tools/run_tests/run_microbenchmark.py
@@ -23,26 +23,31 @@
 import python_utils.jobset as jobset
 import python_utils.start_port_server as start_port_server
 
-sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks', 'bm_diff'))
+sys.path.append(
+    os.path.join(
+        os.path.dirname(sys.argv[0]), '..', 'profiling', 'microbenchmarks',
+        'bm_diff'))
 import bm_constants
 
 flamegraph_dir = os.path.join(os.path.expanduser('~'), 'FlameGraph')
 
 os.chdir(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 if not os.path.exists('reports'):
-  os.makedirs('reports')
+    os.makedirs('reports')
 
 start_port_server.start_port_server()
 
+
 def fnize(s):
-  out = ''
-  for c in s:
-    if c in '<>, /':
-      if len(out) and out[-1] == '_': continue
-      out += '_'
-    else:
-      out += c
-  return out
+    out = ''
+    for c in s:
+        if c in '<>, /':
+            if len(out) and out[-1] == '_': continue
+            out += '_'
+        else:
+            out += c
+    return out
+
 
 # index html
 index_html = """
@@ -53,169 +58,210 @@
 <body>
 """
 
+
 def heading(name):
-  global index_html
-  index_html += "<h1>%s</h1>\n" % name
+    global index_html
+    index_html += "<h1>%s</h1>\n" % name
+
 
 def link(txt, tgt):
-  global index_html
-  index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
-      cgi.escape(tgt, quote=True), cgi.escape(txt))
+    global index_html
+    index_html += "<p><a href=\"%s\">%s</a></p>\n" % (
+        cgi.escape(tgt, quote=True), cgi.escape(txt))
+
 
 def text(txt):
-  global index_html
-  index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+    global index_html
+    index_html += "<p><pre>%s</pre></p>\n" % cgi.escape(txt)
+
 
 def collect_latency(bm_name, args):
-  """generate latency profiles"""
-  benchmarks = []
-  profile_analysis = []
-  cleanup = []
+    """generate latency profiles"""
+    benchmarks = []
+    profile_analysis = []
+    cleanup = []
 
-  heading('Latency Profiles: %s' % bm_name)
-  subprocess.check_call(
-      ['make', bm_name,
-       'CONFIG=basicprof', '-j', '%d' % multiprocessing.cpu_count()])
-  for line in subprocess.check_output(['bins/basicprof/%s' % bm_name,
-                                       '--benchmark_list_tests']).splitlines():
-    link(line, '%s.txt' % fnize(line))
-    benchmarks.append(
-        jobset.JobSpec(['bins/basicprof/%s' % bm_name,
-                        '--benchmark_filter=^%s$' % line,
-                        '--benchmark_min_time=0.05'],
-                       environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
-                       shortname='profile-%s' % fnize(line)))
-    profile_analysis.append(
-        jobset.JobSpec([sys.executable,
-                        'tools/profiling/latency_profile/profile_analyzer.py',
-                        '--source', '%s.trace' % fnize(line), '--fmt', 'simple',
-                        '--out', 'reports/%s.txt' % fnize(line)], timeout_seconds=20*60,
-                        shortname='analyze-%s' % fnize(line)))
-    cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
-    # periodically flush out the list of jobs: profile_analysis jobs at least
-    # consume upwards of five gigabytes of ram in some cases, and so analysing
-    # hundreds of them at once is impractical -- but we want at least some
-    # concurrency or the work takes too long
-    if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
-      # run up to half the cpu count: each benchmark can use up to two cores
-      # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
-      jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
-      jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
-      benchmarks = []
-      profile_analysis = []
-      cleanup = []
-  # run the remaining benchmarks that weren't flushed
-  if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count()/2))
-    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
-    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+    heading('Latency Profiles: %s' % bm_name)
+    subprocess.check_call([
+        'make', bm_name, 'CONFIG=basicprof', '-j',
+        '%d' % multiprocessing.cpu_count()
+    ])
+    for line in subprocess.check_output(
+        ['bins/basicprof/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+        link(line, '%s.txt' % fnize(line))
+        benchmarks.append(
+            jobset.JobSpec(
+                [
+                    'bins/basicprof/%s' % bm_name,
+                    '--benchmark_filter=^%s$' % line,
+                    '--benchmark_min_time=0.05'
+                ],
+                environ={'LATENCY_TRACE': '%s.trace' % fnize(line)},
+                shortname='profile-%s' % fnize(line)))
+        profile_analysis.append(
+            jobset.JobSpec(
+                [
+                    sys.executable,
+                    'tools/profiling/latency_profile/profile_analyzer.py',
+                    '--source',
+                    '%s.trace' % fnize(line), '--fmt', 'simple', '--out',
+                    'reports/%s.txt' % fnize(line)
+                ],
+                timeout_seconds=20 * 60,
+                shortname='analyze-%s' % fnize(line)))
+        cleanup.append(jobset.JobSpec(['rm', '%s.trace' % fnize(line)]))
+        # periodically flush out the list of jobs: profile_analysis jobs at least
+        # consume upwards of five gigabytes of ram in some cases, and so analysing
+        # hundreds of them at once is impractical -- but we want at least some
+        # concurrency or the work takes too long
+        if len(benchmarks) >= min(16, multiprocessing.cpu_count()):
+            # run up to half the cpu count: each benchmark can use up to two cores
+            # (one for the microbenchmark, one for the data flush)
+            jobset.run(
+                benchmarks, maxjobs=max(1,
+                                        multiprocessing.cpu_count() / 2))
+            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+            benchmarks = []
+            profile_analysis = []
+            cleanup = []
+    # run the remaining benchmarks that weren't flushed
+    if len(benchmarks):
+        jobset.run(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() / 2))
+        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
 
 def collect_perf(bm_name, args):
-  """generate flamegraphs"""
-  heading('Flamegraphs: %s' % bm_name)
-  subprocess.check_call(
-      ['make', bm_name,
-       'CONFIG=mutrace', '-j', '%d' % multiprocessing.cpu_count()])
-  benchmarks = []
-  profile_analysis = []
-  cleanup = []
-  for line in subprocess.check_output(['bins/mutrace/%s' % bm_name,
-                                       '--benchmark_list_tests']).splitlines():
-    link(line, '%s.svg' % fnize(line))
-    benchmarks.append(
-        jobset.JobSpec(['perf', 'record', '-o', '%s-perf.data' % fnize(line),
-                        '-g', '-F', '997',
-                        'bins/mutrace/%s' % bm_name,
-                        '--benchmark_filter=^%s$' % line,
-                        '--benchmark_min_time=10'],
-                        shortname='perf-%s' % fnize(line)))
-    profile_analysis.append(
-        jobset.JobSpec(['tools/run_tests/performance/process_local_perf_flamegraphs.sh'],
-                       environ = {
-                           'PERF_BASE_NAME': fnize(line),
-                           'OUTPUT_DIR': 'reports',
-                           'OUTPUT_FILENAME': fnize(line),
-                       },
-                       shortname='flame-%s' % fnize(line)))
-    cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
-    cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
-    # periodically flush out the list of jobs: temporary space required for this
-    # processing is large
-    if len(benchmarks) >= 20:
-      # run up to half the cpu count: each benchmark can use up to two cores
-      # (one for the microbenchmark, one for the data flush)
-      jobset.run(benchmarks, maxjobs=1)
-      jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
-      jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
-      benchmarks = []
-      profile_analysis = []
-      cleanup = []
-  # run the remaining benchmarks that weren't flushed
-  if len(benchmarks):
-    jobset.run(benchmarks, maxjobs=1)
-    jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
-    jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+    """generate flamegraphs"""
+    heading('Flamegraphs: %s' % bm_name)
+    subprocess.check_call([
+        'make', bm_name, 'CONFIG=mutrace', '-j',
+        '%d' % multiprocessing.cpu_count()
+    ])
+    benchmarks = []
+    profile_analysis = []
+    cleanup = []
+    for line in subprocess.check_output(
+        ['bins/mutrace/%s' % bm_name, '--benchmark_list_tests']).splitlines():
+        link(line, '%s.svg' % fnize(line))
+        benchmarks.append(
+            jobset.JobSpec(
+                [
+                    'perf', 'record', '-o',
+                    '%s-perf.data' % fnize(line), '-g', '-F', '997',
+                    'bins/mutrace/%s' % bm_name,
+                    '--benchmark_filter=^%s$' % line, '--benchmark_min_time=10'
+                ],
+                shortname='perf-%s' % fnize(line)))
+        profile_analysis.append(
+            jobset.JobSpec(
+                [
+                    'tools/run_tests/performance/process_local_perf_flamegraphs.sh'
+                ],
+                environ={
+                    'PERF_BASE_NAME': fnize(line),
+                    'OUTPUT_DIR': 'reports',
+                    'OUTPUT_FILENAME': fnize(line),
+                },
+                shortname='flame-%s' % fnize(line)))
+        cleanup.append(jobset.JobSpec(['rm', '%s-perf.data' % fnize(line)]))
+        cleanup.append(jobset.JobSpec(['rm', '%s-out.perf' % fnize(line)]))
+        # periodically flush out the list of jobs: temporary space required for this
+        # processing is large
+        if len(benchmarks) >= 20:
+            # run up to half the cpu count: each benchmark can use up to two cores
+            # (one for the microbenchmark, one for the data flush)
+            jobset.run(benchmarks, maxjobs=1)
+            jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+            jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+            benchmarks = []
+            profile_analysis = []
+            cleanup = []
+    # run the remaining benchmarks that weren't flushed
+    if len(benchmarks):
+        jobset.run(benchmarks, maxjobs=1)
+        jobset.run(profile_analysis, maxjobs=multiprocessing.cpu_count())
+        jobset.run(cleanup, maxjobs=multiprocessing.cpu_count())
+
 
 def run_summary(bm_name, cfg, base_json_name):
-  subprocess.check_call(
-      ['make', bm_name,
-       'CONFIG=%s' % cfg, '-j', '%d' % multiprocessing.cpu_count()])
-  cmd = ['bins/%s/%s' % (cfg, bm_name),
-         '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
-         '--benchmark_out_format=json']
-  if args.summary_time is not None:
-    cmd += ['--benchmark_min_time=%d' % args.summary_time]
-  return subprocess.check_output(cmd)
+    subprocess.check_call([
+        'make', bm_name,
+        'CONFIG=%s' % cfg, '-j',
+        '%d' % multiprocessing.cpu_count()
+    ])
+    cmd = [
+        'bins/%s/%s' % (cfg, bm_name),
+        '--benchmark_out=%s.%s.json' % (base_json_name, cfg),
+        '--benchmark_out_format=json'
+    ]
+    if args.summary_time is not None:
+        cmd += ['--benchmark_min_time=%d' % args.summary_time]
+    return subprocess.check_output(cmd)
+
 
 def collect_summary(bm_name, args):
-  heading('Summary: %s [no counters]' % bm_name)
-  text(run_summary(bm_name, 'opt', bm_name))
-  heading('Summary: %s [with counters]' % bm_name)
-  text(run_summary(bm_name, 'counters', bm_name))
-  if args.bigquery_upload:
-    with open('%s.csv' % bm_name, 'w') as f:
-      f.write(subprocess.check_output(['tools/profiling/microbenchmarks/bm2bq.py',
-                                       '%s.counters.json' % bm_name,
-                                       '%s.opt.json' % bm_name]))
-    subprocess.check_call(['bq', 'load', 'microbenchmarks.microbenchmarks', '%s.csv' % bm_name])
+    heading('Summary: %s [no counters]' % bm_name)
+    text(run_summary(bm_name, 'opt', bm_name))
+    heading('Summary: %s [with counters]' % bm_name)
+    text(run_summary(bm_name, 'counters', bm_name))
+    if args.bigquery_upload:
+        with open('%s.csv' % bm_name, 'w') as f:
+            f.write(
+                subprocess.check_output([
+                    'tools/profiling/microbenchmarks/bm2bq.py',
+                    '%s.counters.json' % bm_name,
+                    '%s.opt.json' % bm_name
+                ]))
+        subprocess.check_call([
+            'bq', 'load', 'microbenchmarks.microbenchmarks',
+            '%s.csv' % bm_name
+        ])
+
 
 collectors = {
-  'latency': collect_latency,
-  'perf': collect_perf,
-  'summary': collect_summary,
+    'latency': collect_latency,
+    'perf': collect_perf,
+    'summary': collect_summary,
 }
 
 argp = argparse.ArgumentParser(description='Collect data from microbenchmarks')
-argp.add_argument('-c', '--collect',
-                  choices=sorted(collectors.keys()),
-                  nargs='*',
-                  default=sorted(collectors.keys()),
-                  help='Which collectors should be run against each benchmark')
-argp.add_argument('-b', '--benchmarks',
-                  choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-                  default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
-                  nargs='+',
-                  type=str,
-                  help='Which microbenchmarks should be run')
-argp.add_argument('--bigquery_upload',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Upload results from summary collection to bigquery')
-argp.add_argument('--summary_time',
-                  default=None,
-                  type=int,
-                  help='Minimum time to run benchmarks for the summary collection')
+argp.add_argument(
+    '-c',
+    '--collect',
+    choices=sorted(collectors.keys()),
+    nargs='*',
+    default=sorted(collectors.keys()),
+    help='Which collectors should be run against each benchmark')
+argp.add_argument(
+    '-b',
+    '--benchmarks',
+    choices=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+    default=bm_constants._AVAILABLE_BENCHMARK_TESTS,
+    nargs='+',
+    type=str,
+    help='Which microbenchmarks should be run')
+argp.add_argument(
+    '--bigquery_upload',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Upload results from summary collection to bigquery')
+argp.add_argument(
+    '--summary_time',
+    default=None,
+    type=int,
+    help='Minimum time to run benchmarks for the summary collection')
 args = argp.parse_args()
 
 try:
-  for collect in args.collect:
-    for bm_name in args.benchmarks:
-      collectors[collect](bm_name, args)
+    for collect in args.collect:
+        for bm_name in args.benchmarks:
+            collectors[collect](bm_name, args)
 finally:
-  if not os.path.exists('reports'):
-    os.makedirs('reports')
-  index_html += "</body>\n</html>\n"
-  with open('reports/index.html', 'w') as f:
-    f.write(index_html)
+    if not os.path.exists('reports'):
+        os.makedirs('reports')
+    index_html += "</body>\n</html>\n"
+    with open('reports/index.html', 'w') as f:
+        f.write(index_html)
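
Both collect_latency() and collect_perf() above follow the same accumulate-then-flush pattern: JobSpecs pile up in small lists and are run in bounded batches so the memory- and disk-hungry analysis jobs never queue up all at once. A rough standalone sketch of that pattern, with run_batch standing in for jobset.run (all names below are illustrative, not the script's API):

# Illustrative sketch only: accumulate work, flush it in bounded batches.
import multiprocessing


def run_batch(jobs, maxjobs):
    """Placeholder executor; the real script delegates to jobset.run()."""
    print('running %d jobs with maxjobs=%d' % (len(jobs), maxjobs))


def collect(work_items, flush_at=16):
    benchmarks = []
    analysis = []
    for item in work_items:
        benchmarks.append('profile-%s' % item)
        analysis.append('analyze-%s' % item)
        # Flush periodically: analysis jobs are resource heavy, so keep the
        # in-flight set small while still getting some concurrency.
        if len(benchmarks) >= min(flush_at, multiprocessing.cpu_count()):
            run_batch(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() // 2))
            run_batch(analysis, maxjobs=multiprocessing.cpu_count())
            benchmarks = []
            analysis = []
    # Run whatever was left after the last full batch.
    if benchmarks:
        run_batch(benchmarks, maxjobs=max(1, multiprocessing.cpu_count() // 2))
        run_batch(analysis, maxjobs=multiprocessing.cpu_count())


if __name__ == '__main__':
    collect(['bm_%d' % i for i in range(40)])
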
diff --git a/tools/run_tests/run_performance_tests.py b/tools/run_tests/run_performance_tests.py
index 1bbab9e..9a9f74e 100755
--- a/tools/run_tests/run_performance_tests.py
+++ b/tools/run_tests/run_performance_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run performance tests locally or remotely."""
 
 from __future__ import print_function
@@ -37,558 +36,670 @@
 import python_utils.jobset as jobset
 import python_utils.report_utils as report_utils
 
-
 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(_ROOT)
 
-
 _REMOTE_HOST_USERNAME = 'jenkins'
 
 
 class QpsWorkerJob:
-  """Encapsulates a qps worker server job."""
+    """Encapsulates a qps worker server job."""
 
-  def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
-    self._spec = spec
-    self.language = language
-    self.host_and_port = host_and_port
-    self._job = None
-    self.perf_file_base_name = perf_file_base_name
+    def __init__(self, spec, language, host_and_port, perf_file_base_name=None):
+        self._spec = spec
+        self.language = language
+        self.host_and_port = host_and_port
+        self._job = None
+        self.perf_file_base_name = perf_file_base_name
 
-  def start(self):
-    self._job = jobset.Job(self._spec, newline_on_success=True, travis=True, add_env={})
+    def start(self):
+        self._job = jobset.Job(
+            self._spec, newline_on_success=True, travis=True, add_env={})
 
-  def is_running(self):
-    """Polls a job and returns True if given job is still running."""
-    return self._job and self._job.state() == jobset._RUNNING
+    def is_running(self):
+        """Polls a job and returns True if given job is still running."""
+        return self._job and self._job.state() == jobset._RUNNING
 
-  def kill(self):
-    if self._job:
-      self._job.kill()
-      self._job = None
+    def kill(self):
+        if self._job:
+            self._job.kill()
+            self._job = None
 
 
-def create_qpsworker_job(language, shortname=None, port=10000, remote_host=None, perf_cmd=None):
-  cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
+def create_qpsworker_job(language,
+                         shortname=None,
+                         port=10000,
+                         remote_host=None,
+                         perf_cmd=None):
+    cmdline = (language.worker_cmdline() + ['--driver_port=%s' % port])
 
-  if remote_host:
-    host_and_port='%s:%s' % (remote_host, port)
-  else:
-    host_and_port='localhost:%s' % port
+    if remote_host:
+        host_and_port = '%s:%s' % (remote_host, port)
+    else:
+        host_and_port = 'localhost:%s' % port
 
-  perf_file_base_name = None
-  if perf_cmd:
-    perf_file_base_name = '%s-%s' % (host_and_port, shortname)
-    # specify -o output file so perf.data gets collected when worker stopped
-    cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name] + cmdline
+    perf_file_base_name = None
+    if perf_cmd:
+        perf_file_base_name = '%s-%s' % (host_and_port, shortname)
+        # specify -o output file so perf.data gets collected when worker stopped
+        cmdline = perf_cmd + ['-o', '%s-perf.data' % perf_file_base_name
+                             ] + cmdline
 
-  worker_timeout = 3 * 60
-  if remote_host:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
-    ssh_cmd = ['ssh']
-    cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
-    ssh_cmd.extend([str(user_at_host), 'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s' % ' '.join(cmdline)])
-    cmdline = ssh_cmd
+    worker_timeout = 3 * 60
+    if remote_host:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+        ssh_cmd = ['ssh']
+        cmdline = ['timeout', '%s' % (worker_timeout + 30)] + cmdline
+        ssh_cmd.extend([
+            str(user_at_host),
+            'cd ~/performance_workspace/grpc/ && python tools/run_tests/start_port_server.py && %s'
+            % ' '.join(cmdline)
+        ])
+        cmdline = ssh_cmd
 
-  jobspec = jobset.JobSpec(
-      cmdline=cmdline,
-      shortname=shortname,
-      timeout_seconds=worker_timeout,  # workers get restarted after each scenario
-      verbose_success=True)
-  return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
+    jobspec = jobset.JobSpec(
+        cmdline=cmdline,
+        shortname=shortname,
+        timeout_seconds=
+        worker_timeout,  # workers get restarted after each scenario
+        verbose_success=True)
+    return QpsWorkerJob(jobspec, language, host_and_port, perf_file_base_name)
 
 
-def create_scenario_jobspec(scenario_json, workers, remote_host=None,
-                            bq_result_table=None, server_cpu_load=0):
-  """Runs one scenario using QPS driver."""
-  # setting QPS_WORKERS env variable here makes sure it works with SSH too.
-  cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
-  if bq_result_table:
-    cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
-  cmd += 'tools/run_tests/performance/run_qps_driver.sh '
-  cmd += '--scenarios_json=%s ' % pipes.quote(json.dumps({'scenarios': [scenario_json]}))
-  cmd += '--scenario_result_file=scenario_result.json '
-  if server_cpu_load != 0:
-      cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
-  if remote_host:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
-    cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
+def create_scenario_jobspec(scenario_json,
+                            workers,
+                            remote_host=None,
+                            bq_result_table=None,
+                            server_cpu_load=0):
+    """Runs one scenario using QPS driver."""
+    # setting QPS_WORKERS env variable here makes sure it works with SSH too.
+    cmd = 'QPS_WORKERS="%s" ' % ','.join(workers)
+    if bq_result_table:
+        cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+    cmd += 'tools/run_tests/performance/run_qps_driver.sh '
+    cmd += '--scenarios_json=%s ' % pipes.quote(
+        json.dumps({
+            'scenarios': [scenario_json]
+        }))
+    cmd += '--scenario_result_file=scenario_result.json '
+    if server_cpu_load != 0:
+        cmd += '--search_param=offered_load --initial_search_value=1000 --targeted_cpu_load=%d --stride=500 --error_tolerance=0.01' % server_cpu_load
+    if remote_host:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+        cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+            user_at_host, pipes.quote(cmd))
 
-  return jobset.JobSpec(
-      cmdline=[cmd],
-      shortname='qps_json_driver.%s' % scenario_json['name'],
-      timeout_seconds=12*60,
-      shell=True,
-      verbose_success=True)
+    return jobset.JobSpec(
+        cmdline=[cmd],
+        shortname='qps_json_driver.%s' % scenario_json['name'],
+        timeout_seconds=12 * 60,
+        shell=True,
+        verbose_success=True)
 
 
 def create_quit_jobspec(workers, remote_host=None):
-  """Runs quit using QPS driver."""
-  # setting QPS_WORKERS env variable here makes sure it works with SSH too.
-  cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(w.host_and_port for w in workers)
-  if remote_host:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
-    cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
+    """Runs quit using QPS driver."""
+    # setting QPS_WORKERS env variable here makes sure it works with SSH too.
+    cmd = 'QPS_WORKERS="%s" bins/opt/qps_json_driver --quit' % ','.join(
+        w.host_and_port for w in workers)
+    if remote_host:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, remote_host)
+        cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+            user_at_host, pipes.quote(cmd))
 
-  return jobset.JobSpec(
-      cmdline=[cmd],
-      shortname='qps_json_driver.quit',
-      timeout_seconds=3*60,
-      shell=True,
-      verbose_success=True)
+    return jobset.JobSpec(
+        cmdline=[cmd],
+        shortname='qps_json_driver.quit',
+        timeout_seconds=3 * 60,
+        shell=True,
+        verbose_success=True)
 
 
-def create_netperf_jobspec(server_host='localhost', client_host=None,
+def create_netperf_jobspec(server_host='localhost',
+                           client_host=None,
                            bq_result_table=None):
-  """Runs netperf benchmark."""
-  cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
-  if bq_result_table:
-    cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
-  if client_host:
-    # If netperf is running remotely, the env variables populated by Jenkins
-    # won't be available on the client, but we need them for uploading results
-    # to BigQuery.
-    jenkins_job_name = os.getenv('JOB_NAME')
-    if jenkins_job_name:
-      cmd += 'JOB_NAME="%s" ' % jenkins_job_name
-    jenkins_build_number = os.getenv('BUILD_NUMBER')
-    if jenkins_build_number:
-      cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
+    """Runs netperf benchmark."""
+    cmd = 'NETPERF_SERVER_HOST="%s" ' % server_host
+    if bq_result_table:
+        cmd += 'BQ_RESULT_TABLE="%s" ' % bq_result_table
+    if client_host:
+        # If netperf is running remotely, the env variables populated by Jenkins
+        # won't be available on the client, but we need them for uploading results
+        # to BigQuery.
+        jenkins_job_name = os.getenv('JOB_NAME')
+        if jenkins_job_name:
+            cmd += 'JOB_NAME="%s" ' % jenkins_job_name
+        jenkins_build_number = os.getenv('BUILD_NUMBER')
+        if jenkins_build_number:
+            cmd += 'BUILD_NUMBER="%s" ' % jenkins_build_number
 
-  cmd += 'tools/run_tests/performance/run_netperf.sh'
-  if client_host:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
-    cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (user_at_host, pipes.quote(cmd))
+    cmd += 'tools/run_tests/performance/run_netperf.sh'
+    if client_host:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, client_host)
+        cmd = 'ssh %s "cd ~/performance_workspace/grpc/ && "%s' % (
+            user_at_host, pipes.quote(cmd))
 
-  return jobset.JobSpec(
-      cmdline=[cmd],
-      shortname='netperf',
-      timeout_seconds=60,
-      shell=True,
-      verbose_success=True)
+    return jobset.JobSpec(
+        cmdline=[cmd],
+        shortname='netperf',
+        timeout_seconds=60,
+        shell=True,
+        verbose_success=True)
 
 
 def archive_repo(languages):
-  """Archives local version of repo including submodules."""
-  cmdline=['tar', '-cf', '../grpc.tar', '../grpc/']
-  if 'java' in languages:
-    cmdline.append('../grpc-java')
-  if 'go' in languages:
-    cmdline.append('../grpc-go')
+    """Archives local version of repo including submodules."""
+    cmdline = ['tar', '-cf', '../grpc.tar', '../grpc/']
+    if 'java' in languages:
+        cmdline.append('../grpc-java')
+    if 'go' in languages:
+        cmdline.append('../grpc-go')
 
-  archive_job = jobset.JobSpec(
-      cmdline=cmdline,
-      shortname='archive_repo',
-      timeout_seconds=3*60)
+    archive_job = jobset.JobSpec(
+        cmdline=cmdline, shortname='archive_repo', timeout_seconds=3 * 60)
 
-  jobset.message('START', 'Archiving local repository.', do_newline=True)
-  num_failures, _ = jobset.run(
-      [archive_job], newline_on_success=True, maxjobs=1)
-  if num_failures == 0:
-    jobset.message('SUCCESS',
-                   'Archive with local repository created successfully.',
-                   do_newline=True)
-  else:
-    jobset.message('FAILED', 'Failed to archive local repository.',
-                   do_newline=True)
-    sys.exit(1)
+    jobset.message('START', 'Archiving local repository.', do_newline=True)
+    num_failures, _ = jobset.run(
+        [archive_job], newline_on_success=True, maxjobs=1)
+    if num_failures == 0:
+        jobset.message(
+            'SUCCESS',
+            'Archive with local repository created successfully.',
+            do_newline=True)
+    else:
+        jobset.message(
+            'FAILED', 'Failed to archive local repository.', do_newline=True)
+        sys.exit(1)
 
 
 def prepare_remote_hosts(hosts, prepare_local=False):
-  """Prepares remote hosts (and maybe prepare localhost as well)."""
-  prepare_timeout = 5*60
-  prepare_jobs = []
-  for host in hosts:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
-    prepare_jobs.append(
-        jobset.JobSpec(
-            cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
-            shortname='remote_host_prepare.%s' % host,
-            environ = {'USER_AT_HOST': user_at_host},
-            timeout_seconds=prepare_timeout))
-  if prepare_local:
-    # Prepare localhost as well
-    prepare_jobs.append(
-        jobset.JobSpec(
-            cmdline=['tools/run_tests/performance/kill_workers.sh'],
-            shortname='local_prepare',
-            timeout_seconds=prepare_timeout))
-  jobset.message('START', 'Preparing hosts.', do_newline=True)
-  num_failures, _ = jobset.run(
-      prepare_jobs, newline_on_success=True, maxjobs=10)
-  if num_failures == 0:
-    jobset.message('SUCCESS',
-                   'Prepare step completed successfully.',
-                   do_newline=True)
-  else:
-    jobset.message('FAILED', 'Failed to prepare remote hosts.',
-                   do_newline=True)
-    sys.exit(1)
+    """Prepares remote hosts (and maybe prepare localhost as well)."""
+    prepare_timeout = 10 * 60
+    prepare_jobs = []
+    for host in hosts:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+        prepare_jobs.append(
+            jobset.JobSpec(
+                cmdline=['tools/run_tests/performance/remote_host_prepare.sh'],
+                shortname='remote_host_prepare.%s' % host,
+                environ={'USER_AT_HOST': user_at_host},
+                timeout_seconds=prepare_timeout))
+    if prepare_local:
+        # Prepare localhost as well
+        prepare_jobs.append(
+            jobset.JobSpec(
+                cmdline=['tools/run_tests/performance/kill_workers.sh'],
+                shortname='local_prepare',
+                timeout_seconds=prepare_timeout))
+    jobset.message('START', 'Preparing hosts.', do_newline=True)
+    num_failures, _ = jobset.run(
+        prepare_jobs, newline_on_success=True, maxjobs=10)
+    if num_failures == 0:
+        jobset.message(
+            'SUCCESS', 'Prepare step completed successfully.', do_newline=True)
+    else:
+        jobset.message(
+            'FAILED', 'Failed to prepare remote hosts.', do_newline=True)
+        sys.exit(1)
 
 
-def build_on_remote_hosts(hosts, languages=scenario_config.LANGUAGES.keys(), build_local=False):
-  """Builds performance worker on remote hosts (and maybe also locally)."""
-  build_timeout = 15*60
-  build_jobs = []
-  for host in hosts:
-    user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
-    build_jobs.append(
-        jobset.JobSpec(
-            cmdline=['tools/run_tests/performance/remote_host_build.sh'] + languages,
-            shortname='remote_host_build.%s' % host,
-            environ = {'USER_AT_HOST': user_at_host, 'CONFIG': 'opt'},
-            timeout_seconds=build_timeout))
-  if build_local:
-    # Build locally as well
-    build_jobs.append(
-        jobset.JobSpec(
-            cmdline=['tools/run_tests/performance/build_performance.sh'] + languages,
-            shortname='local_build',
-            environ = {'CONFIG': 'opt'},
-            timeout_seconds=build_timeout))
-  jobset.message('START', 'Building.', do_newline=True)
-  num_failures, _ = jobset.run(
-      build_jobs, newline_on_success=True, maxjobs=10)
-  if num_failures == 0:
-    jobset.message('SUCCESS',
-                   'Built successfully.',
-                   do_newline=True)
-  else:
-    jobset.message('FAILED', 'Build failed.',
-                   do_newline=True)
-    sys.exit(1)
+def build_on_remote_hosts(hosts,
+                          languages=scenario_config.LANGUAGES.keys(),
+                          build_local=False):
+    """Builds performance worker on remote hosts (and maybe also locally)."""
+    build_timeout = 15 * 60
+    # Kokoro VMs (which are local only) do not have caching, so they need more time to build
+    local_build_timeout = 30 * 60
+    build_jobs = []
+    for host in hosts:
+        user_at_host = '%s@%s' % (_REMOTE_HOST_USERNAME, host)
+        build_jobs.append(
+            jobset.JobSpec(
+                cmdline=['tools/run_tests/performance/remote_host_build.sh'] +
+                languages,
+                shortname='remote_host_build.%s' % host,
+                environ={'USER_AT_HOST': user_at_host,
+                         'CONFIG': 'opt'},
+                timeout_seconds=build_timeout))
+    if build_local:
+        # Build locally as well
+        build_jobs.append(
+            jobset.JobSpec(
+                cmdline=['tools/run_tests/performance/build_performance.sh'] +
+                languages,
+                shortname='local_build',
+                environ={'CONFIG': 'opt'},
+                timeout_seconds=local_build_timeout))
+    jobset.message('START', 'Building.', do_newline=True)
+    num_failures, _ = jobset.run(
+        build_jobs, newline_on_success=True, maxjobs=10)
+    if num_failures == 0:
+        jobset.message('SUCCESS', 'Built successfully.', do_newline=True)
+    else:
+        jobset.message('FAILED', 'Build failed.', do_newline=True)
+        sys.exit(1)
 
 
 def create_qpsworkers(languages, worker_hosts, perf_cmd=None):
-  """Creates QPS workers (but does not start them)."""
-  if not worker_hosts:
-    # run two workers locally (for each language)
-    workers=[(None, 10000), (None, 10010)]
-  elif len(worker_hosts) == 1:
-    # run two workers on the remote host (for each language)
-    workers=[(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
-  else:
-    # run one worker per each remote host (for each language)
-    workers=[(worker_host, 10000) for worker_host in worker_hosts]
+    """Creates QPS workers (but does not start them)."""
+    if not worker_hosts:
+        # run two workers locally (for each language)
+        workers = [(None, 10000), (None, 10010)]
+    elif len(worker_hosts) == 1:
+        # run two workers on the remote host (for each language)
+        workers = [(worker_hosts[0], 10000), (worker_hosts[0], 10010)]
+    else:
+        # run one worker per each remote host (for each language)
+        workers = [(worker_host, 10000) for worker_host in worker_hosts]
 
-  return [create_qpsworker_job(language,
-                               shortname= 'qps_worker_%s_%s' % (language,
-                                                                worker_idx),
-                               port=worker[1] + language.worker_port_offset(),
-                               remote_host=worker[0],
-                               perf_cmd=perf_cmd)
-          for language in languages
-          for worker_idx, worker in enumerate(workers)]
+    return [
+        create_qpsworker_job(
+            language,
+            shortname='qps_worker_%s_%s' % (language, worker_idx),
+            port=worker[1] + language.worker_port_offset(),
+            remote_host=worker[0],
+            perf_cmd=perf_cmd)
+        for language in languages
+        for worker_idx, worker in enumerate(workers)
+    ]
 
 
-def perf_report_processor_job(worker_host, perf_base_name, output_filename, flame_graph_reports):
-  print('Creating perf report collection job for %s' % worker_host)
-  cmd = ''
-  if worker_host != 'localhost':
-    user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
-    cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
-         tools/run_tests/performance/process_remote_perf_flamegraphs.sh" \
-          % (user_at_host, output_filename, flame_graph_reports, perf_base_name)
-  else:
-    cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%s\
-          tools/run_tests/performance/process_local_perf_flamegraphs.sh" \
-          % (output_filename, flame_graph_reports, perf_base_name)
+def perf_report_processor_job(worker_host, perf_base_name, output_filename,
+                              flame_graph_reports):
+    print('Creating perf report collection job for %s' % worker_host)
+    cmd = ''
+    if worker_host != 'localhost':
+        user_at_host = "%s@%s" % (_REMOTE_HOST_USERNAME, worker_host)
+        cmd = "USER_AT_HOST=%s OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_remote_perf_flamegraphs.sh" % (
+            user_at_host, output_filename, flame_graph_reports, perf_base_name)
+    else:
+        cmd = "OUTPUT_FILENAME=%s OUTPUT_DIR=%s PERF_BASE_NAME=%stools/run_tests/performance/process_local_perf_flamegraphs.sh" % (
+            output_filename, flame_graph_reports, perf_base_name)
 
-  return jobset.JobSpec(cmdline=cmd,
-                        timeout_seconds=3*60,
-                        shell=True,
-                        verbose_success=True,
-                        shortname='process perf report')
+    return jobset.JobSpec(
+        cmdline=cmd,
+        timeout_seconds=3 * 60,
+        shell=True,
+        verbose_success=True,
+        shortname='process perf report')
 
 
 Scenario = collections.namedtuple('Scenario', 'jobspec workers name')
 
 
-def create_scenarios(languages, workers_by_lang, remote_host=None, regex='.*',
-                     category='all', bq_result_table=None,
-                     netperf=False, netperf_hosts=[], server_cpu_load=0):
-  """Create jobspecs for scenarios to run."""
-  all_workers = [worker
-                 for workers in workers_by_lang.values()
-                 for worker in workers]
-  scenarios = []
-  _NO_WORKERS = []
+def create_scenarios(languages,
+                     workers_by_lang,
+                     remote_host=None,
+                     regex='.*',
+                     category='all',
+                     bq_result_table=None,
+                     netperf=False,
+                     netperf_hosts=[],
+                     server_cpu_load=0):
+    """Create jobspecs for scenarios to run."""
+    all_workers = [
+        worker for workers in workers_by_lang.values() for worker in workers
+    ]
+    scenarios = []
+    _NO_WORKERS = []
 
-  if netperf:
-    if not netperf_hosts:
-      netperf_server='localhost'
-      netperf_client=None
-    elif len(netperf_hosts) == 1:
-      netperf_server=netperf_hosts[0]
-      netperf_client=netperf_hosts[0]
-    else:
-      netperf_server=netperf_hosts[0]
-      netperf_client=netperf_hosts[1]
-    scenarios.append(Scenario(
-        create_netperf_jobspec(server_host=netperf_server,
-                               client_host=netperf_client,
-                               bq_result_table=bq_result_table),
-        _NO_WORKERS, 'netperf'))
+    if netperf:
+        if not netperf_hosts:
+            netperf_server = 'localhost'
+            netperf_client = None
+        elif len(netperf_hosts) == 1:
+            netperf_server = netperf_hosts[0]
+            netperf_client = netperf_hosts[0]
+        else:
+            netperf_server = netperf_hosts[0]
+            netperf_client = netperf_hosts[1]
+        scenarios.append(
+            Scenario(
+                create_netperf_jobspec(
+                    server_host=netperf_server,
+                    client_host=netperf_client,
+                    bq_result_table=bq_result_table), _NO_WORKERS, 'netperf'))
 
-  for language in languages:
-    for scenario_json in language.scenarios():
-      if re.search(regex, scenario_json['name']):
-        categories = scenario_json.get('CATEGORIES', ['scalable', 'smoketest'])
-        if category in categories or category == 'all':
-          workers = workers_by_lang[str(language)][:]
-          # 'SERVER_LANGUAGE' is an indicator for this script to pick
-          # a server in different language.
-          custom_server_lang = scenario_json.get('SERVER_LANGUAGE', None)
-          custom_client_lang = scenario_json.get('CLIENT_LANGUAGE', None)
-          scenario_json = scenario_config.remove_nonproto_fields(scenario_json)
-          if custom_server_lang and custom_client_lang:
-            raise Exception('Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE'
+    for language in languages:
+        for scenario_json in language.scenarios():
+            if re.search(regex, scenario_json['name']):
+                categories = scenario_json.get('CATEGORIES',
+                                               ['scalable', 'smoketest'])
+                if category in categories or category == 'all':
+                    workers = workers_by_lang[str(language)][:]
+                    # 'SERVER_LANGUAGE' is an indicator for this script to pick
+                    # a server in a different language.
+                    custom_server_lang = scenario_json.get(
+                        'SERVER_LANGUAGE', None)
+                    custom_client_lang = scenario_json.get(
+                        'CLIENT_LANGUAGE', None)
+                    scenario_json = scenario_config.remove_nonproto_fields(
+                        scenario_json)
+                    if custom_server_lang and custom_client_lang:
+                        raise Exception(
+                            'Cannot set both custom CLIENT_LANGUAGE and SERVER_LANGUAGE '
                             'in the same scenario')
-          if custom_server_lang:
-            if not workers_by_lang.get(custom_server_lang, []):
-              print('Warning: Skipping scenario %s as' % scenario_json['name'])
-              print('SERVER_LANGUAGE is set to %s yet the language has '
-                    'not been selected with -l' % custom_server_lang)
-              continue
-            for idx in range(0, scenario_json['num_servers']):
-              # replace first X workers by workers of a different language
-              workers[idx] = workers_by_lang[custom_server_lang][idx]
-          if custom_client_lang:
-            if not workers_by_lang.get(custom_client_lang, []):
-              print('Warning: Skipping scenario %s as' % scenario_json['name'])
-              print('CLIENT_LANGUAGE is set to %s yet the language has '
-                    'not been selected with -l' % custom_client_lang)
-              continue
-            for idx in range(scenario_json['num_servers'], len(workers)):
-              # replace all client workers by workers of a different language,
-              # leave num_server workers as they are server workers.
-              workers[idx] = workers_by_lang[custom_client_lang][idx]
-          scenario = Scenario(
-              create_scenario_jobspec(scenario_json,
-                                      [w.host_and_port for w in workers],
-                                      remote_host=remote_host,
-                                      bq_result_table=bq_result_table,
-                                      server_cpu_load=server_cpu_load),
-              workers,
-              scenario_json['name'])
-          scenarios.append(scenario)
+                    if custom_server_lang:
+                        if not workers_by_lang.get(custom_server_lang, []):
+                            print('Warning: Skipping scenario %s as' %
+                                  scenario_json['name'])
+                            print(
+                                'SERVER_LANGUAGE is set to %s yet the language has '
+                                'not been selected with -l' %
+                                custom_server_lang)
+                            continue
+                        for idx in range(0, scenario_json['num_servers']):
+                            # replace first X workers by workers of a different language
+                            workers[idx] = workers_by_lang[custom_server_lang][
+                                idx]
+                    if custom_client_lang:
+                        if not workers_by_lang.get(custom_client_lang, []):
+                            print('Warning: Skipping scenario %s as' %
+                                  scenario_json['name'])
+                            print(
+                                'CLIENT_LANGUAGE is set to %s yet the language has '
+                                'not been selected with -l' %
+                                custom_client_lang)
+                            continue
+                        for idx in range(scenario_json['num_servers'],
+                                         len(workers)):
+                            # replace all client workers with workers of a different language;
+                            # leave the first num_servers workers alone (they are server workers).
+                            workers[idx] = workers_by_lang[custom_client_lang][
+                                idx]
+                    scenario = Scenario(
+                        create_scenario_jobspec(
+                            scenario_json, [w.host_and_port for w in workers],
+                            remote_host=remote_host,
+                            bq_result_table=bq_result_table,
+                            server_cpu_load=server_cpu_load), workers,
+                        scenario_json['name'])
+                    scenarios.append(scenario)
 
-  return scenarios
+    return scenarios
 
 
 def finish_qps_workers(jobs, qpsworker_jobs):
-  """Waits for given jobs to finish and eventually kills them."""
-  retries = 0
-  num_killed = 0
-  while any(job.is_running() for job in jobs):
-    for job in qpsworker_jobs:
-      if job.is_running():
-        print('QPS worker "%s" is still running.' % job.host_and_port)
-    if retries > 10:
-      print('Killing all QPS workers.')
-      for job in jobs:
-        job.kill()
-        num_killed += 1
-    retries += 1
-    time.sleep(3)
-  print('All QPS workers finished.')
-  return num_killed
+    """Waits for given jobs to finish and eventually kills them."""
+    retries = 0
+    num_killed = 0
+    while any(job.is_running() for job in jobs):
+        for job in qpsworker_jobs:
+            if job.is_running():
+                print('QPS worker "%s" is still running.' % job.host_and_port)
+        if retries > 10:
+            print('Killing all QPS workers.')
+            for job in jobs:
+                job.kill()
+                num_killed += 1
+        retries += 1
+        time.sleep(3)
+    print('All QPS workers finished.')
+    return num_killed
+
 
 profile_output_files = []
 
+
 # Collect perf text reports and flamegraphs if perf_cmd was used
 # Note that the base names of perf text reports are used when creating and processing
 # perf data. The scenario name makes the output name unique in the final
 # perf reports directory.
 # Also, the perf profiles need to be fetched and processed after each scenario
 # in order to avoid clobbering the output files.
-def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name, flame_graph_reports):
-  perf_report_jobs = []
-  global profile_output_files
-  for host_and_port in hosts_and_base_names:
-    perf_base_name = hosts_and_base_names[host_and_port]
-    output_filename = '%s-%s' % (scenario_name, perf_base_name)
-    # from the base filename, create .svg output filename
-    host = host_and_port.split(':')[0]
-    profile_output_files.append('%s.svg' % output_filename)
-    perf_report_jobs.append(perf_report_processor_job(host, perf_base_name, output_filename, flame_graph_reports))
+def run_collect_perf_profile_jobs(hosts_and_base_names, scenario_name,
+                                  flame_graph_reports):
+    perf_report_jobs = []
+    global profile_output_files
+    for host_and_port in hosts_and_base_names:
+        perf_base_name = hosts_and_base_names[host_and_port]
+        output_filename = '%s-%s' % (scenario_name, perf_base_name)
+        # from the base filename, create .svg output filename
+        host = host_and_port.split(':')[0]
+        profile_output_files.append('%s.svg' % output_filename)
+        perf_report_jobs.append(
+            perf_report_processor_job(host, perf_base_name, output_filename,
+                                      flame_graph_reports))
 
-  jobset.message('START', 'Collecting perf reports from qps workers', do_newline=True)
-  failures, _ = jobset.run(perf_report_jobs, newline_on_success=True, maxjobs=1)
-  jobset.message('END', 'Collecting perf reports from qps workers', do_newline=True)
-  return failures
+    jobset.message(
+        'START', 'Collecting perf reports from qps workers', do_newline=True)
+    failures, _ = jobset.run(
+        perf_report_jobs, newline_on_success=True, maxjobs=1)
+    jobset.message(
+        'END', 'Collecting perf reports from qps workers', do_newline=True)
+    return failures
+
 
 def main():
-  argp = argparse.ArgumentParser(description='Run performance tests.')
-  argp.add_argument('-l', '--language',
-                    choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
-                    nargs='+',
-                    required=True,
-                    help='Languages to benchmark.')
-  argp.add_argument('--remote_driver_host',
-                    default=None,
-                    help='Run QPS driver on given host. By default, QPS driver is run locally.')
-  argp.add_argument('--remote_worker_host',
-                    nargs='+',
-                    default=[],
-                    help='Worker hosts where to start QPS workers.')
-  argp.add_argument('--dry_run',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Just list scenarios to be run, but don\'t run them.')
-  argp.add_argument('-r', '--regex', default='.*', type=str,
-                    help='Regex to select scenarios to run.')
-  argp.add_argument('--bq_result_table', default=None, type=str,
-                    help='Bigquery "dataset.table" to upload results to.')
-  argp.add_argument('--category',
-                    choices=['smoketest','all','scalable','sweep'],
-                    default='all',
-                    help='Select a category of tests to run.')
-  argp.add_argument('--netperf',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Run netperf benchmark as one of the scenarios.')
-  argp.add_argument('--server_cpu_load',
-                    default=0, type=int,
-                    help='Select a targeted server cpu load to run. 0 means ignore this flag')
-  argp.add_argument('-x', '--xml_report', default='report.xml', type=str,
-                    help='Name of XML report file to generate.')
-  argp.add_argument('--perf_args',
-                    help=('Example usage: "--perf_args=record -F 99 -g". '
-                          'Wrap QPS workers in a perf command '
-                          'with the arguments to perf specified here. '
-                          '".svg" flame graph profiles will be '
-                          'created for each Qps Worker on each scenario. '
-                          'Files will output to "<repo_root>/<args.flame_graph_reports>" '
-                          'directory. Output files from running the worker '
-                          'under perf are saved in the repo root where its ran. '
-                          'Note that the perf "-g" flag is necessary for '
-                          'flame graphs generation to work (assuming the binary '
-                          'being profiled uses frame pointers, check out '
-                          '"--call-graph dwarf" option using libunwind otherwise.) '
-                          'Also note that the entire "--perf_args=<arg(s)>" must '
-                          'be wrapped in quotes as in the example usage. '
-                          'If the "--perg_args" is unspecified, "perf" will '
-                          'not be used at all. '
-                          'See http://www.brendangregg.com/perf.html '
-                          'for more general perf examples.'))
-  argp.add_argument('--skip_generate_flamegraphs',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help=('Turn flame graph generation off. '
-                          'May be useful if "perf_args" arguments do not make sense for '
-                          'generating flamegraphs (e.g., "--perf_args=stat ...")'))
-  argp.add_argument('-f', '--flame_graph_reports', default='perf_reports', type=str,
-                    help='Name of directory to output flame graph profiles to, if any are created.')
+    argp = argparse.ArgumentParser(description='Run performance tests.')
+    argp.add_argument(
+        '-l',
+        '--language',
+        choices=['all'] + sorted(scenario_config.LANGUAGES.keys()),
+        nargs='+',
+        required=True,
+        help='Languages to benchmark.')
+    argp.add_argument(
+        '--remote_driver_host',
+        default=None,
+        help=
+        'Run the QPS driver on the given host. By default, the QPS driver runs locally.')
+    argp.add_argument(
+        '--remote_worker_host',
+        nargs='+',
+        default=[],
+        help='Worker hosts on which to start QPS workers.')
+    argp.add_argument(
+        '--dry_run',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Just list scenarios to be run, but don\'t run them.')
+    argp.add_argument(
+        '-r',
+        '--regex',
+        default='.*',
+        type=str,
+        help='Regex to select scenarios to run.')
+    argp.add_argument(
+        '--bq_result_table',
+        default=None,
+        type=str,
+        help='Bigquery "dataset.table" to upload results to.')
+    argp.add_argument(
+        '--category',
+        choices=['smoketest', 'all', 'scalable', 'sweep'],
+        default='all',
+        help='Select a category of tests to run.')
+    argp.add_argument(
+        '--netperf',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Run netperf benchmark as one of the scenarios.')
+    argp.add_argument(
+        '--server_cpu_load',
+        default=0,
+        type=int,
+        help='Select a target server CPU load to run with. 0 means ignore this flag.'
+    )
+    argp.add_argument(
+        '-x',
+        '--xml_report',
+        default='report.xml',
+        type=str,
+        help='Name of XML report file to generate.')
+    argp.add_argument(
+        '--perf_args',
+        help=('Example usage: "--perf_args=record -F 99 -g". '
+              'Wrap QPS workers in a perf command '
+              'with the arguments to perf specified here. '
+              '".svg" flame graph profiles will be '
+              'created for each QPS worker on each scenario. '
+              'Files will be output to the "<repo_root>/<args.flame_graph_reports>" '
+              'directory. Output files from running the worker '
+              'under perf are saved in the repo root where it is run. '
+              'Note that the perf "-g" flag is necessary for '
+              'flame graph generation to work (assuming the binary '
+              'being profiled uses frame pointers; otherwise check out '
+              'the "--call-graph dwarf" option, which uses libunwind). '
+              'Also note that the entire "--perf_args=<arg(s)>" must '
+              'be wrapped in quotes as in the example usage. '
+              'If "--perf_args" is unspecified, "perf" will '
+              'not be used at all. '
+              'See http://www.brendangregg.com/perf.html '
+              'for more general perf examples.'))
+    argp.add_argument(
+        '--skip_generate_flamegraphs',
+        default=False,
+        action='store_const',
+        const=True,
+        help=('Turn flame graph generation off. '
+              'May be useful if "perf_args" arguments do not make sense for '
+              'generating flamegraphs (e.g., "--perf_args=stat ...")'))
+    argp.add_argument(
+        '-f',
+        '--flame_graph_reports',
+        default='perf_reports',
+        type=str,
+        help=
+        'Name of directory to output flame graph profiles to, if any are created.'
+    )
+    argp.add_argument(
+        '-u',
+        '--remote_host_username',
+        default='',
+        type=str,
+        help='Use a username that isn\'t "Jenkins" to SSH into remote workers.')
 
-  args = argp.parse_args()
+    args = argp.parse_args()
 
-  languages = set(scenario_config.LANGUAGES[l]
-                  for l in itertools.chain.from_iterable(
-                        six.iterkeys(scenario_config.LANGUAGES) if x == 'all'
-                        else [x] for x in args.language))
+    global _REMOTE_HOST_USERNAME
+    if args.remote_host_username:
+        _REMOTE_HOST_USERNAME = args.remote_host_username
 
+    languages = set(
+        scenario_config.LANGUAGES[l]
+        for l in itertools.chain.from_iterable(
+            six.iterkeys(scenario_config.LANGUAGES) if x == 'all' else [x]
+            for x in args.language))
 
-  # Put together set of remote hosts where to run and build
-  remote_hosts = set()
-  if args.remote_worker_host:
-    for host in args.remote_worker_host:
-      remote_hosts.add(host)
-  if args.remote_driver_host:
-    remote_hosts.add(args.remote_driver_host)
+    # Put together set of remote hosts where to run and build
+    remote_hosts = set()
+    if args.remote_worker_host:
+        for host in args.remote_worker_host:
+            remote_hosts.add(host)
+    if args.remote_driver_host:
+        remote_hosts.add(args.remote_driver_host)
 
-  if not args.dry_run:
-    if remote_hosts:
-      archive_repo(languages=[str(l) for l in languages])
-      prepare_remote_hosts(remote_hosts, prepare_local=True)
-    else:
-      prepare_remote_hosts([], prepare_local=True)
+    if not args.dry_run:
+        if remote_hosts:
+            archive_repo(languages=[str(l) for l in languages])
+            prepare_remote_hosts(remote_hosts, prepare_local=True)
+        else:
+            prepare_remote_hosts([], prepare_local=True)
 
-  build_local = False
-  if not args.remote_driver_host:
-    build_local = True
-  if not args.dry_run:
-    build_on_remote_hosts(remote_hosts, languages=[str(l) for l in languages], build_local=build_local)
+    build_local = False
+    if not args.remote_driver_host:
+        build_local = True
+    if not args.dry_run:
+        build_on_remote_hosts(
+            remote_hosts,
+            languages=[str(l) for l in languages],
+            build_local=build_local)
 
-  perf_cmd = None
-  if args.perf_args:
-    print('Running workers under perf profiler')
-    # Expect /usr/bin/perf to be installed here, as is usual
-    perf_cmd = ['/usr/bin/perf']
-    perf_cmd.extend(re.split('\s+', args.perf_args))
+    perf_cmd = None
+    if args.perf_args:
+        print('Running workers under perf profiler')
+        # Expect /usr/bin/perf to be installed here, as is usual
+        perf_cmd = ['/usr/bin/perf']
+        perf_cmd.extend(re.split(r'\s+', args.perf_args))
 
-  qpsworker_jobs = create_qpsworkers(languages, args.remote_worker_host, perf_cmd=perf_cmd)
+    qpsworker_jobs = create_qpsworkers(
+        languages, args.remote_worker_host, perf_cmd=perf_cmd)
 
-  # get list of worker addresses for each language.
-  workers_by_lang = dict([(str(language), []) for language in languages])
-  for job in qpsworker_jobs:
-    workers_by_lang[str(job.language)].append(job)
+    # get list of worker addresses for each language.
+    workers_by_lang = dict([(str(language), []) for language in languages])
+    for job in qpsworker_jobs:
+        workers_by_lang[str(job.language)].append(job)
 
-  scenarios = create_scenarios(languages,
-                             workers_by_lang=workers_by_lang,
-                             remote_host=args.remote_driver_host,
-                             regex=args.regex,
-                             category=args.category,
-                             bq_result_table=args.bq_result_table,
-                             netperf=args.netperf,
-                             netperf_hosts=args.remote_worker_host,
-                             server_cpu_load=args.server_cpu_load)
+    scenarios = create_scenarios(
+        languages,
+        workers_by_lang=workers_by_lang,
+        remote_host=args.remote_driver_host,
+        regex=args.regex,
+        category=args.category,
+        bq_result_table=args.bq_result_table,
+        netperf=args.netperf,
+        netperf_hosts=args.remote_worker_host,
+        server_cpu_load=args.server_cpu_load)
 
-  if not scenarios:
-    raise Exception('No scenarios to run')
+    if not scenarios:
+        raise Exception('No scenarios to run')
 
-  total_scenario_failures = 0
-  qps_workers_killed = 0
-  merged_resultset = {}
-  perf_report_failures = 0
+    total_scenario_failures = 0
+    qps_workers_killed = 0
+    merged_resultset = {}
+    perf_report_failures = 0
 
-  for scenario in scenarios:
-    if args.dry_run:
-      print(scenario.name)
-    else:
-      scenario_failures = 0
-      try:
-        for worker in scenario.workers:
-          worker.start()
-        jobs = [scenario.jobspec]
-        if scenario.workers:
-          jobs.append(create_quit_jobspec(scenario.workers, remote_host=args.remote_driver_host))
-        scenario_failures, resultset = jobset.run(jobs, newline_on_success=True, maxjobs=1)
-        total_scenario_failures += scenario_failures
-        merged_resultset = dict(itertools.chain(six.iteritems(merged_resultset),
-                                                six.iteritems(resultset)))
-      finally:
-        # Consider qps workers that need to be killed as failures
-        qps_workers_killed += finish_qps_workers(scenario.workers, qpsworker_jobs)
+    for scenario in scenarios:
+        if args.dry_run:
+            print(scenario.name)
+        else:
+            scenario_failures = 0
+            try:
+                for worker in scenario.workers:
+                    worker.start()
+                jobs = [scenario.jobspec]
+                if scenario.workers:
+                    jobs.append(
+                        create_quit_jobspec(
+                            scenario.workers,
+                            remote_host=args.remote_driver_host))
+                scenario_failures, resultset = jobset.run(
+                    jobs, newline_on_success=True, maxjobs=1)
+                total_scenario_failures += scenario_failures
+                merged_resultset = dict(
+                    itertools.chain(
+                        six.iteritems(merged_resultset),
+                        six.iteritems(resultset)))
+            finally:
+                # Consider qps workers that need to be killed as failures
+                qps_workers_killed += finish_qps_workers(
+                    scenario.workers, qpsworker_jobs)
 
-      if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
-        workers_and_base_names = {}
-        for worker in scenario.workers:
-          if not worker.perf_file_base_name:
-            raise Exception('using perf buf perf report filename is unspecified')
-          workers_and_base_names[worker.host_and_port] = worker.perf_file_base_name
-        perf_report_failures += run_collect_perf_profile_jobs(workers_and_base_names, scenario.name, args.flame_graph_reports)
+            if perf_cmd and scenario_failures == 0 and not args.skip_generate_flamegraphs:
+                workers_and_base_names = {}
+                for worker in scenario.workers:
+                    if not worker.perf_file_base_name:
+                        raise Exception(
+                            'using perf but the perf report filename is unspecified'
+                        )
+                    workers_and_base_names[
+                        worker.host_and_port] = worker.perf_file_base_name
+                perf_report_failures += run_collect_perf_profile_jobs(
+                    workers_and_base_names, scenario.name,
+                    args.flame_graph_reports)
 
+    # Still write the index.html even if some scenarios failed.
+    # 'profile_output_files' will only have names for scenarios that passed
+    if perf_cmd and not args.skip_generate_flamegraphs:
+        # write the index file to the output dir, with all profiles from all scenarios/workers
+        report_utils.render_perf_profiling_results(
+            '%s/index.html' % args.flame_graph_reports, profile_output_files)
 
-  # Still write the index.html even if some scenarios failed.
-  # 'profile_output_files' will only have names for scenarios that passed
-  if perf_cmd and not args.skip_generate_flamegraphs:
-    # write the index fil to the output dir, with all profiles from all scenarios/workers
-    report_utils.render_perf_profiling_results('%s/index.html' % args.flame_graph_reports, profile_output_files)
+    report_utils.render_junit_xml_report(
+        merged_resultset, args.xml_report, suite_name='benchmarks')
 
-  report_utils.render_junit_xml_report(merged_resultset, args.xml_report,
-                                       suite_name='benchmarks')
+    if total_scenario_failures > 0 or qps_workers_killed > 0:
+        print('%s scenarios failed and %s qps worker jobs killed' %
+              (total_scenario_failures, qps_workers_killed))
+        sys.exit(1)
 
-  if total_scenario_failures > 0 or qps_workers_killed > 0:
-    print('%s scenarios failed and %s qps worker jobs killed' % (total_scenario_failures, qps_workers_killed))
-    sys.exit(1)
+    if perf_report_failures > 0:
+        print('%s perf profile collection jobs failed' % perf_report_failures)
+        sys.exit(1)
 
-  if perf_report_failures > 0:
-    print('%s perf profile collection jobs failed' % perf_report_failures)
-    sys.exit(1)
 
 if __name__ == "__main__":
-  main()
+    main()
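
The SERVER_LANGUAGE/CLIENT_LANGUAGE handling reformatted above is easy to lose track of at the deeper indentation, so here is a minimal standalone sketch of the same substitution idea. The function name, the plain-string workers, and the workers_by_lang dict below are hypothetical stand-ins for illustration only, not the real qpsworker job objects used by run_performance_tests.py.

    # Sketch: start from the scenario language's own worker list, then swap in
    # servers and/or clients of another language, mirroring create_scenarios().
    def pick_workers(workers_by_lang, scenario_lang, num_servers,
                     custom_server_lang=None, custom_client_lang=None):
        workers = workers_by_lang[scenario_lang][:]  # copy, as in the real script
        if custom_server_lang:
            # the first num_servers entries act as servers
            for idx in range(0, num_servers):
                workers[idx] = workers_by_lang[custom_server_lang][idx]
        if custom_client_lang:
            # the remaining entries act as clients
            for idx in range(num_servers, len(workers)):
                workers[idx] = workers_by_lang[custom_client_lang][idx]
        return workers

    # Example: a C++ server benchmarked against a Python client.
    workers_by_lang = {
        'c++': ['cxx-worker-0', 'cxx-worker-1'],
        'python': ['py-worker-0', 'py-worker-1'],
    }
    print(pick_workers(workers_by_lang, 'python', num_servers=1,
                       custom_server_lang='c++'))
    # prints: ['cxx-worker-0', 'py-worker-1']
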
diff --git a/tools/run_tests/run_tests.py b/tools/run_tests/run_tests.py
index 766c1c0..c8e917f 100755
--- a/tools/run_tests/run_tests.py
+++ b/tools/run_tests/run_tests.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run tests in parallel."""
 
 from __future__ import print_function
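
A later hunk in this file reformats the loop that splits a gtest binary into one JobSpec per test by parsing the binary's --gtest_list_tests output. As a minimal sketch of just that parsing step (the helper name and sample output below are hypothetical; the real code additionally builds JobSpecs with shortnames, cpu costs, and timeouts):

    # Sketch: turn `--gtest_list_tests` output into fully qualified test names.
    # gtest prints suite names flush-left (ending in '.') and test names indented
    # by two spaces; '#' starts a trailing comment that must be stripped.
    def parse_gtest_list(output):
        tests = []
        base = None
        for line in output.split('\n'):
            i = line.find('#')
            if i >= 0:
                line = line[:i]
            if not line.strip():
                continue
            if line[0] != ' ':
                base = line.strip()                # e.g. 'ChannelTest.'
            else:
                assert base is not None
                tests.append(base + line.strip())  # e.g. 'ChannelTest.Basic'
        return tests

    example = 'ChannelTest.\n  Basic\n  Shutdown  # a comment\n'
    print(parse_gtest_list(example))
    # prints: ['ChannelTest.Basic', 'ChannelTest.Shutdown']
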
@@ -46,36 +45,34 @@
 import python_utils.watch_dirs as watch_dirs
 import python_utils.start_port_server as start_port_server
 try:
-  from python_utils.upload_test_results import upload_results_to_bq
+    from python_utils.upload_test_results import upload_results_to_bq
 except (ImportError):
-  pass # It's ok to not import because this is only necessary to upload results to BQ.
+    pass  # It's ok to not import because this is only necessary to upload results to BQ.
 
-gcp_utils_dir = os.path.abspath(os.path.join(
-        os.path.dirname(__file__), '../gcp/utils'))
+gcp_utils_dir = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../gcp/utils'))
 sys.path.append(gcp_utils_dir)
 
 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(_ROOT)
 
-
 _FORCE_ENVIRON_FOR_WRAPPERS = {
-  'GRPC_VERBOSITY': 'DEBUG',
+    'GRPC_VERBOSITY': 'DEBUG',
 }
 
 _POLLING_STRATEGIES = {
-  'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
-  'mac': ['poll'],
+    'linux': ['epollex', 'epollsig', 'epoll1', 'poll', 'poll-cv'],
+    'mac': ['poll'],
 }
 
-
 BigQueryTestData = collections.namedtuple('BigQueryTestData', 'name flaky cpu')
 
 
 def get_bqtest_data(limit=None):
-  import big_query_utils
+    import big_query_utils
 
-  bq = big_query_utils.create_big_query()
-  query = """
+    bq = big_query_utils.create_big_query()
+    query = """
 SELECT
   filtered_test_name,
   SUM(result != 'PASSED' AND result != 'SKIPPED') > 0 as flaky,
@@ -88,941 +85,1085 @@
     [grpc-testing:jenkins_test_results.aggregate_results]
   WHERE
     timestamp >= DATE_ADD(CURRENT_DATE(), -1, "WEEK")
-    AND platform = '"""+platform_string()+"""'
+    AND platform = '""" + platform_string() + """'
     AND NOT REGEXP_MATCH(job_name, '.*portability.*') )
 GROUP BY
   filtered_test_name"""
-  if limit:
-    query += " limit {}".format(limit)
-  query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
-  page = bq.jobs().getQueryResults(
-      pageToken=None,
-      **query_job['jobReference']).execute(num_retries=3)
-  test_data = [BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true', float(row['f'][2]['v'])) for row in page['rows']]
-  return test_data
+    if limit:
+        query += " limit {}".format(limit)
+    query_job = big_query_utils.sync_query_job(bq, 'grpc-testing', query)
+    page = bq.jobs().getQueryResults(
+        pageToken=None, **query_job['jobReference']).execute(num_retries=3)
+    test_data = [
+        BigQueryTestData(row['f'][0]['v'], row['f'][1]['v'] == 'true',
+                         float(row['f'][2]['v'])) for row in page['rows']
+    ]
+    return test_data
 
 
 def platform_string():
-  return jobset.platform_string()
+    return jobset.platform_string()
 
 
 _DEFAULT_TIMEOUT_SECONDS = 5 * 60
 
+
 def run_shell_command(cmd, env=None, cwd=None):
-  try:
-    subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
-  except subprocess.CalledProcessError as e:
-    logging.exception("Error while running command '%s'. Exit status %d. Output:\n%s",
-                       e.cmd, e.returncode, e.output)
-    raise
+    try:
+        subprocess.check_output(cmd, shell=True, env=env, cwd=cwd)
+    except subprocess.CalledProcessError as e:
+        logging.exception(
+            "Error while running command '%s'. Exit status %d. Output:\n%s",
+            e.cmd, e.returncode, e.output)
+        raise
+
 
 def max_parallel_tests_for_current_platform():
-  # Too much test parallelization has only been seen to be a problem
-  # so far on windows.
-  if jobset.platform_string() == 'windows':
-    return 64
-  return 1024
+    # Too much test parallelization has only been seen to be a problem
+    # so far on windows.
+    if jobset.platform_string() == 'windows':
+        return 64
+    return 1024
+
 
 # SimpleConfig: just compile with CONFIG=config, and run the binary to test
 class Config(object):
 
-  def __init__(self, config, environ=None, timeout_multiplier=1, tool_prefix=[], iomgr_platform='native'):
-    if environ is None:
-      environ = {}
-    self.build_config = config
-    self.environ = environ
-    self.environ['CONFIG'] = config
-    self.tool_prefix = tool_prefix
-    self.timeout_multiplier = timeout_multiplier
-    self.iomgr_platform = iomgr_platform
+    def __init__(self,
+                 config,
+                 environ=None,
+                 timeout_multiplier=1,
+                 tool_prefix=[],
+                 iomgr_platform='native'):
+        if environ is None:
+            environ = {}
+        self.build_config = config
+        self.environ = environ
+        self.environ['CONFIG'] = config
+        self.tool_prefix = tool_prefix
+        self.timeout_multiplier = timeout_multiplier
+        self.iomgr_platform = iomgr_platform
 
-  def job_spec(self, cmdline, timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
-               shortname=None, environ={}, cpu_cost=1.0, flaky=False):
-    """Construct a jobset.JobSpec for a test under this config
+    def job_spec(self,
+                 cmdline,
+                 timeout_seconds=_DEFAULT_TIMEOUT_SECONDS,
+                 shortname=None,
+                 environ={},
+                 cpu_cost=1.0,
+                 flaky=False):
+        """Construct a jobset.JobSpec for a test under this config
 
        Args:
          cmdline:      a list of strings specifying the command line the test
                        would like to run
     """
-    actual_environ = self.environ.copy()
-    for k, v in environ.items():
-      actual_environ[k] = v
-    if not flaky and shortname and shortname in flaky_tests:
-      flaky = True
-    if shortname in shortname_to_cpu:
-      cpu_cost = shortname_to_cpu[shortname]
-    return jobset.JobSpec(cmdline=self.tool_prefix + cmdline,
-                          shortname=shortname,
-                          environ=actual_environ,
-                          cpu_cost=cpu_cost,
-                          timeout_seconds=(self.timeout_multiplier * timeout_seconds if timeout_seconds else None),
-                          flake_retries=4 if flaky or args.allow_flakes else 0,
-                          timeout_retries=1 if flaky or args.allow_flakes else 0)
+        actual_environ = self.environ.copy()
+        for k, v in environ.items():
+            actual_environ[k] = v
+        if not flaky and shortname and shortname in flaky_tests:
+            flaky = True
+        if shortname in shortname_to_cpu:
+            cpu_cost = shortname_to_cpu[shortname]
+        return jobset.JobSpec(
+            cmdline=self.tool_prefix + cmdline,
+            shortname=shortname,
+            environ=actual_environ,
+            cpu_cost=cpu_cost,
+            timeout_seconds=(self.timeout_multiplier * timeout_seconds
+                             if timeout_seconds else None),
+            flake_retries=4 if flaky or args.allow_flakes else 0,
+            timeout_retries=1 if flaky or args.allow_flakes else 0)
 
 
-def get_c_tests(travis, test_lang) :
-  out = []
-  platforms_str = 'ci_platforms' if travis else 'platforms'
-  with open('tools/run_tests/generated/tests.json') as f:
-    js = json.load(f)
-    return [tgt
-            for tgt in js
-            if tgt['language'] == test_lang and
-                platform_string() in tgt[platforms_str] and
-                not (travis and tgt['flaky'])]
+def get_c_tests(travis, test_lang):
+    out = []
+    platforms_str = 'ci_platforms' if travis else 'platforms'
+    with open('tools/run_tests/generated/tests.json') as f:
+        js = json.load(f)
+        return [
+            tgt for tgt in js
+            if tgt['language'] == test_lang and platform_string() in
+            tgt[platforms_str] and not (travis and tgt['flaky'])
+        ]
 
 
 def _check_compiler(compiler, supported_compilers):
-  if compiler not in supported_compilers:
-    raise Exception('Compiler %s not supported (on this platform).' % compiler)
+    if compiler not in supported_compilers:
+        raise Exception(
+            'Compiler %s not supported (on this platform).' % compiler)
 
 
 def _check_arch(arch, supported_archs):
-  if arch not in supported_archs:
-    raise Exception('Architecture %s not supported.' % arch)
+    if arch not in supported_archs:
+        raise Exception('Architecture %s not supported.' % arch)
 
 
 def _is_use_docker_child():
-  """Returns True if running running as a --use_docker child."""
-  return True if os.getenv('RUN_TESTS_COMMAND') else False
+    """Returns True if running running as a --use_docker child."""
+    return True if os.getenv('RUN_TESTS_COMMAND') else False
 
 
-_PythonConfigVars = collections.namedtuple(
-  '_ConfigVars', ['shell', 'builder', 'builder_prefix_arguments',
-                  'venv_relative_python', 'toolchain', 'runner'])
+_PythonConfigVars = collections.namedtuple('_ConfigVars', [
+    'shell', 'builder', 'builder_prefix_arguments', 'venv_relative_python',
+    'toolchain', 'runner'
+])
 
 
 def _python_config_generator(name, major, minor, bits, config_vars):
-  return PythonConfig(
-    name,
-    config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
-      _python_pattern_function(major=major, minor=minor, bits=bits)] + [
-      name] + config_vars.venv_relative_python + config_vars.toolchain,
-    config_vars.shell + config_vars.runner + [
-      os.path.join(name, config_vars.venv_relative_python[0])])
+    return PythonConfig(
+        name, config_vars.shell + config_vars.builder +
+        config_vars.builder_prefix_arguments + [
+            _python_pattern_function(major=major, minor=minor, bits=bits)
+        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
+        config_vars.shell + config_vars.runner +
+        [os.path.join(name, config_vars.venv_relative_python[0])])
 
 
 def _pypy_config_generator(name, major, config_vars):
-  return PythonConfig(
-    name,
-    config_vars.shell + config_vars.builder + config_vars.builder_prefix_arguments + [
-      _pypy_pattern_function(major=major)] + [
-      name] + config_vars.venv_relative_python + config_vars.toolchain,
-    config_vars.shell + config_vars.runner + [
-      os.path.join(name, config_vars.venv_relative_python[0])])
+    return PythonConfig(
+        name,
+        config_vars.shell + config_vars.builder +
+        config_vars.builder_prefix_arguments + [
+            _pypy_pattern_function(major=major)
+        ] + [name] + config_vars.venv_relative_python + config_vars.toolchain,
+        config_vars.shell + config_vars.runner +
+        [os.path.join(name, config_vars.venv_relative_python[0])])
 
 
 def _python_pattern_function(major, minor, bits):
-  # Bit-ness is handled by the test machine's environment
-  if os.name == "nt":
-    if bits == "64":
-      return '/c/Python{major}{minor}/python.exe'.format(
-        major=major, minor=minor, bits=bits)
+    # Bit-ness is handled by the test machine's environment
+    if os.name == "nt":
+        if bits == "64":
+            return '/c/Python{major}{minor}/python.exe'.format(
+                major=major, minor=minor, bits=bits)
+        else:
+            return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
+                major=major, minor=minor, bits=bits)
     else:
-      return '/c/Python{major}{minor}_{bits}bits/python.exe'.format(
-        major=major, minor=minor, bits=bits)
-  else:
-    return 'python{major}.{minor}'.format(major=major, minor=minor)
+        return 'python{major}.{minor}'.format(major=major, minor=minor)
 
 
 def _pypy_pattern_function(major):
-  if major == '2':
-    return 'pypy'
-  elif major == '3':
-    return 'pypy3'
-  else:
-    raise ValueError("Unknown PyPy major version")
+    if major == '2':
+        return 'pypy'
+    elif major == '3':
+        return 'pypy3'
+    else:
+        raise ValueError("Unknown PyPy major version")
 
 
 class CLanguage(object):
 
-  def __init__(self, make_target, test_lang):
-    self.make_target = make_target
-    self.platform = platform_string()
-    self.test_lang = test_lang
+    def __init__(self, make_target, test_lang):
+        self.make_target = make_target
+        self.platform = platform_string()
+        self.test_lang = test_lang
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    if self.platform == 'windows':
-      _check_compiler(self.args.compiler, ['default', 'cmake', 'cmake_vs2015',
-                                           'cmake_vs2017'])
-      _check_arch(self.args.arch, ['default', 'x64', 'x86'])
-      self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
-      self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
-      self._use_cmake = True
-      self._make_options = []
-    elif self.args.compiler == 'cmake':
-      _check_arch(self.args.arch, ['default'])
-      self._use_cmake = True
-      self._docker_distro = 'jessie'
-      self._make_options = []
-    else:
-      self._use_cmake = False
-      self._docker_distro, self._make_options = self._compiler_options(self.args.use_docker,
-                                                                       self.args.compiler)
-    if args.iomgr_platform == "uv":
-      cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
-      try:
-        cflags += subprocess.check_output(['pkg-config', '--cflags', 'libuv']).strip() + ' '
-      except (subprocess.CalledProcessError, OSError):
-        pass
-      try:
-        ldflags = subprocess.check_output(['pkg-config', '--libs', 'libuv']).strip() + ' '
-      except (subprocess.CalledProcessError, OSError):
-        ldflags = '-luv '
-      self._make_options += ['EXTRA_CPPFLAGS={}'.format(cflags),
-                             'EXTRA_LDLIBS={}'.format(ldflags)]
-
-  def test_specs(self):
-    out = []
-    binaries = get_c_tests(self.args.travis, self.test_lang)
-    for target in binaries:
-      if self._use_cmake and target.get('boringssl', False):
-        # cmake doesn't build boringssl tests
-        continue
-      auto_timeout_scaling = target.get('auto_timeout_scaling', True)
-      polling_strategies = (_POLLING_STRATEGIES.get(self.platform, ['all'])
-                            if target.get('uses_polling', True)
-                            else ['none'])
-      if self.args.iomgr_platform == 'uv':
-        polling_strategies = ['all']
-      for polling_strategy in polling_strategies:
-        env={'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
-                 _ROOT + '/src/core/tsi/test_creds/ca.pem',
-             'GRPC_POLL_STRATEGY': polling_strategy,
-             'GRPC_VERBOSITY': 'DEBUG'}
-        resolver = os.environ.get('GRPC_DNS_RESOLVER', None);
-        if resolver:
-          env['GRPC_DNS_RESOLVER'] = resolver
-        shortname_ext = '' if polling_strategy=='all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
-        if polling_strategy in target.get('excluded_poll_engines', []):
-          continue
-
-        timeout_scaling = 1
-        if auto_timeout_scaling:
-          config = self.args.config
-          if ('asan' in config
-              or config == 'msan'
-              or config == 'tsan'
-              or config == 'ubsan'
-              or config == 'helgrind'
-              or config == 'memcheck'):
-            # Scale overall test timeout if running under various sanitizers.
-            # scaling value is based on historical data analysis
-            timeout_scaling *= 3
-          elif polling_strategy == 'poll-cv':
-            # scale test timeout if running with poll-cv
-            # sanitizer and poll-cv scaling is not cumulative to ensure
-            # reasonable timeout values.
-            # TODO(jtattermusch): based on historical data and 5min default
-            # test timeout poll-cv scaling is currently not useful.
-            # Leaving here so it can be reintroduced if the default test timeout
-            # is decreased in the future.
-            timeout_scaling *= 1
-
-        if self.config.build_config in target['exclude_configs']:
-          continue
-        if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
-          continue
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
         if self.platform == 'windows':
-          binary = 'cmake/build/%s/%s.exe' % (_MSBUILD_CONFIG[self.config.build_config], target['name'])
+            _check_compiler(
+                self.args.compiler,
+                ['default', 'cmake', 'cmake_vs2015', 'cmake_vs2017'])
+            _check_arch(self.args.arch, ['default', 'x64', 'x86'])
+            self._cmake_generator_option = 'Visual Studio 15 2017' if self.args.compiler == 'cmake_vs2017' else 'Visual Studio 14 2015'
+            self._cmake_arch_option = 'x64' if self.args.arch == 'x64' else 'Win32'
+            self._use_cmake = True
+            self._make_options = []
+        elif self.args.compiler == 'cmake':
+            _check_arch(self.args.arch, ['default'])
+            self._use_cmake = True
+            self._docker_distro = 'jessie'
+            self._make_options = []
         else:
-          if self._use_cmake:
-            binary = 'cmake/build/%s' % target['name']
-          else:
-            binary = 'bins/%s/%s' % (self.config.build_config, target['name'])
-        cpu_cost = target['cpu_cost']
-        if cpu_cost == 'capacity':
-          cpu_cost = multiprocessing.cpu_count()
-        if os.path.isfile(binary):
-          list_test_command = None
-          filter_test_command = None
+            self._use_cmake = False
+            self._docker_distro, self._make_options = self._compiler_options(
+                self.args.use_docker, self.args.compiler)
+        if args.iomgr_platform == "uv":
+            cflags = '-DGRPC_UV -DGRPC_UV_THREAD_CHECK'
+            try:
+                cflags += subprocess.check_output(
+                    ['pkg-config', '--cflags', 'libuv']).strip() + ' '
+            except (subprocess.CalledProcessError, OSError):
+                pass
+            try:
+                ldflags = subprocess.check_output(
+                    ['pkg-config', '--libs', 'libuv']).strip() + ' '
+            except (subprocess.CalledProcessError, OSError):
+                ldflags = '-luv '
+            self._make_options += [
+                'EXTRA_CPPFLAGS={}'.format(cflags),
+                'EXTRA_LDLIBS={}'.format(ldflags)
+            ]
 
-          # these are the flag defined by gtest and benchmark framework to list
-          # and filter test runs. We use them to split each individual test
-          # into its own JobSpec, and thus into its own process.
-          if 'benchmark' in target and target['benchmark']:
-            with open(os.devnull, 'w') as fnull:
-              tests = subprocess.check_output([binary, '--benchmark_list_tests'],
-                                              stderr=fnull)
-            for line in tests.split('\n'):
-              test = line.strip()
-              if not test: continue
-              cmdline = [binary, '--benchmark_filter=%s$' % test] + target['args']
-              out.append(self.config.job_spec(cmdline,
-                                              shortname='%s %s' % (' '.join(cmdline), shortname_ext),
-                                              cpu_cost=cpu_cost,
-                                              timeout_seconds=_DEFAULT_TIMEOUT_SECONDS * timeout_scaling,
-                                              environ=env))
-          elif 'gtest' in target and target['gtest']:
-            # here we parse the output of --gtest_list_tests to build up a complete
-            # list of the tests contained in a binary for each test, we then
-            # add a job to run, filtering for just that test.
-            with open(os.devnull, 'w') as fnull:
-              tests = subprocess.check_output([binary, '--gtest_list_tests'],
-                                              stderr=fnull)
-            base = None
-            for line in tests.split('\n'):
-              i = line.find('#')
-              if i >= 0: line = line[:i]
-              if not line: continue
-              if line[0] != ' ':
-                base = line.strip()
-              else:
-                assert base is not None
-                assert line[1] == ' '
-                test = base + line.strip()
-                cmdline = [binary, '--gtest_filter=%s' % test] + target['args']
-                out.append(self.config.job_spec(cmdline,
-                                                shortname='%s %s' % (' '.join(cmdline), shortname_ext),
-                                                cpu_cost=cpu_cost,
-                                                timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
-                                                environ=env))
-          else:
-            cmdline = [binary] + target['args']
-            shortname = target.get('shortname', ' '.join(
-                          pipes.quote(arg)
-                          for arg in cmdline))
-            shortname += shortname_ext
-            out.append(self.config.job_spec(cmdline,
-                                            shortname=shortname,
-                                            cpu_cost=cpu_cost,
-                                            flaky=target.get('flaky', False),
-                                            timeout_seconds=target.get('timeout_seconds', _DEFAULT_TIMEOUT_SECONDS) * timeout_scaling,
-                                            environ=env))
-        elif self.args.regex == '.*' or self.platform == 'windows':
-          print('\nWARNING: binary not found, skipping', binary)
-    return sorted(out)
+    def test_specs(self):
+        out = []
+        binaries = get_c_tests(self.args.travis, self.test_lang)
+        for target in binaries:
+            if self._use_cmake and target.get('boringssl', False):
+                # cmake doesn't build boringssl tests
+                continue
+            auto_timeout_scaling = target.get('auto_timeout_scaling', True)
+            polling_strategies = (_POLLING_STRATEGIES.get(
+                self.platform, ['all']) if target.get('uses_polling', True) else
+                                  ['none'])
+            if self.args.iomgr_platform == 'uv':
+                polling_strategies = ['all']
+            for polling_strategy in polling_strategies:
+                env = {
+                    'GRPC_DEFAULT_SSL_ROOTS_FILE_PATH':
+                    _ROOT + '/src/core/tsi/test_creds/ca.pem',
+                    'GRPC_POLL_STRATEGY':
+                    polling_strategy,
+                    'GRPC_VERBOSITY':
+                    'DEBUG'
+                }
+                resolver = os.environ.get('GRPC_DNS_RESOLVER', None)
+                if resolver:
+                    env['GRPC_DNS_RESOLVER'] = resolver
+                shortname_ext = '' if polling_strategy == 'all' else ' GRPC_POLL_STRATEGY=%s' % polling_strategy
+                if polling_strategy in target.get('excluded_poll_engines', []):
+                    continue
 
-  def make_targets(self):
-    if self.platform == 'windows':
-      # don't build tools on windows just yet
-      return ['buildtests_%s' % self.make_target]
-    return ['buildtests_%s' % self.make_target, 'tools_%s' % self.make_target,
-            'check_epollexclusive']
+                timeout_scaling = 1
+                if auto_timeout_scaling:
+                    config = self.args.config
+                    if ('asan' in config or config == 'msan' or
+                            config == 'tsan' or config == 'ubsan' or
+                            config == 'helgrind' or config == 'memcheck'):
+                        # Scale overall test timeout if running under various sanitizers.
+                        # scaling value is based on historical data analysis
+                        timeout_scaling *= 3
+                    elif polling_strategy == 'poll-cv':
+                        # scale test timeout if running with poll-cv
+                        # sanitizer and poll-cv scaling is not cumulative to ensure
+                        # reasonable timeout values.
+                        # TODO(jtattermusch): based on historical data and 5min default
+                        # test timeout poll-cv scaling is currently not useful.
+                        # Leaving here so it can be reintroduced if the default test timeout
+                        # is decreased in the future.
+                        timeout_scaling *= 1
 
-  def make_options(self):
-    return self._make_options
+                if self.config.build_config in target['exclude_configs']:
+                    continue
+                if self.args.iomgr_platform in target.get('exclude_iomgrs', []):
+                    continue
+                if self.platform == 'windows':
+                    binary = 'cmake/build/%s/%s.exe' % (
+                        _MSBUILD_CONFIG[self.config.build_config],
+                        target['name'])
+                else:
+                    if self._use_cmake:
+                        binary = 'cmake/build/%s' % target['name']
+                    else:
+                        binary = 'bins/%s/%s' % (self.config.build_config,
+                                                 target['name'])
+                cpu_cost = target['cpu_cost']
+                if cpu_cost == 'capacity':
+                    cpu_cost = multiprocessing.cpu_count()
+                if os.path.isfile(binary):
+                    list_test_command = None
+                    filter_test_command = None
 
-  def pre_build_steps(self):
-    if self.platform == 'windows':
-      return [['tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
-               self._cmake_generator_option,
-               self._cmake_arch_option]]
-    elif self._use_cmake:
-      return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
-    else:
-      return []
+                    # these are the flags defined by the gtest and benchmark frameworks to list
+                    # and filter test runs. We use them to split each individual test
+                    # into its own JobSpec, and thus into its own process.
+                    if 'benchmark' in target and target['benchmark']:
+                        with open(os.devnull, 'w') as fnull:
+                            tests = subprocess.check_output(
+                                [binary, '--benchmark_list_tests'],
+                                stderr=fnull)
+                        for line in tests.split('\n'):
+                            test = line.strip()
+                            if not test: continue
+                            cmdline = [binary,
+                                       '--benchmark_filter=%s$' % test
+                                      ] + target['args']
+                            out.append(
+                                self.config.job_spec(
+                                    cmdline,
+                                    shortname='%s %s' % (' '.join(cmdline),
+                                                         shortname_ext),
+                                    cpu_cost=cpu_cost,
+                                    timeout_seconds=target.get(
+                                        'timeout_seconds',
+                                        _DEFAULT_TIMEOUT_SECONDS) *
+                                    timeout_scaling,
+                                    environ=env))
+                    elif 'gtest' in target and target['gtest']:
+                        # here we parse the output of --gtest_list_tests to build up a complete
+                        # list of the tests contained in a binary; for each test, we then
+                        # add a job to run, filtering for just that test.
+                        with open(os.devnull, 'w') as fnull:
+                            tests = subprocess.check_output(
+                                [binary, '--gtest_list_tests'], stderr=fnull)
+                        base = None
+                        for line in tests.split('\n'):
+                            i = line.find('#')
+                            if i >= 0: line = line[:i]
+                            if not line: continue
+                            if line[0] != ' ':
+                                base = line.strip()
+                            else:
+                                assert base is not None
+                                assert line[1] == ' '
+                                test = base + line.strip()
+                                cmdline = [binary,
+                                           '--gtest_filter=%s' % test
+                                          ] + target['args']
+                                out.append(
+                                    self.config.job_spec(
+                                        cmdline,
+                                        shortname='%s %s' % (' '.join(cmdline),
+                                                             shortname_ext),
+                                        cpu_cost=cpu_cost,
+                                        timeout_seconds=target.get(
+                                            'timeout_seconds',
+                                            _DEFAULT_TIMEOUT_SECONDS) *
+                                        timeout_scaling,
+                                        environ=env))
+                    else:
+                        cmdline = [binary] + target['args']
+                        shortname = target.get('shortname', ' '.join(
+                            pipes.quote(arg) for arg in cmdline))
+                        shortname += shortname_ext
+                        out.append(
+                            self.config.job_spec(
+                                cmdline,
+                                shortname=shortname,
+                                cpu_cost=cpu_cost,
+                                flaky=target.get('flaky', False),
+                                timeout_seconds=target.get(
+                                    'timeout_seconds', _DEFAULT_TIMEOUT_SECONDS)
+                                * timeout_scaling,
+                                environ=env))
+                elif self.args.regex == '.*' or self.platform == 'windows':
+                    print('\nWARNING: binary not found, skipping', binary)
+        return sorted(out)
 
-  def build_steps(self):
-    return []
+    def make_targets(self):
+        if self.platform == 'windows':
+            # don't build tools on windows just yet
+            return ['buildtests_%s' % self.make_target]
+        return [
+            'buildtests_%s' % self.make_target,
+            'tools_%s' % self.make_target, 'check_epollexclusive'
+        ]
 
-  def post_tests_steps(self):
-    if self.platform == 'windows':
-      return []
-    else:
-      return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
+    def make_options(self):
+        return self._make_options
 
-  def makefile_name(self):
-    if self._use_cmake:
-      return 'cmake/build/Makefile'
-    else:
-      return 'Makefile'
+    def pre_build_steps(self):
+        if self.platform == 'windows':
+            return [[
+                'tools\\run_tests\\helper_scripts\\pre_build_cmake.bat',
+                self._cmake_generator_option, self._cmake_arch_option
+            ]]
+        elif self._use_cmake:
+            return [['tools/run_tests/helper_scripts/pre_build_cmake.sh']]
+        else:
+            return []
 
-  def _clang_make_options(self, version_suffix=''):
-    return ['CC=clang%s' % version_suffix,
+    def build_steps(self):
+        return []
+
+    def post_tests_steps(self):
+        if self.platform == 'windows':
+            return []
+        else:
+            return [['tools/run_tests/helper_scripts/post_tests_c.sh']]
+
+    def makefile_name(self):
+        if self._use_cmake:
+            return 'cmake/build/Makefile'
+        else:
+            return 'Makefile'
+
+    def _clang_make_options(self, version_suffix=''):
+        return [
+            'CC=clang%s' % version_suffix,
             'CXX=clang++%s' % version_suffix,
             'LD=clang%s' % version_suffix,
-            'LDXX=clang++%s' % version_suffix]
+            'LDXX=clang++%s' % version_suffix
+        ]
 
-  def _gcc_make_options(self, version_suffix):
-    return ['CC=gcc%s' % version_suffix,
+    def _gcc_make_options(self, version_suffix):
+        return [
+            'CC=gcc%s' % version_suffix,
             'CXX=g++%s' % version_suffix,
             'LD=gcc%s' % version_suffix,
-            'LDXX=g++%s' % version_suffix]
+            'LDXX=g++%s' % version_suffix
+        ]
 
-  def _compiler_options(self, use_docker, compiler):
-    """Returns docker distro and make options to use for given compiler."""
-    if not use_docker and not _is_use_docker_child():
-      _check_compiler(compiler, ['default'])
+    def _compiler_options(self, use_docker, compiler):
+        """Returns docker distro and make options to use for given compiler."""
+        if not use_docker and not _is_use_docker_child():
+            _check_compiler(compiler, ['default'])
 
-    if compiler == 'gcc4.9' or compiler == 'default':
-      return ('jessie', [])
-    elif compiler == 'gcc4.8':
-      return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
-    elif compiler == 'gcc5.3':
-      return ('ubuntu1604', [])
-    elif compiler == 'gcc_musl':
-      return ('alpine', [])
-    elif compiler == 'clang3.4':
-      # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
-      return ('ubuntu1404', self._clang_make_options())
-    elif compiler == 'clang3.5':
-      return ('jessie', self._clang_make_options(version_suffix='-3.5'))
-    elif compiler == 'clang3.6':
-      return ('ubuntu1604', self._clang_make_options(version_suffix='-3.6'))
-    elif compiler == 'clang3.7':
-      return ('ubuntu1604', self._clang_make_options(version_suffix='-3.7'))
-    else:
-      raise Exception('Compiler %s not supported.' % compiler)
+        if compiler == 'gcc4.9' or compiler == 'default':
+            return ('jessie', [])
+        elif compiler == 'gcc4.8':
+            return ('jessie', self._gcc_make_options(version_suffix='-4.8'))
+        elif compiler == 'gcc5.3':
+            return ('ubuntu1604', [])
+        elif compiler == 'gcc_musl':
+            return ('alpine', [])
+        elif compiler == 'clang3.4':
+            # on ubuntu1404, clang-3.4 alias doesn't exist, just use 'clang'
+            return ('ubuntu1404', self._clang_make_options())
+        elif compiler == 'clang3.5':
+            return ('jessie', self._clang_make_options(version_suffix='-3.5'))
+        elif compiler == 'clang3.6':
+            return ('ubuntu1604',
+                    self._clang_make_options(version_suffix='-3.6'))
+        elif compiler == 'clang3.7':
+            return ('ubuntu1604',
+                    self._clang_make_options(version_suffix='-3.7'))
+        else:
+            raise Exception('Compiler %s not supported.' % compiler)
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/cxx_%s_%s' % (self._docker_distro,
-                                                _docker_arch_suffix(self.args.arch))
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/cxx_%s_%s' % (
+            self._docker_distro, _docker_arch_suffix(self.args.arch))
 
-  def __str__(self):
-    return self.make_target
+    def __str__(self):
+        return self.make_target
 
 
 # This tests Node on grpc/grpc-node and will become the standard for Node testing
 class RemoteNodeLanguage(object):
 
-  def __init__(self):
-    self.platform = platform_string()
+    def __init__(self):
+        self.platform = platform_string()
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    # Note: electron ABI only depends on major and minor version, so that's all
-    # we should specify in the compiler argument
-    _check_compiler(self.args.compiler, ['default', 'node0.12',
-                                         'node4', 'node5', 'node6',
-                                         'node7', 'node8',
-                                         'electron1.3', 'electron1.6'])
-    if self.args.compiler == 'default':
-      self.runtime = 'node'
-      self.node_version = '8'
-    else:
-      if self.args.compiler.startswith('electron'):
-        self.runtime = 'electron'
-        self.node_version = self.args.compiler[8:]
-      else:
-        self.runtime = 'node'
-        # Take off the word "node"
-        self.node_version = self.args.compiler[4:]
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        # Note: the Electron ABI only depends on the major and minor version,
+        # so that's all we should specify in the compiler argument
+        _check_compiler(self.args.compiler, [
+            'default', 'node0.12', 'node4', 'node5', 'node6', 'node7', 'node8',
+            'electron1.3', 'electron1.6'
+        ])
+        if self.args.compiler == 'default':
+            self.runtime = 'node'
+            self.node_version = '8'
+        else:
+            if self.args.compiler.startswith('electron'):
+                self.runtime = 'electron'
+                self.node_version = self.args.compiler[8:]
+            else:
+                self.runtime = 'node'
+                # Take off the word "node"
+                self.node_version = self.args.compiler[4:]
 
-  # TODO: update with Windows/electron scripts when available for grpc/grpc-node
-  def test_specs(self):
-    if self.platform == 'windows':
-      return [self.config.job_spec(['tools\\run_tests\\helper_scripts\\run_node.bat'])]
-    else:
-      return [self.config.job_spec(['tools/run_tests/helper_scripts/run_grpc-node.sh'],
-                                   None,
-                                   environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+    # TODO: update with Windows/electron scripts when available for grpc/grpc-node
+    def test_specs(self):
+        if self.platform == 'windows':
+            return [
+                self.config.job_spec(
+                    ['tools\\run_tests\\helper_scripts\\run_node.bat'])
+            ]
+        else:
+            return [
+                self.config.job_spec(
+                    ['tools/run_tests/helper_scripts/run_grpc-node.sh'],
+                    None,
+                    environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+            ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return []
+    def make_targets(self):
+        return []
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return []
+    def build_steps(self):
+        return []
 
-  def post_tests_steps(self):
-    return []
+    def post_tests_steps(self):
+        return []
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(self.args.arch)
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/node_jessie_%s' % _docker_arch_suffix(
+            self.args.arch)
 
-  def __str__(self):
-    return 'grpc-node'
+    def __str__(self):
+        return 'grpc-node'
 
 
 class PhpLanguage(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
-    self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
+        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
 
-  def test_specs(self):
-    return [self.config.job_spec(['src/php/bin/run_tests.sh'],
-                                  environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+    def test_specs(self):
+        return [
+            self.config.job_spec(
+                ['src/php/bin/run_tests.sh'],
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+        ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return ['static_c', 'shared_c']
+    def make_targets(self):
+        return ['static_c', 'shared_c']
 
-  def make_options(self):
-    return self._make_options;
+    def make_options(self):
+        return self._make_options
 
-  def build_steps(self):
-    return [['tools/run_tests/helper_scripts/build_php.sh']]
+    def build_steps(self):
+        return [['tools/run_tests/helper_scripts/build_php.sh']]
 
-  def post_tests_steps(self):
-    return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+    def post_tests_steps(self):
+        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(self.args.arch)
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/php_jessie_%s' % _docker_arch_suffix(
+            self.args.arch)
 
-  def __str__(self):
-    return 'php'
+    def __str__(self):
+        return 'php'
 
 
 class Php7Language(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
-    self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
+        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
 
-  def test_specs(self):
-    return [self.config.job_spec(['src/php/bin/run_tests.sh'],
-                                  environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
+    def test_specs(self):
+        return [
+            self.config.job_spec(
+                ['src/php/bin/run_tests.sh'],
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+        ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return ['static_c', 'shared_c']
+    def make_targets(self):
+        return ['static_c', 'shared_c']
 
-  def make_options(self):
-    return self._make_options;
+    def make_options(self):
+        return self._make_options
 
-  def build_steps(self):
-    return [['tools/run_tests/helper_scripts/build_php.sh']]
+    def build_steps(self):
+        return [['tools/run_tests/helper_scripts/build_php.sh']]
 
-  def post_tests_steps(self):
-    return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
+    def post_tests_steps(self):
+        return [['tools/run_tests/helper_scripts/post_tests_php.sh']]
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(self.args.arch)
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/php7_jessie_%s' % _docker_arch_suffix(
+            self.args.arch)
 
-  def __str__(self):
-    return 'php7'
+    def __str__(self):
+        return 'php7'
 
 
-class PythonConfig(collections.namedtuple('PythonConfig', [
-    'name', 'build', 'run'])):
-  """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
+class PythonConfig(
+        collections.namedtuple('PythonConfig', ['name', 'build', 'run'])):
+    """Tuple of commands (named s.t. 'what it says on the tin' applies)"""
+
 
 class PythonLanguage(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    self.pythons = self._get_pythons(self.args)
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        self.pythons = self._get_pythons(self.args)
 
-  def test_specs(self):
-    # load list of known test suites
-    with open('src/python/grpcio_tests/tests/tests.json') as tests_json_file:
-      tests_json = json.load(tests_json_file)
-    environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
-    return [self.config.job_spec(
-        config.run,
-        timeout_seconds=5*60,
-        environ=dict(list(environment.items()) +
-                     [('GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
-        shortname='%s.test.%s' % (config.name, suite_name),)
-        for suite_name in tests_json
-        for config in self.pythons]
+    def test_specs(self):
+        # load list of known test suites
+        with open(
+                'src/python/grpcio_tests/tests/tests.json') as tests_json_file:
+            tests_json = json.load(tests_json_file)
+        environment = dict(_FORCE_ENVIRON_FOR_WRAPPERS)
+        return [
+            self.config.job_spec(
+                config.run,
+                timeout_seconds=5 * 60,
+                environ=dict(
+                    list(environment.items()) + [(
+                        'GRPC_PYTHON_TESTRUNNER_FILTER', str(suite_name))]),
+                shortname='%s.test.%s' % (config.name, suite_name),
+            ) for suite_name in tests_json for config in self.pythons
+        ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return []
+    def make_targets(self):
+        return []
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return [config.build for config in self.pythons]
+    def build_steps(self):
+        return [config.build for config in self.pythons]
 
-  def post_tests_steps(self):
-    if self.config.build_config != 'gcov':
-      return []
-    else:
-      return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
+    def post_tests_steps(self):
+        if self.config.build_config != 'gcov':
+            return []
+        else:
+            return [['tools/run_tests/helper_scripts/post_tests_python.sh']]
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/python_%s_%s' % (self.python_manager_name(), _docker_arch_suffix(self.args.arch))
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/python_%s_%s' % (
+            self.python_manager_name(), _docker_arch_suffix(self.args.arch))
 
-  def python_manager_name(self):
-    if self.args.compiler in ['python3.5', 'python3.6']:
-      return 'pyenv'
-    elif self.args.compiler == 'python_alpine':
-      return 'alpine'
-    else:
-      return 'jessie'
+    def python_manager_name(self):
+        if self.args.compiler in ['python3.5', 'python3.6']:
+            return 'pyenv'
+        elif self.args.compiler == 'python_alpine':
+            return 'alpine'
+        else:
+            return 'jessie'
 
-  def _get_pythons(self, args):
-    if args.arch == 'x86':
-      bits = '32'
-    else:
-      bits = '64'
+    def _get_pythons(self, args):
+        if args.arch == 'x86':
+            bits = '32'
+        else:
+            bits = '64'
 
-    if os.name == 'nt':
-      shell = ['bash']
-      builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python_msys2.sh')]
-      builder_prefix_arguments = ['MINGW{}'.format(bits)]
-      venv_relative_python = ['Scripts/python.exe']
-      toolchain = ['mingw32']
-    else:
-      shell = []
-      builder = [os.path.abspath('tools/run_tests/helper_scripts/build_python.sh')]
-      builder_prefix_arguments = []
-      venv_relative_python = ['bin/python']
-      toolchain = ['unix']
+        if os.name == 'nt':
+            shell = ['bash']
+            builder = [
+                os.path.abspath(
+                    'tools/run_tests/helper_scripts/build_python_msys2.sh')
+            ]
+            builder_prefix_arguments = ['MINGW{}'.format(bits)]
+            venv_relative_python = ['Scripts/python.exe']
+            toolchain = ['mingw32']
+        else:
+            shell = []
+            builder = [
+                os.path.abspath(
+                    'tools/run_tests/helper_scripts/build_python.sh')
+            ]
+            builder_prefix_arguments = []
+            venv_relative_python = ['bin/python']
+            toolchain = ['unix']
 
-    runner = [os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')]
-    config_vars = _PythonConfigVars(shell, builder, builder_prefix_arguments,
-                              venv_relative_python, toolchain, runner)
-    python27_config = _python_config_generator(name='py27', major='2',
-                                               minor='7', bits=bits,
-                                               config_vars=config_vars)
-    python34_config = _python_config_generator(name='py34', major='3',
-                                               minor='4', bits=bits,
-                                               config_vars=config_vars)
-    python35_config = _python_config_generator(name='py35', major='3',
-                                               minor='5', bits=bits,
-                                               config_vars=config_vars)
-    python36_config = _python_config_generator(name='py36', major='3',
-                                               minor='6', bits=bits,
-                                               config_vars=config_vars)
-    pypy27_config = _pypy_config_generator(name='pypy', major='2',
-                                           config_vars=config_vars)
-    pypy32_config = _pypy_config_generator(name='pypy3', major='3',
-                                           config_vars=config_vars)
+        runner = [
+            os.path.abspath('tools/run_tests/helper_scripts/run_python.sh')
+        ]
+        config_vars = _PythonConfigVars(shell, builder,
+                                        builder_prefix_arguments,
+                                        venv_relative_python, toolchain, runner)
+        python27_config = _python_config_generator(
+            name='py27',
+            major='2',
+            minor='7',
+            bits=bits,
+            config_vars=config_vars)
+        python34_config = _python_config_generator(
+            name='py34',
+            major='3',
+            minor='4',
+            bits=bits,
+            config_vars=config_vars)
+        python35_config = _python_config_generator(
+            name='py35',
+            major='3',
+            minor='5',
+            bits=bits,
+            config_vars=config_vars)
+        python36_config = _python_config_generator(
+            name='py36',
+            major='3',
+            minor='6',
+            bits=bits,
+            config_vars=config_vars)
+        pypy27_config = _pypy_config_generator(
+            name='pypy', major='2', config_vars=config_vars)
+        pypy32_config = _pypy_config_generator(
+            name='pypy3', major='3', config_vars=config_vars)
 
-    if args.compiler == 'default':
-      if os.name == 'nt':
-        return (python35_config,)
-      else:
-        return (python27_config, python34_config,)
-    elif args.compiler == 'python2.7':
-      return (python27_config,)
-    elif args.compiler == 'python3.4':
-      return (python34_config,)
-    elif args.compiler == 'python3.5':
-      return (python35_config,)
-    elif args.compiler == 'python3.6':
-      return (python36_config,)
-    elif args.compiler == 'pypy':
-      return (pypy27_config,)
-    elif args.compiler == 'pypy3':
-      return (pypy32_config,)
-    elif args.compiler == 'python_alpine':
-      return (python27_config,)
-    elif args.compiler == 'all_the_cpythons':
-      return (python27_config, python34_config, python35_config,
-              python36_config,)
-    else:
-      raise Exception('Compiler %s not supported.' % args.compiler)
+        if args.compiler == 'default':
+            if os.name == 'nt':
+                return (python35_config,)
+            else:
+                return (
+                    python27_config,
+                    python34_config,
+                )
+        elif args.compiler == 'python2.7':
+            return (python27_config,)
+        elif args.compiler == 'python3.4':
+            return (python34_config,)
+        elif args.compiler == 'python3.5':
+            return (python35_config,)
+        elif args.compiler == 'python3.6':
+            return (python36_config,)
+        elif args.compiler == 'pypy':
+            return (pypy27_config,)
+        elif args.compiler == 'pypy3':
+            return (pypy32_config,)
+        elif args.compiler == 'python_alpine':
+            return (python27_config,)
+        elif args.compiler == 'all_the_cpythons':
+            return (
+                python27_config,
+                python34_config,
+                python35_config,
+                python36_config,
+            )
+        else:
+            raise Exception('Compiler %s not supported.' % args.compiler)
 
-  def __str__(self):
-    return 'python'
+    def __str__(self):
+        return 'python'
 
 
 class RubyLanguage(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
 
-  def test_specs(self):
-    tests = [self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby.sh'],
-                                  timeout_seconds=10*60,
-                                  environ=_FORCE_ENVIRON_FOR_WRAPPERS)]
-    tests.append(self.config.job_spec(['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
-                 timeout_seconds=10*60,
-                 environ=_FORCE_ENVIRON_FOR_WRAPPERS))
-    return tests
+    def test_specs(self):
+        tests = [
+            self.config.job_spec(
+                ['tools/run_tests/helper_scripts/run_ruby.sh'],
+                timeout_seconds=10 * 60,
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS)
+        ]
+        tests.append(
+            self.config.job_spec(
+                ['tools/run_tests/helper_scripts/run_ruby_end2end_tests.sh'],
+                timeout_seconds=10 * 60,
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+        return tests
 
-  def pre_build_steps(self):
-    return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
+    def pre_build_steps(self):
+        return [['tools/run_tests/helper_scripts/pre_build_ruby.sh']]
 
-  def make_targets(self):
-    return []
+    def make_targets(self):
+        return []
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return [['tools/run_tests/helper_scripts/build_ruby.sh']]
+    def build_steps(self):
+        return [['tools/run_tests/helper_scripts/build_ruby.sh']]
 
-  def post_tests_steps(self):
-    return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
+    def post_tests_steps(self):
+        return [['tools/run_tests/helper_scripts/post_tests_ruby.sh']]
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(self.args.arch)
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/ruby_jessie_%s' % _docker_arch_suffix(
+            self.args.arch)
 
-  def __str__(self):
-    return 'ruby'
+    def __str__(self):
+        return 'ruby'
 
 
 class CSharpLanguage(object):
 
-  def __init__(self):
-    self.platform = platform_string()
+    def __init__(self):
+        self.platform = platform_string()
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    if self.platform == 'windows':
-      _check_compiler(self.args.compiler, ['coreclr', 'default'])
-      _check_arch(self.args.arch, ['default'])
-      self._cmake_arch_option = 'x64'
-      self._make_options = []
-    else:
-      _check_compiler(self.args.compiler, ['default', 'coreclr'])
-      self._docker_distro = 'jessie'
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        if self.platform == 'windows':
+            _check_compiler(self.args.compiler, ['coreclr', 'default'])
+            _check_arch(self.args.arch, ['default'])
+            self._cmake_arch_option = 'x64'
+            self._make_options = []
+        else:
+            _check_compiler(self.args.compiler, ['default', 'coreclr'])
+            self._docker_distro = 'jessie'
 
-      if self.platform == 'mac':
-        # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
-        self._make_options = ['EMBED_OPENSSL=true']
-        if self.args.compiler != 'coreclr':
-          # On Mac, official distribution of mono is 32bit.
-          self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
-      else:
-        self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
+            if self.platform == 'mac':
+                # TODO(jtattermusch): EMBED_ZLIB=true currently breaks the mac build
+                self._make_options = ['EMBED_OPENSSL=true']
+                if self.args.compiler != 'coreclr':
+                    # On Mac, the official distribution of Mono is 32-bit.
+                    self._make_options += ['ARCH_FLAGS=-m32', 'LDFLAGS=-m32']
+            else:
+                self._make_options = ['EMBED_OPENSSL=true', 'EMBED_ZLIB=true']
 
-  def test_specs(self):
-    with open('src/csharp/tests.json') as f:
-      tests_by_assembly = json.load(f)
+    def test_specs(self):
+        with open('src/csharp/tests.json') as f:
+            tests_by_assembly = json.load(f)
 
-    msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
-    nunit_args = ['--labels=All', '--noresult', '--workers=1']
-    assembly_subdir = 'bin/%s' % msbuild_config
-    assembly_extension = '.exe'
+        msbuild_config = _MSBUILD_CONFIG[self.config.build_config]
+        nunit_args = ['--labels=All', '--noresult', '--workers=1']
+        assembly_subdir = 'bin/%s' % msbuild_config
+        assembly_extension = '.exe'
 
-    if self.args.compiler == 'coreclr':
-      assembly_subdir += '/netcoreapp1.0'
-      runtime_cmd = ['dotnet', 'exec']
-      assembly_extension = '.dll'
-    else:
-      assembly_subdir += '/net45'
-      if self.platform == 'windows':
-        runtime_cmd = []
-      else:
-        runtime_cmd = ['mono']
+        if self.args.compiler == 'coreclr':
+            assembly_subdir += '/netcoreapp1.0'
+            runtime_cmd = ['dotnet', 'exec']
+            assembly_extension = '.dll'
+        else:
+            assembly_subdir += '/net45'
+            if self.platform == 'windows':
+                runtime_cmd = []
+            else:
+                runtime_cmd = ['mono']
 
-    specs = []
-    for assembly in six.iterkeys(tests_by_assembly):
-      assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
-                                                 assembly_subdir,
-                                                 assembly,
-                                                 assembly_extension)
-      if self.config.build_config != 'gcov' or self.platform != 'windows':
-        # normally, run each test as a separate process
-        for test in tests_by_assembly[assembly]:
-          cmdline = runtime_cmd + [assembly_file, '--test=%s' % test] + nunit_args
-          specs.append(self.config.job_spec(cmdline,
-                                            shortname='csharp.%s' % test,
-                                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
-      else:
-        # For C# test coverage, run all tests from the same assembly at once
-        # using OpenCover.Console (only works on Windows).
-        cmdline = ['src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
-                   '-target:%s' % assembly_file,
-                   '-targetdir:src\\csharp',
-                   '-targetargs:%s' % ' '.join(nunit_args),
-                   '-filter:+[Grpc.Core]*',
-                   '-register:user',
-                   '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly]
+        specs = []
+        for assembly in six.iterkeys(tests_by_assembly):
+            assembly_file = 'src/csharp/%s/%s/%s%s' % (assembly,
+                                                       assembly_subdir,
+                                                       assembly,
+                                                       assembly_extension)
+            if self.config.build_config != 'gcov' or self.platform != 'windows':
+                # normally, run each test as a separate process
+                for test in tests_by_assembly[assembly]:
+                    cmdline = runtime_cmd + [assembly_file,
+                                             '--test=%s' % test] + nunit_args
+                    specs.append(
+                        self.config.job_spec(
+                            cmdline,
+                            shortname='csharp.%s' % test,
+                            environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+            else:
+                # For C# test coverage, run all tests from the same assembly at once
+                # using OpenCover.Console (only works on Windows).
+                cmdline = [
+                    'src\\csharp\\packages\\OpenCover.4.6.519\\tools\\OpenCover.Console.exe',
+                    '-target:%s' % assembly_file, '-targetdir:src\\csharp',
+                    '-targetargs:%s' % ' '.join(nunit_args),
+                    '-filter:+[Grpc.Core]*', '-register:user',
+                    '-output:src\\csharp\\coverage_csharp_%s.xml' % assembly
+                ]
 
-        # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
-        # to prevent problems with registering the profiler.
-        run_exclusive = 1000000
-        specs.append(self.config.job_spec(cmdline,
-                                          shortname='csharp.coverage.%s' % assembly,
-                                          cpu_cost=run_exclusive,
-                                          environ=_FORCE_ENVIRON_FOR_WRAPPERS))
-    return specs
+                # set really high cpu_cost to make sure instances of OpenCover.Console run exclusively
+                # to prevent problems with registering the profiler.
+                run_exclusive = 1000000
+                specs.append(
+                    self.config.job_spec(
+                        cmdline,
+                        shortname='csharp.coverage.%s' % assembly,
+                        cpu_cost=run_exclusive,
+                        environ=_FORCE_ENVIRON_FOR_WRAPPERS))
+        return specs
 
-  def pre_build_steps(self):
-    if self.platform == 'windows':
-      return [['tools\\run_tests\\helper_scripts\\pre_build_csharp.bat', self._cmake_arch_option]]
-    else:
-      return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
+    def pre_build_steps(self):
+        if self.platform == 'windows':
+            return [[
+                'tools\\run_tests\\helper_scripts\\pre_build_csharp.bat',
+                self._cmake_arch_option
+            ]]
+        else:
+            return [['tools/run_tests/helper_scripts/pre_build_csharp.sh']]
 
-  def make_targets(self):
-    return ['grpc_csharp_ext']
+    def make_targets(self):
+        return ['grpc_csharp_ext']
 
-  def make_options(self):
-    return self._make_options;
+    def make_options(self):
+        return self._make_options
 
-  def build_steps(self):
-    if self.platform == 'windows':
-      return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
-    else:
-      return [['tools/run_tests/helper_scripts/build_csharp.sh']]
+    def build_steps(self):
+        if self.platform == 'windows':
+            return [['tools\\run_tests\\helper_scripts\\build_csharp.bat']]
+        else:
+            return [['tools/run_tests/helper_scripts/build_csharp.sh']]
 
-  def post_tests_steps(self):
-    if self.platform == 'windows':
-      return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
-    else:
-      return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
+    def post_tests_steps(self):
+        if self.platform == 'windows':
+            return [['tools\\run_tests\\helper_scripts\\post_tests_csharp.bat']]
+        else:
+            return [['tools/run_tests/helper_scripts/post_tests_csharp.sh']]
 
-  def makefile_name(self):
-    if self.platform == 'windows':
-      return 'cmake/build/%s/Makefile' % self._cmake_arch_option
-    else:
-      return 'Makefile'
+    def makefile_name(self):
+        if self.platform == 'windows':
+            return 'cmake/build/%s/Makefile' % self._cmake_arch_option
+        else:
+            return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/csharp_%s_%s' % (self._docker_distro,
-                                                   _docker_arch_suffix(self.args.arch))
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/csharp_%s_%s' % (
+            self._docker_distro, _docker_arch_suffix(self.args.arch))
 
-  def __str__(self):
-    return 'csharp'
+    def __str__(self):
+        return 'csharp'
 
 
 class ObjCLanguage(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
 
-  def test_specs(self):
-    return [
-        self.config.job_spec(['src/objective-c/tests/run_tests.sh'],
-                              timeout_seconds=60*60,
-                              shortname='objc-tests',
-                              cpu_cost=1e6,
-                              environ=_FORCE_ENVIRON_FOR_WRAPPERS),
-        self.config.job_spec(['src/objective-c/tests/run_plugin_tests.sh'],
-                              timeout_seconds=60*60,
-                              shortname='objc-plugin-tests',
-                              cpu_cost=1e6,
-                              environ=_FORCE_ENVIRON_FOR_WRAPPERS),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-helloworld',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'HelloWorld',
-                                       'EXAMPLE_PATH': 'examples/objective-c/helloworld'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-routeguide',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'RouteGuideClient',
-                                       'EXAMPLE_PATH': 'examples/objective-c/route_guide'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-authsample',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'AuthSample',
-                                       'EXAMPLE_PATH': 'examples/objective-c/auth_sample'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-sample',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'Sample',
-                                       'EXAMPLE_PATH': 'src/objective-c/examples/Sample'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-sample-frameworks',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'Sample',
-                                       'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
-                                       'FRAMEWORKS': 'YES'}),
-        self.config.job_spec(['src/objective-c/tests/build_one_example.sh'],
-                              timeout_seconds=10*60,
-                              shortname='objc-build-example-switftsample',
-                              cpu_cost=1e6,
-                              environ={'SCHEME': 'SwiftSample',
-                                       'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'}),
-    ]
+    def test_specs(self):
+        return [
+            self.config.job_spec(
+                ['src/objective-c/tests/run_tests.sh'],
+                timeout_seconds=60 * 60,
+                shortname='objc-tests',
+                cpu_cost=1e6,
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
+            self.config.job_spec(
+                ['src/objective-c/tests/run_plugin_tests.sh'],
+                timeout_seconds=60 * 60,
+                shortname='objc-plugin-tests',
+                cpu_cost=1e6,
+                environ=_FORCE_ENVIRON_FOR_WRAPPERS),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-helloworld',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'HelloWorld',
+                    'EXAMPLE_PATH': 'examples/objective-c/helloworld'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-routeguide',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'RouteGuideClient',
+                    'EXAMPLE_PATH': 'examples/objective-c/route_guide'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-authsample',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'AuthSample',
+                    'EXAMPLE_PATH': 'examples/objective-c/auth_sample'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-sample',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'Sample',
+                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-sample-frameworks',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'Sample',
+                    'EXAMPLE_PATH': 'src/objective-c/examples/Sample',
+                    'FRAMEWORKS': 'YES'
+                }),
+            self.config.job_spec(
+                ['src/objective-c/tests/build_one_example.sh'],
+                timeout_seconds=10 * 60,
+                shortname='objc-build-example-swiftsample',
+                cpu_cost=1e6,
+                environ={
+                    'SCHEME': 'SwiftSample',
+                    'EXAMPLE_PATH': 'src/objective-c/examples/SwiftSample'
+                }),
+        ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return ['interop_server']
+    def make_targets(self):
+        return ['interop_server']
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return [['src/objective-c/tests/build_tests.sh']]
+    def build_steps(self):
+        return [['src/objective-c/tests/build_tests.sh']]
 
-  def post_tests_steps(self):
-    return []
+    def post_tests_steps(self):
+        return []
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return None
+    def dockerfile_dir(self):
+        return None
 
-  def __str__(self):
-    return 'objc'
+    def __str__(self):
+        return 'objc'
 
 
 class Sanity(object):
 
-  def configure(self, config, args):
-    self.config = config
-    self.args = args
-    _check_compiler(self.args.compiler, ['default'])
+    def configure(self, config, args):
+        self.config = config
+        self.args = args
+        _check_compiler(self.args.compiler, ['default'])
 
-  def test_specs(self):
-    import yaml
-    with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
-      environ={'TEST': 'true'}
-      if _is_use_docker_child():
-        environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
-      return [self.config.job_spec(cmd['script'].split(),
-                                   timeout_seconds=30*60,
-                                   environ=environ,
-                                   cpu_cost=cmd.get('cpu_cost', 1))
-              for cmd in yaml.load(f)]
+    def test_specs(self):
+        import yaml
+        with open('tools/run_tests/sanity/sanity_tests.yaml', 'r') as f:
+            environ = {'TEST': 'true'}
+            if _is_use_docker_child():
+                environ['CLANG_FORMAT_SKIP_DOCKER'] = 'true'
+            return [
+                self.config.job_spec(
+                    cmd['script'].split(),
+                    timeout_seconds=30 * 60,
+                    environ=environ,
+                    cpu_cost=cmd.get('cpu_cost', 1)) for cmd in yaml.load(f)
+            ]
 
-  def pre_build_steps(self):
-    return []
+    def pre_build_steps(self):
+        return []
 
-  def make_targets(self):
-    return ['run_dep_checks']
+    def make_targets(self):
+        return ['run_dep_checks']
 
-  def make_options(self):
-    return []
+    def make_options(self):
+        return []
 
-  def build_steps(self):
-    return []
+    def build_steps(self):
+        return []
 
-  def post_tests_steps(self):
-    return []
+    def post_tests_steps(self):
+        return []
 
-  def makefile_name(self):
-    return 'Makefile'
+    def makefile_name(self):
+        return 'Makefile'
 
-  def dockerfile_dir(self):
-    return 'tools/dockerfile/test/sanity'
+    def dockerfile_dir(self):
+        return 'tools/dockerfile/test/sanity'
 
-  def __str__(self):
-    return 'sanity'
+    def __str__(self):
+        return 'sanity'
+
 
 # different configurations we can run under
 with open('tools/run_tests/generated/configs.json') as f:
-  _CONFIGS = dict((cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
-
+    _CONFIGS = dict(
+        (cfg['config'], Config(**cfg)) for cfg in ast.literal_eval(f.read()))
 
 _LANGUAGES = {
     'c++': CLanguage('cxx', 'c++'),
@@ -1033,60 +1174,61 @@
     'python': PythonLanguage(),
     'ruby': RubyLanguage(),
     'csharp': CSharpLanguage(),
-    'objc' : ObjCLanguage(),
+    'objc': ObjCLanguage(),
     'sanity': Sanity()
-    }
-
+}
 
 _MSBUILD_CONFIG = {
     'dbg': 'Debug',
     'opt': 'Release',
     'gcov': 'Debug',
-    }
+}
 
 
 def _windows_arch_option(arch):
-  """Returns msbuild cmdline option for selected architecture."""
-  if arch == 'default' or arch == 'x86':
-    return '/p:Platform=Win32'
-  elif arch == 'x64':
-    return '/p:Platform=x64'
-  else:
-    print('Architecture %s not supported.' % arch)
-    sys.exit(1)
+    """Returns msbuild cmdline option for selected architecture."""
+    if arch == 'default' or arch == 'x86':
+        return '/p:Platform=Win32'
+    elif arch == 'x64':
+        return '/p:Platform=x64'
+    else:
+        print('Architecture %s not supported.' % arch)
+        sys.exit(1)
 
 
 def _check_arch_option(arch):
-  """Checks that architecture option is valid."""
-  if platform_string() == 'windows':
-    _windows_arch_option(arch)
-  elif platform_string() == 'linux':
-    # On linux, we need to be running under docker with the right architecture.
-    runtime_arch = platform.architecture()[0]
-    if arch == 'default':
-      return
-    elif runtime_arch == '64bit' and arch == 'x64':
-      return
-    elif runtime_arch == '32bit' and arch == 'x86':
-      return
+    """Checks that architecture option is valid."""
+    if platform_string() == 'windows':
+        _windows_arch_option(arch)
+    elif platform_string() == 'linux':
+        # On Linux, we need to be running under Docker with the right architecture.
+        runtime_arch = platform.architecture()[0]
+        if arch == 'default':
+            return
+        elif runtime_arch == '64bit' and arch == 'x64':
+            return
+        elif runtime_arch == '32bit' and arch == 'x86':
+            return
+        else:
+            print('Architecture %s does not match current runtime architecture.'
+                  % arch)
+            sys.exit(1)
     else:
-      print('Architecture %s does not match current runtime architecture.' % arch)
-      sys.exit(1)
-  else:
-    if args.arch != 'default':
-      print('Architecture %s not supported on current platform.' % args.arch)
-      sys.exit(1)
+        if args.arch != 'default':
+            print('Architecture %s not supported on current platform.' %
+                  args.arch)
+            sys.exit(1)
 
 
 def _docker_arch_suffix(arch):
-  """Returns suffix to dockerfile dir to use."""
-  if arch == 'default' or arch == 'x64':
-    return 'x64'
-  elif arch == 'x86':
-    return 'x86'
-  else:
-    print('Architecture %s not supported with current settings.' % arch)
-    sys.exit(1)
+    """Returns suffix to dockerfile dir to use."""
+    if arch == 'default' or arch == 'x64':
+        return 'x64'
+    elif arch == 'x86':
+        return 'x86'
+    else:
+        print('Architecture %s not supported with current settings.' % arch)
+        sys.exit(1)
 
 
 def runs_per_test_type(arg_str):
@@ -1111,478 +1253,591 @@
 
 
 def percent_type(arg_str):
-  pct = float(arg_str)
-  if pct > 100 or pct < 0:
-    raise argparse.ArgumentTypeError(
-        "'%f' is not a valid percentage in the [0, 100] range" % pct)
-  return pct
+    pct = float(arg_str)
+    if pct > 100 or pct < 0:
+        raise argparse.ArgumentTypeError(
+            "'%f' is not a valid percentage in the [0, 100] range" % pct)
+    return pct
+
 
 # This is math.isclose in python >= 3.5
 def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
-      return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+    return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
 
 
 # parse command line
 argp = argparse.ArgumentParser(description='Run grpc tests.')
-argp.add_argument('-c', '--config',
-                  choices=sorted(_CONFIGS.keys()),
-                  default='opt')
-argp.add_argument('-n', '--runs_per_test', default=1, type=runs_per_test_type,
-        help='A positive integer or "inf". If "inf", all tests will run in an '
-             'infinite loop. Especially useful in combination with "-f"')
+argp.add_argument(
+    '-c', '--config', choices=sorted(_CONFIGS.keys()), default='opt')
+argp.add_argument(
+    '-n',
+    '--runs_per_test',
+    default=1,
+    type=runs_per_test_type,
+    help='A positive integer or "inf". If "inf", all tests will run in an '
+    'infinite loop. Especially useful in combination with "-f"')
 argp.add_argument('-r', '--regex', default='.*', type=str)
 argp.add_argument('--regex_exclude', default='', type=str)
 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
 argp.add_argument('-s', '--slowdown', default=1.0, type=float)
-argp.add_argument('-p', '--sample_percent', default=100.0, type=percent_type,
-                  help='Run a random sample with that percentage of tests')
-argp.add_argument('-f', '--forever',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('-t', '--travis',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('--newline_on_success',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('-l', '--language',
-                  choices=['all'] + sorted(_LANGUAGES.keys()),
-                  nargs='+',
-                  default=['all'])
-argp.add_argument('-S', '--stop_on_failure',
-                  default=False,
-                  action='store_const',
-                  const=True)
-argp.add_argument('--use_docker',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Run all the tests under docker. That provides ' +
-                  'additional isolation and prevents the need to install ' +
-                  'language specific prerequisites. Only available on Linux.')
-argp.add_argument('--allow_flakes',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Allow flaky tests to show as passing (re-runs failed tests up to five times)')
-argp.add_argument('--arch',
-                  choices=['default', 'x86', 'x64'],
-                  default='default',
-                  help='Selects architecture to target. For some platforms "default" is the only supported choice.')
-argp.add_argument('--compiler',
-                  choices=['default',
-                           'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
-                           'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7',
-                           'python2.7', 'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine', 'all_the_cpythons',
-                           'electron1.3', 'electron1.6',
-                           'coreclr',
-                           'cmake', 'cmake_vs2015', 'cmake_vs2017'],
-                  default='default',
-                  help='Selects compiler to use. Allowed values depend on the platform and language.')
-argp.add_argument('--iomgr_platform',
-                  choices=['native', 'uv'],
-                  default='native',
-                  help='Selects iomgr platform to build on')
-argp.add_argument('--build_only',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Perform all the build steps but don\'t run any tests.')
-argp.add_argument('--measure_cpu_costs', default=False, action='store_const', const=True,
-                  help='Measure the cpu costs of tests')
-argp.add_argument('--update_submodules', default=[], nargs='*',
-                  help='Update some submodules before building. If any are updated, also run generate_projects. ' +
-                       'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.')
+argp.add_argument(
+    '-p',
+    '--sample_percent',
+    default=100.0,
+    type=percent_type,
+    help='Run a random sample with that percentage of tests')
+argp.add_argument(
+    '-f', '--forever', default=False, action='store_const', const=True)
+argp.add_argument(
+    '-t', '--travis', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--newline_on_success', default=False, action='store_const', const=True)
+argp.add_argument(
+    '-l',
+    '--language',
+    choices=['all'] + sorted(_LANGUAGES.keys()),
+    nargs='+',
+    default=['all'])
+argp.add_argument(
+    '-S', '--stop_on_failure', default=False, action='store_const', const=True)
+argp.add_argument(
+    '--use_docker',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Run all the tests under Docker. That provides ' +
+    'additional isolation and avoids the need to install ' +
+    'language-specific prerequisites. Only available on Linux.')
+argp.add_argument(
+    '--allow_flakes',
+    default=False,
+    action='store_const',
+    const=True,
+    help=
+    'Allow flaky tests to show as passing (re-runs failed tests up to five times)'
+)
+argp.add_argument(
+    '--arch',
+    choices=['default', 'x86', 'x64'],
+    default='default',
+    help=
+    'Selects architecture to target. For some platforms "default" is the only supported choice.'
+)
+argp.add_argument(
+    '--compiler',
+    choices=[
+        'default', 'gcc4.4', 'gcc4.6', 'gcc4.8', 'gcc4.9', 'gcc5.3', 'gcc_musl',
+        'clang3.4', 'clang3.5', 'clang3.6', 'clang3.7', 'python2.7',
+        'python3.4', 'python3.5', 'python3.6', 'pypy', 'pypy3', 'python_alpine',
+        'all_the_cpythons', 'electron1.3', 'electron1.6', 'coreclr', 'cmake',
+        'cmake_vs2015', 'cmake_vs2017'
+    ],
+    default='default',
+    help=
+    'Selects compiler to use. Allowed values depend on the platform and language.'
+)
+argp.add_argument(
+    '--iomgr_platform',
+    choices=['native', 'uv'],
+    default='native',
+    help='Selects iomgr platform to build on')
+argp.add_argument(
+    '--build_only',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Perform all the build steps but don\'t run any tests.')
+argp.add_argument(
+    '--measure_cpu_costs',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Measure the cpu costs of tests')
+argp.add_argument(
+    '--update_submodules',
+    default=[],
+    nargs='*',
+    help=
+    'Update some submodules before building. If any are updated, also run generate_projects. '
+    +
+    'Submodules are specified as SUBMODULE_NAME:BRANCH; if BRANCH is omitted, master is assumed.'
+)
 argp.add_argument('-a', '--antagonists', default=0, type=int)
-argp.add_argument('-x', '--xml_report', default=None, type=str,
-        help='Generates a JUnit-compatible XML report')
-argp.add_argument('--report_suite_name', default='tests', type=str,
-        help='Test suite name to use in generated JUnit XML report')
-argp.add_argument('--quiet_success',
-                  default=False,
-                  action='store_const',
-                  const=True,
-                  help='Don\'t print anything when a test passes. Passing tests also will not be reported in XML report. ' +
-                       'Useful when running many iterations of each test (argument -n).')
-argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
-                  help='Don\'t try to iterate over many polling strategies when they exist')
-argp.add_argument('--force_use_pollers', default=None, type=str,
-                  help='Only use the specified comma-delimited list of polling engines. '
-                  'Example: --force_use_pollers epollsig,poll '
-                  ' (This flag has no effect if --force_default_poller flag is also used)')
-argp.add_argument('--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
-argp.add_argument('--bq_result_table',
-                  default='',
-                  type=str,
-                  nargs='?',
-                  help='Upload test results to a specified BQ table.')
-argp.add_argument('--disable_auto_set_flakes', default=False, const=True, action='store_const',
-                  help='Disable rerunning historically flaky tests')
+argp.add_argument(
+    '-x',
+    '--xml_report',
+    default=None,
+    type=str,
+    help='Generates a JUnit-compatible XML report')
+argp.add_argument(
+    '--report_suite_name',
+    default='tests',
+    type=str,
+    help='Test suite name to use in generated JUnit XML report')
+argp.add_argument(
+    '--quiet_success',
+    default=False,
+    action='store_const',
+    const=True,
+    help=
+    'Don\'t print anything when a test passes. Passing tests will also not be reported in the XML report. '
+    + 'Useful when running many iterations of each test (argument -n).')
+argp.add_argument(
+    '--force_default_poller',
+    default=False,
+    action='store_const',
+    const=True,
+    help='Don\'t try to iterate over many polling strategies when they exist')
+argp.add_argument(
+    '--force_use_pollers',
+    default=None,
+    type=str,
+    help='Only use the specified comma-delimited list of polling engines. '
+    'Example: --force_use_pollers epollsig,poll '
+    ' (This flag has no effect if --force_default_poller flag is also used)')
+argp.add_argument(
+    '--max_time', default=-1, type=int, help='Maximum test runtime in seconds')
+argp.add_argument(
+    '--bq_result_table',
+    default='',
+    type=str,
+    nargs='?',
+    help='Upload test results to a specified BQ table.')
+argp.add_argument(
+    '--disable_auto_set_flakes',
+    default=False,
+    const=True,
+    action='store_const',
+    help='Disable rerunning historically flaky tests')
 args = argp.parse_args()
 
 flaky_tests = set()
 shortname_to_cpu = {}
 if not args.disable_auto_set_flakes:
-  try:
-    for test in get_bqtest_data():
-      if test.flaky: flaky_tests.add(test.name)
-      if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
-  except:
-    print("Unexpected error getting flaky tests:", sys.exc_info()[0])
+    try:
+        for test in get_bqtest_data():
+            if test.flaky: flaky_tests.add(test.name)
+            if test.cpu > 0: shortname_to_cpu[test.name] = test.cpu
+    except:
+        print(
+            "Unexpected error getting flaky tests: %s" % traceback.format_exc())
 
 if args.force_default_poller:
-  _POLLING_STRATEGIES = {}
+    _POLLING_STRATEGIES = {}
 elif args.force_use_pollers:
-  _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
+    _POLLING_STRATEGIES[platform_string()] = args.force_use_pollers.split(',')
 
 jobset.measure_cpu_costs = args.measure_cpu_costs
 
 # update submodules if necessary
 need_to_regenerate_projects = False
 for spec in args.update_submodules:
-  spec = spec.split(':', 1)
-  if len(spec) == 1:
-    submodule = spec[0]
-    branch = 'master'
-  elif len(spec) == 2:
-    submodule = spec[0]
-    branch = spec[1]
-  cwd = 'third_party/%s' % submodule
-  def git(cmd, cwd=cwd):
-    print('in %s: git %s' % (cwd, cmd))
-    run_shell_command('git %s' % cmd, cwd=cwd)
-  git('fetch')
-  git('checkout %s' % branch)
-  git('pull origin %s' % branch)
-  if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
-    need_to_regenerate_projects = True
-if need_to_regenerate_projects:
-  if jobset.platform_string() == 'linux':
-    run_shell_command('tools/buildgen/generate_projects.sh')
-  else:
-    print('WARNING: may need to regenerate projects, but since we are not on')
-    print('         Linux this step is being skipped. Compilation MAY fail.')
+    spec = spec.split(':', 1)
+    if len(spec) == 1:
+        submodule = spec[0]
+        branch = 'master'
+    elif len(spec) == 2:
+        submodule = spec[0]
+        branch = spec[1]
+    cwd = 'third_party/%s' % submodule
 
+    def git(cmd, cwd=cwd):
+        print('in %s: git %s' % (cwd, cmd))
+        run_shell_command('git %s' % cmd, cwd=cwd)
+
+    git('fetch')
+    git('checkout %s' % branch)
+    git('pull origin %s' % branch)
+    if os.path.exists('src/%s/gen_build_yaml.py' % submodule):
+        need_to_regenerate_projects = True
+if need_to_regenerate_projects:
+    if jobset.platform_string() == 'linux':
+        run_shell_command('tools/buildgen/generate_projects.sh')
+    else:
+        print(
+            'WARNING: may need to regenerate projects, but since we are not on')
+        print(
+            '         Linux this step is being skipped. Compilation MAY fail.')
 
 # grab config
 run_config = _CONFIGS[args.config]
 build_config = run_config.build_config
 
 if args.travis:
-  _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
+    _FORCE_ENVIRON_FOR_WRAPPERS = {'GRPC_TRACE': 'api'}
 
 if 'all' in args.language:
-  lang_list = _LANGUAGES.keys()
+    lang_list = _LANGUAGES.keys()
 else:
-  lang_list = args.language
+    lang_list = args.language
 # We don't support code coverage on some languages
 if 'gcov' in args.config:
-  for bad in ['objc', 'sanity']:
-    if bad in lang_list:
-      lang_list.remove(bad)
+    for bad in ['objc', 'sanity']:
+        if bad in lang_list:
+            lang_list.remove(bad)
 
 languages = set(_LANGUAGES[l] for l in lang_list)
 for l in languages:
-  l.configure(run_config, args)
+    l.configure(run_config, args)
 
-language_make_options=[]
+language_make_options = []
 if any(language.make_options() for language in languages):
-  if not 'gcov' in args.config and len(languages) != 1:
-    print('languages with custom make options cannot be built simultaneously with other languages')
-    sys.exit(1)
-  else:
-    # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
-    # together, and is only used under gcov. All other configs should build languages individually.
-    language_make_options = list(set([make_option for lang in languages for make_option in lang.make_options()]))
+    if not 'gcov' in args.config and len(languages) != 1:
+        print(
+            'languages with custom make options cannot be built simultaneously with other languages'
+        )
+        sys.exit(1)
+    else:
+        # Combining make options is not clean and just happens to work. It allows C/C++ and C# to build
+        # together, and is only used under gcov. All other configs should build languages individually.
+        language_make_options = list(
+            set([
+                make_option
+                for lang in languages
+                for make_option in lang.make_options()
+            ]))
 
 if args.use_docker:
-  if not args.travis:
-    print('Seen --use_docker flag, will run tests under docker.')
-    print('')
-    print('IMPORTANT: The changes you are testing need to be locally committed')
-    print('because only the committed changes in the current branch will be')
-    print('copied to the docker environment.')
-    time.sleep(5)
+    if not args.travis:
+        print('Seen --use_docker flag, will run tests under docker.')
+        print('')
+        print(
+            'IMPORTANT: The changes you are testing need to be locally committed'
+        )
+        print(
+            'because only the committed changes in the current branch will be')
+        print('copied to the docker environment.')
+        time.sleep(5)
 
-  dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
-  if len(dockerfile_dirs) > 1:
-    if 'gcov' in args.config:
-      dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
-      print ('Using multilang_jessie_x64 docker image for code coverage for '
-             'all languages.')
+    dockerfile_dirs = set([l.dockerfile_dir() for l in languages])
+    if len(dockerfile_dirs) > 1:
+        if 'gcov' in args.config:
+            dockerfile_dir = 'tools/dockerfile/test/multilang_jessie_x64'
+            print(
+                'Using multilang_jessie_x64 docker image for code coverage for '
+                'all languages.')
+        else:
+            print(
+                'Languages to be tested require running under different docker '
+                'images.')
+            sys.exit(1)
     else:
-      print ('Languages to be tested require running under different docker '
-             'images.')
-      sys.exit(1)
-  else:
-    dockerfile_dir = next(iter(dockerfile_dirs))
+        dockerfile_dir = next(iter(dockerfile_dirs))
 
-  child_argv = [ arg for arg in sys.argv if not arg == '--use_docker' ]
-  run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(child_argv[1:])
+    child_argv = [arg for arg in sys.argv if not arg == '--use_docker']
+    run_tests_cmd = 'python tools/run_tests/run_tests.py %s' % ' '.join(
+        child_argv[1:])
 
-  env = os.environ.copy()
-  env['RUN_TESTS_COMMAND'] = run_tests_cmd
-  env['DOCKERFILE_DIR'] = dockerfile_dir
-  env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
-  if args.xml_report:
-    env['XML_REPORT'] = args.xml_report
-  if not args.travis:
-    env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.
+    env = os.environ.copy()
+    env['RUN_TESTS_COMMAND'] = run_tests_cmd
+    env['DOCKERFILE_DIR'] = dockerfile_dir
+    env['DOCKER_RUN_SCRIPT'] = 'tools/run_tests/dockerize/docker_run_tests.sh'
+    if args.xml_report:
+        env['XML_REPORT'] = args.xml_report
+    if not args.travis:
+        env['TTY_FLAG'] = '-t'  # enables Ctrl-C when not on Jenkins.
 
-  subprocess.check_call('tools/run_tests/dockerize/build_docker_and_run_tests.sh',
-                        shell=True,
-                        env=env)
-  sys.exit(0)
+    subprocess.check_call(
+        'tools/run_tests/dockerize/build_docker_and_run_tests.sh',
+        shell=True,
+        env=env)
+    sys.exit(0)
 
 _check_arch_option(args.arch)
 
+
 def make_jobspec(cfg, targets, makefile='Makefile'):
-  if platform_string() == 'windows':
-    return [jobset.JobSpec(['cmake', '--build', '.',
-                            '--target', '%s' % target,
-                            '--config', _MSBUILD_CONFIG[cfg]],
-                           cwd=os.path.dirname(makefile),
-                           timeout_seconds=None) for target in targets]
-  else:
-    if targets and makefile.startswith('cmake/build/'):
-      # With cmake, we've passed all the build configuration in the pre-build step already
-      return [jobset.JobSpec([os.getenv('MAKE', 'make'),
-                              '-j', '%d' % args.jobs] +
-                             targets,
-                             cwd='cmake/build',
-                             timeout_seconds=None)]
-    if targets:
-      return [jobset.JobSpec([os.getenv('MAKE', 'make'),
-                              '-f', makefile,
-                              '-j', '%d' % args.jobs,
-                              'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' % args.slowdown,
-                              'CONFIG=%s' % cfg,
-                              'Q='] +
-                              language_make_options +
-                             ([] if not args.travis else ['JENKINS_BUILD=1']) +
-                             targets,
-                             timeout_seconds=None)]
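+    # Returns the build JobSpecs for the requested targets: one 'cmake --build'
+    # job per target on Windows, a single make invocation elsewhere.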
+    if platform_string() == 'windows':
+        return [
+            jobset.JobSpec(
+                [
+                    'cmake', '--build', '.', '--target',
+                    '%s' % target, '--config', _MSBUILD_CONFIG[cfg]
+                ],
+                cwd=os.path.dirname(makefile),
+                timeout_seconds=None) for target in targets
+        ]
     else:
-      return []
+        if targets and makefile.startswith('cmake/build/'):
+            # With cmake, we've passed all the build configuration in the pre-build step already
+            return [
+                jobset.JobSpec(
+                    [os.getenv('MAKE', 'make'), '-j',
+                     '%d' % args.jobs] + targets,
+                    cwd='cmake/build',
+                    timeout_seconds=None)
+            ]
+        if targets:
+            return [
+                jobset.JobSpec(
+                    [
+                        os.getenv('MAKE', 'make'), '-f', makefile, '-j',
+                        '%d' % args.jobs,
+                        'EXTRA_DEFINES=GRPC_TEST_SLOWDOWN_MACHINE_FACTOR=%f' %
+                        args.slowdown,
+                        'CONFIG=%s' % cfg, 'Q='
+                    ] + language_make_options +
+                    ([] if not args.travis else ['JENKINS_BUILD=1']) + targets,
+                    timeout_seconds=None)
+            ]
+        else:
+            return []
+
 
 make_targets = {}
 for l in languages:
-  makefile = l.makefile_name()
-  make_targets[makefile] = make_targets.get(makefile, set()).union(
-      set(l.make_targets()))
+    makefile = l.makefile_name()
+    make_targets[makefile] = make_targets.get(makefile, set()).union(
+        set(l.make_targets()))
+
 
 def build_step_environ(cfg):
-  environ = {'CONFIG': cfg}
-  msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
-  if msbuild_cfg:
-    environ['MSBUILD_CONFIG'] = msbuild_cfg
-  return environ
+    environ = {'CONFIG': cfg}
+    msbuild_cfg = _MSBUILD_CONFIG.get(cfg)
+    if msbuild_cfg:
+        environ['MSBUILD_CONFIG'] = msbuild_cfg
+    return environ
 
-build_steps = list(set(
-                   jobset.JobSpec(cmdline, environ=build_step_environ(build_config), flake_retries=2)
-                   for l in languages
-                   for cmdline in l.pre_build_steps()))
+
+build_steps = list(
+    set(
+        jobset.JobSpec(
+            cmdline, environ=build_step_environ(build_config), flake_retries=2)
+        for l in languages
+        for cmdline in l.pre_build_steps()))
 if make_targets:
-  make_commands = itertools.chain.from_iterable(make_jobspec(build_config, list(targets), makefile) for (makefile, targets) in make_targets.items())
-  build_steps.extend(set(make_commands))
-build_steps.extend(set(
-                   jobset.JobSpec(cmdline, environ=build_step_environ(build_config), timeout_seconds=None)
-                   for l in languages
-                   for cmdline in l.build_steps()))
+    make_commands = itertools.chain.from_iterable(
+        make_jobspec(build_config, list(targets), makefile)
+        for (makefile, targets) in make_targets.items())
+    build_steps.extend(set(make_commands))
+build_steps.extend(
+    set(
+        jobset.JobSpec(
+            cmdline,
+            environ=build_step_environ(build_config),
+            timeout_seconds=None)
+        for l in languages
+        for cmdline in l.build_steps()))
 
-post_tests_steps = list(set(
-                        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
-                        for l in languages
-                        for cmdline in l.post_tests_steps()))
+post_tests_steps = list(
+    set(
+        jobset.JobSpec(cmdline, environ=build_step_environ(build_config))
+        for l in languages
+        for cmdline in l.post_tests_steps()))
 runs_per_test = args.runs_per_test
 forever = args.forever
 
 
 def _shut_down_legacy_server(legacy_server_port):
-  try:
-    version = int(urllib.request.urlopen(
-        'http://localhost:%d/version_number' % legacy_server_port,
-        timeout=10).read())
-  except:
-    pass
-  else:
-    urllib.request.urlopen(
-        'http://localhost:%d/quitquitquit' % legacy_server_port).read()
+    try:
+        version = int(
+            urllib.request.urlopen(
+                'http://localhost:%d/version_number' % legacy_server_port,
+                timeout=10).read())
+    except:
+        pass
+    else:
+        urllib.request.urlopen(
+            'http://localhost:%d/quitquitquit' % legacy_server_port).read()
 
 
 def _calculate_num_runs_failures(list_of_results):
-  """Caculate number of runs and failures for a particular test.
+    """Caculate number of runs and failures for a particular test.
 
   Args:
     list_of_results: (List) of JobResult object.
   Returns:
     A tuple of total number of runs and failures.
   """
-  num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
-  num_failures = 0
-  for jobresult in list_of_results:
-    if jobresult.retries > 0:
-      num_runs += jobresult.retries
-    if jobresult.num_failures > 0:
-      num_failures += jobresult.num_failures
-  return num_runs, num_failures
+    num_runs = len(list_of_results)  # By default, there is 1 run per JobResult.
+    num_failures = 0
+    for jobresult in list_of_results:
+        if jobresult.retries > 0:
+            num_runs += jobresult.retries
+        if jobresult.num_failures > 0:
+            num_failures += jobresult.num_failures
+    return num_runs, num_failures
 
 
 # _build_and_run results
 class BuildAndRunError(object):
 
-  BUILD = object()
-  TEST = object()
-  POST_TEST = object()
+    BUILD = object()
+    TEST = object()
+    POST_TEST = object()
 
 
 def _has_epollexclusive():
-  binary = 'bins/%s/check_epollexclusive' % args.config
-  if not os.path.exists(binary):
-    return False
-  try:
-    subprocess.check_call(binary)
-    return True
-  except subprocess.CalledProcessError, e:
-    return False
-  except OSError, e:
-    # For languages other than C and Windows the binary won't exist
-    return False
+    binary = 'bins/%s/check_epollexclusive' % args.config
+    if not os.path.exists(binary):
+        return False
+    try:
+        subprocess.check_call(binary)
+        return True
+    except subprocess.CalledProcessError as e:
+        return False
+    except OSError as e:
+        # For languages other than C and Windows the binary won't exist
+        return False
 
 
 # returns a list of things that failed (or an empty list on success)
-def _build_and_run(
-    check_cancelled, newline_on_success, xml_report=None, build_only=False):
-  """Do one pass of building & running tests."""
-  # build latest sequentially
-  num_failures, resultset = jobset.run(
-      build_steps, maxjobs=1, stop_on_failure=True,
-      newline_on_success=newline_on_success, travis=args.travis)
-  if num_failures:
-    return [BuildAndRunError.BUILD]
+def _build_and_run(check_cancelled,
+                   newline_on_success,
+                   xml_report=None,
+                   build_only=False):
+    """Do one pass of building & running tests."""
+    # build latest sequentially
+    num_failures, resultset = jobset.run(
+        build_steps,
+        maxjobs=1,
+        stop_on_failure=True,
+        newline_on_success=newline_on_success,
+        travis=args.travis)
+    if num_failures:
+        return [BuildAndRunError.BUILD]
 
-  if build_only:
-    if xml_report:
-      report_utils.render_junit_xml_report(resultset, xml_report,
-                                           suite_name=args.report_suite_name)
-    return []
+    if build_only:
+        if xml_report:
+            report_utils.render_junit_xml_report(
+                resultset, xml_report, suite_name=args.report_suite_name)
+        return []
 
-  if not args.travis and not _has_epollexclusive() and platform_string() in _POLLING_STRATEGIES and 'epollex' in _POLLING_STRATEGIES[platform_string()]:
-    print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
-    _POLLING_STRATEGIES[platform_string()].remove('epollex')
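+    # Drop 'epollex' when the check_epollexclusive probe fails.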
+    if (not args.travis and not _has_epollexclusive() and
+            platform_string() in _POLLING_STRATEGIES and
+            'epollex' in _POLLING_STRATEGIES[platform_string()]):
+        print('\n\nOmitting EPOLLEXCLUSIVE tests\n\n')
+        _POLLING_STRATEGIES[platform_string()].remove('epollex')
 
-  # start antagonists
-  antagonists = [subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
-                 for _ in range(0, args.antagonists)]
-  start_port_server.start_port_server()
-  resultset = None
-  num_test_failures = 0
-  try:
-    infinite_runs = runs_per_test == 0
-    one_run = set(
-      spec
-      for language in languages
-      for spec in language.test_specs()
-      if (re.search(args.regex, spec.shortname) and
-          (args.regex_exclude == '' or
-           not re.search(args.regex_exclude, spec.shortname))))
-    # When running on travis, we want out test runs to be as similar as possible
-    # for reproducibility purposes.
-    if args.travis and args.max_time <= 0:
-      massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
-    else:
-      # whereas otherwise, we want to shuffle things up to give all tests a
-      # chance to run.
-      massaged_one_run = list(one_run)  # random.sample needs an indexable seq.
-      num_jobs = len(massaged_one_run)
-      # for a random sample, get as many as indicated by the 'sample_percent'
-      # argument. By default this arg is 100, resulting in a shuffle of all
-      # jobs.
-      sample_size = int(num_jobs * args.sample_percent/100.0)
-      massaged_one_run = random.sample(massaged_one_run, sample_size)
-      if not isclose(args.sample_percent, 100.0):
-        assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
-        print("Running %d tests out of %d (~%d%%)" %
-              (sample_size, num_jobs, args.sample_percent))
-    if infinite_runs:
-      assert len(massaged_one_run) > 0, 'Must have at least one test for a -n inf run'
-    runs_sequence = (itertools.repeat(massaged_one_run) if infinite_runs
-                     else itertools.repeat(massaged_one_run, runs_per_test))
-    all_runs = itertools.chain.from_iterable(runs_sequence)
+    # start antagonists
+    antagonists = [
+        subprocess.Popen(['tools/run_tests/python_utils/antagonist.py'])
+        for _ in range(0, args.antagonists)
+    ]
+    start_port_server.start_port_server()
+    resultset = None
+    num_test_failures = 0
+    try:
+        infinite_runs = runs_per_test == 0
+        one_run = set(
+            spec for language in languages for spec in language.test_specs()
+            if (re.search(args.regex, spec.shortname) and
+                (args.regex_exclude == '' or
+                 not re.search(args.regex_exclude, spec.shortname))))
+        # When running on travis, we want our test runs to be as similar as possible
+        # for reproducibility purposes.
+        if args.travis and args.max_time <= 0:
+            massaged_one_run = sorted(one_run, key=lambda x: x.cpu_cost)
+        else:
+            # whereas otherwise, we want to shuffle things up to give all tests a
+            # chance to run.
+            # random.sample needs an indexable sequence.
+            massaged_one_run = list(one_run)
+            num_jobs = len(massaged_one_run)
+            # for a random sample, get as many as indicated by the 'sample_percent'
+            # argument. By default this arg is 100, resulting in a shuffle of all
+            # jobs.
+            sample_size = int(num_jobs * args.sample_percent / 100.0)
+            massaged_one_run = random.sample(massaged_one_run, sample_size)
+            if not isclose(args.sample_percent, 100.0):
+                assert args.runs_per_test == 1, "Can't do sampling (-p) over multiple runs (-n)."
+                print("Running %d tests out of %d (~%d%%)" %
+                      (sample_size, num_jobs, args.sample_percent))
+        if infinite_runs:
+            assert len(massaged_one_run) > 0, \
+                'Must have at least one test for a -n inf run'
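+        # '-n inf' sets runs_per_test to 0, which repeats the run set forever;
+        # otherwise the set is repeated runs_per_test times.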
+        runs_sequence = (itertools.repeat(massaged_one_run)
+                         if infinite_runs else itertools.repeat(
+                             massaged_one_run, runs_per_test))
+        all_runs = itertools.chain.from_iterable(runs_sequence)
 
-    if args.quiet_success:
-      jobset.message('START', 'Running tests quietly, only failing tests will be reported', do_newline=True)
-    num_test_failures, resultset = jobset.run(
-        all_runs, check_cancelled, newline_on_success=newline_on_success,
-        travis=args.travis, maxjobs=args.jobs, maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
-        stop_on_failure=args.stop_on_failure,
-        quiet_success=args.quiet_success, max_time=args.max_time)
-    if resultset:
-      for k, v in sorted(resultset.items()):
-        num_runs, num_failures = _calculate_num_runs_failures(v)
-        if num_failures > 0:
-          if num_failures == num_runs:  # what about infinite_runs???
-            jobset.message('FAILED', k, do_newline=True)
-          else:
+        if args.quiet_success:
             jobset.message(
-                'FLAKE', '%s [%d/%d runs flaked]' % (k, num_failures, num_runs),
+                'START',
+                'Running tests quietly, only failing tests will be reported',
                 do_newline=True)
-  finally:
-    for antagonist in antagonists:
-      antagonist.kill()
-    if args.bq_result_table and resultset:
-      upload_results_to_bq(resultset, args.bq_result_table, args, platform_string())
-    if xml_report and resultset:
-      report_utils.render_junit_xml_report(resultset, xml_report,
-                                           suite_name=args.report_suite_name)
+        num_test_failures, resultset = jobset.run(
+            all_runs,
+            check_cancelled,
+            newline_on_success=newline_on_success,
+            travis=args.travis,
+            maxjobs=args.jobs,
+            maxjobs_cpu_agnostic=max_parallel_tests_for_current_platform(),
+            stop_on_failure=args.stop_on_failure,
+            quiet_success=args.quiet_success,
+            max_time=args.max_time)
+        if resultset:
+            for k, v in sorted(resultset.items()):
+                num_runs, num_failures = _calculate_num_runs_failures(v)
+                if num_failures > 0:
+                    if num_failures == num_runs:  # what about infinite_runs???
+                        jobset.message('FAILED', k, do_newline=True)
+                    else:
+                        jobset.message(
+                            'FLAKE',
+                            '%s [%d/%d runs flaked]' % (k, num_failures,
+                                                        num_runs),
+                            do_newline=True)
+    finally:
+        for antagonist in antagonists:
+            antagonist.kill()
+        if args.bq_result_table and resultset:
+            upload_results_to_bq(resultset, args.bq_result_table, args,
+                                 platform_string())
+        if xml_report and resultset:
+            report_utils.render_junit_xml_report(
+                resultset, xml_report, suite_name=args.report_suite_name)
 
-  number_failures, _ = jobset.run(
-      post_tests_steps, maxjobs=1, stop_on_failure=False,
-      newline_on_success=newline_on_success, travis=args.travis)
+    number_failures, _ = jobset.run(
+        post_tests_steps,
+        maxjobs=1,
+        stop_on_failure=False,
+        newline_on_success=newline_on_success,
+        travis=args.travis)
 
-  out = []
-  if number_failures:
-    out.append(BuildAndRunError.POST_TEST)
-  if num_test_failures:
-    out.append(BuildAndRunError.TEST)
+    out = []
+    if number_failures:
+        out.append(BuildAndRunError.POST_TEST)
+    if num_test_failures:
+        out.append(BuildAndRunError.TEST)
 
-  return out
+    return out
 
 
 if forever:
-  success = True
-  while True:
-    dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
-    initial_time = dw.most_recent_change()
-    have_files_changed = lambda: dw.most_recent_change() != initial_time
-    previous_success = success
-    errors = _build_and_run(check_cancelled=have_files_changed,
-                            newline_on_success=False,
-                            build_only=args.build_only) == 0
-    if not previous_success and not errors:
-      jobset.message('SUCCESS',
-                     'All tests are now passing properly',
-                     do_newline=True)
-    jobset.message('IDLE', 'No change detected')
-    while not have_files_changed():
-      time.sleep(1)
+    success = True
+    while True:
+        dw = watch_dirs.DirWatcher(['src', 'include', 'test', 'examples'])
+        initial_time = dw.most_recent_change()
+        have_files_changed = lambda: dw.most_recent_change() != initial_time
+        previous_success = success
+        errors = _build_and_run(
+            check_cancelled=have_files_changed,
+            newline_on_success=False,
+            build_only=args.build_only) == 0
+        if not previous_success and not errors:
+            jobset.message(
+                'SUCCESS',
+                'All tests are now passing properly',
+                do_newline=True)
+        jobset.message('IDLE', 'No change detected')
+        while not have_files_changed():
+            time.sleep(1)
 else:
-  errors = _build_and_run(check_cancelled=lambda: False,
-                          newline_on_success=args.newline_on_success,
-                          xml_report=args.xml_report,
-                          build_only=args.build_only)
-  if not errors:
-    jobset.message('SUCCESS', 'All tests passed', do_newline=True)
-  else:
-    jobset.message('FAILED', 'Some tests failed', do_newline=True)
-  exit_code = 0
-  if BuildAndRunError.BUILD in errors:
-    exit_code |= 1
-  if BuildAndRunError.TEST in errors:
-    exit_code |= 2
-  if BuildAndRunError.POST_TEST in errors:
-    exit_code |= 4
-  sys.exit(exit_code)
+    errors = _build_and_run(
+        check_cancelled=lambda: False,
+        newline_on_success=args.newline_on_success,
+        xml_report=args.xml_report,
+        build_only=args.build_only)
+    if not errors:
+        jobset.message('SUCCESS', 'All tests passed', do_newline=True)
+    else:
+        jobset.message('FAILED', 'Some tests failed', do_newline=True)
+    exit_code = 0
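+    # Exit status bitmask: 1 = build, 2 = test, 4 = post-test failures.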
+    if BuildAndRunError.BUILD in errors:
+        exit_code |= 1
+    if BuildAndRunError.TEST in errors:
+        exit_code |= 2
+    if BuildAndRunError.POST_TEST in errors:
+        exit_code |= 4
+    sys.exit(exit_code)
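
The block above encodes which phase failed as a bitmask (1 = build, 2 = test,
4 = post-test). A minimal sketch of how a hypothetical wrapper (not part of
this change) could decode that status, assuming it is run from the repository
root:

    import subprocess

    # Hypothetical helper: invoke run_tests.py and report which phases failed,
    # mirroring the bit values assigned above.
    def failed_phases(argv):
        code = subprocess.call(['python', 'tools/run_tests/run_tests.py'] + argv)
        names = {1: 'build', 2: 'test', 4: 'post_test'}
        return [name for bit, name in sorted(names.items()) if code & bit]

    # An empty list means every phase passed.
    print(failed_phases(['-l', 'c', '-c', 'opt']))
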
diff --git a/tools/run_tests/run_tests_matrix.py b/tools/run_tests/run_tests_matrix.py
index 7c58d8e..ac90bef 100755
--- a/tools/run_tests/run_tests_matrix.py
+++ b/tools/run_tests/run_tests_matrix.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Run test matrix."""
 
 from __future__ import print_function
@@ -29,14 +28,14 @@
 _ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
 os.chdir(_ROOT)
 
-_DEFAULT_RUNTESTS_TIMEOUT = 1*60*60
+_DEFAULT_RUNTESTS_TIMEOUT = 1 * 60 * 60
 
 # Set the timeout high to allow enough time for sanitizers and pre-building
 # clang docker.
-_CPP_RUNTESTS_TIMEOUT = 4*60*60
+_CPP_RUNTESTS_TIMEOUT = 4 * 60 * 60
 
 # C++ TSAN takes longer than other sanitizers
-_CPP_TSAN_RUNTESTS_TIMEOUT = 8*60*60
+_CPP_TSAN_RUNTESTS_TIMEOUT = 8 * 60 * 60
 
 # Number of jobs assigned to each run_tests.py instance
 _DEFAULT_INNER_JOBS = 2
@@ -46,448 +45,521 @@
 
 
 def _report_filename(name):
-  """Generates report file name"""
-  return 'report_%s_%s' % (name, _REPORT_SUFFIX)
+    """Generates report file name"""
+    return 'report_%s_%s' % (name, _REPORT_SUFFIX)
 
 
 def _report_filename_internal_ci(name):
-  """Generates report file name that leads to better presentation by internal CI"""
-  return '%s/%s' % (name, _REPORT_SUFFIX)
+    """Generates report file name that leads to better presentation by internal CI"""
+    return '%s/%s' % (name, _REPORT_SUFFIX)
 
 
-def _docker_jobspec(name, runtests_args=[], runtests_envs={},
+def _docker_jobspec(name,
+                    runtests_args=[],
+                    runtests_envs={},
                     inner_jobs=_DEFAULT_INNER_JOBS,
                     timeout_seconds=None):
-  """Run a single instance of run_tests.py in a docker container"""
-  if not timeout_seconds:
-    timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
-  test_job = jobset.JobSpec(
-          cmdline=['python', 'tools/run_tests/run_tests.py',
-                   '--use_docker',
-                   '-t',
-                   '-j', str(inner_jobs),
-                   '-x', _report_filename(name),
-                   '--report_suite_name', '%s' % name] + runtests_args,
-          environ=runtests_envs,
-          shortname='run_tests_%s' % name,
-          timeout_seconds=timeout_seconds)
-  return test_job
+    """Run a single instance of run_tests.py in a docker container"""
+    if not timeout_seconds:
+        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+    test_job = jobset.JobSpec(
+        cmdline=[
+            'python', 'tools/run_tests/run_tests.py', '--use_docker', '-t',
+            '-j', str(inner_jobs),
+            '-x', _report_filename(name),
+            '--report_suite_name', '%s' % name
+        ] + runtests_args,
+        environ=runtests_envs,
+        shortname='run_tests_%s' % name,
+        timeout_seconds=timeout_seconds)
+    return test_job
 
 
-def _workspace_jobspec(name, runtests_args=[], workspace_name=None,
-                       runtests_envs={}, inner_jobs=_DEFAULT_INNER_JOBS,
+def _workspace_jobspec(name,
+                       runtests_args=[],
+                       workspace_name=None,
+                       runtests_envs={},
+                       inner_jobs=_DEFAULT_INNER_JOBS,
                        timeout_seconds=None):
-  """Run a single instance of run_tests.py in a separate workspace"""
-  if not workspace_name:
-    workspace_name = 'workspace_%s' % name
-  if not timeout_seconds:
-    timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
-  env = {'WORKSPACE_NAME': workspace_name}
-  env.update(runtests_envs)
-  test_job = jobset.JobSpec(
-          cmdline=['bash',
-                   'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
-                   '-t',
-                   '-j', str(inner_jobs),
-                   '-x', '../%s' % _report_filename(name),
-                   '--report_suite_name', '%s' % name] + runtests_args,
-          environ=env,
-          shortname='run_tests_%s' % name,
-          timeout_seconds=timeout_seconds)
-  return test_job
+    """Run a single instance of run_tests.py in a separate workspace"""
+    if not workspace_name:
+        workspace_name = 'workspace_%s' % name
+    if not timeout_seconds:
+        timeout_seconds = _DEFAULT_RUNTESTS_TIMEOUT
+    env = {'WORKSPACE_NAME': workspace_name}
+    env.update(runtests_envs)
+    test_job = jobset.JobSpec(
+        cmdline=[
+            'bash', 'tools/run_tests/helper_scripts/run_tests_in_workspace.sh',
+            '-t',
+            '-j', str(inner_jobs),
+            '-x', '../%s' % _report_filename(name),
+            '--report_suite_name', '%s' % name
+        ] + runtests_args,
+        environ=env,
+        shortname='run_tests_%s' % name,
+        timeout_seconds=timeout_seconds)
+    return test_job
 
 
-def _generate_jobs(languages, configs, platforms, iomgr_platform = 'native',
-                  arch=None, compiler=None,
-                  labels=[], extra_args=[], extra_envs={},
-                  inner_jobs=_DEFAULT_INNER_JOBS,
-                  timeout_seconds=None):
-  result = []
-  for language in languages:
-    for platform in platforms:
-      for config in configs:
-        name = '%s_%s_%s_%s' % (language, platform, config, iomgr_platform)
-        runtests_args = ['-l', language,
-                         '-c', config,
-                         '--iomgr_platform', iomgr_platform]
-        if arch or compiler:
-          name += '_%s_%s' % (arch, compiler)
-          runtests_args += ['--arch', arch,
-                            '--compiler', compiler]
-        if '--build_only' in extra_args:
-          name += '_buildonly'
-        for extra_env in extra_envs:
-          name += '_%s_%s' % (extra_env, extra_envs[extra_env])
+def _generate_jobs(languages,
+                   configs,
+                   platforms,
+                   iomgr_platform='native',
+                   arch=None,
+                   compiler=None,
+                   labels=[],
+                   extra_args=[],
+                   extra_envs={},
+                   inner_jobs=_DEFAULT_INNER_JOBS,
+                   timeout_seconds=None):
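+    """Generate a run_tests.py job per (language, platform, config) combo."""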
+    result = []
+    for language in languages:
+        for platform in platforms:
+            for config in configs:
+                name = '%s_%s_%s_%s' % (language, platform, config,
+                                        iomgr_platform)
+                runtests_args = [
+                    '-l', language, '-c', config, '--iomgr_platform',
+                    iomgr_platform
+                ]
+                if arch or compiler:
+                    name += '_%s_%s' % (arch, compiler)
+                    runtests_args += ['--arch', arch, '--compiler', compiler]
+                if '--build_only' in extra_args:
+                    name += '_buildonly'
+                for extra_env in extra_envs:
+                    name += '_%s_%s' % (extra_env, extra_envs[extra_env])
 
-        runtests_args += extra_args
-        if platform == 'linux':
-          job = _docker_jobspec(name=name, runtests_args=runtests_args,
-                                runtests_envs=extra_envs, inner_jobs=inner_jobs,
-                                timeout_seconds=timeout_seconds)
-        else:
-          job = _workspace_jobspec(name=name, runtests_args=runtests_args,
-                                   runtests_envs=extra_envs, inner_jobs=inner_jobs,
-                                   timeout_seconds=timeout_seconds)
+                runtests_args += extra_args
+                if platform == 'linux':
+                    job = _docker_jobspec(
+                        name=name,
+                        runtests_args=runtests_args,
+                        runtests_envs=extra_envs,
+                        inner_jobs=inner_jobs,
+                        timeout_seconds=timeout_seconds)
+                else:
+                    job = _workspace_jobspec(
+                        name=name,
+                        runtests_args=runtests_args,
+                        runtests_envs=extra_envs,
+                        inner_jobs=inner_jobs,
+                        timeout_seconds=timeout_seconds)
 
-        job.labels = [platform, config, language, iomgr_platform] + labels
-        result.append(job)
-  return result
+                job.labels = [platform, config, language,
+                              iomgr_platform] + labels
+                result.append(job)
+    return result
 
 
 def _create_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
-  test_jobs = []
-  # supported on linux only
-  test_jobs += _generate_jobs(languages=['sanity', 'php7'],
-                             configs=['dbg', 'opt'],
-                             platforms=['linux'],
-                             labels=['basictests', 'multilang'],
-                             extra_args=extra_args,
-                             inner_jobs=inner_jobs)
+    test_jobs = []
+    # supported on linux only
+    test_jobs += _generate_jobs(
+        languages=['sanity', 'php7'],
+        configs=['dbg', 'opt'],
+        platforms=['linux'],
+        labels=['basictests', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # supported on all platforms.
-  test_jobs += _generate_jobs(languages=['c'],
-                             configs=['dbg', 'opt'],
-                             platforms=['linux', 'macos', 'windows'],
-                             labels=['basictests', 'corelang'],
-                             extra_args=extra_args,
-                             inner_jobs=inner_jobs,
-                             timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    # supported on all platforms.
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['dbg', 'opt'],
+        platforms=['linux', 'macos', 'windows'],
+        labels=['basictests', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  test_jobs += _generate_jobs(languages=['csharp', 'python'],
-                             configs=['dbg', 'opt'],
-                             platforms=['linux', 'macos', 'windows'],
-                             labels=['basictests', 'multilang'],
-                             extra_args=extra_args,
-                             inner_jobs=inner_jobs)
+    test_jobs += _generate_jobs(
+        languages=['csharp', 'python'],
+        configs=['dbg', 'opt'],
+        platforms=['linux', 'macos', 'windows'],
+        labels=['basictests', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # supported on linux and mac.
-  test_jobs += _generate_jobs(languages=['c++'],
-                              configs=['dbg', 'opt'],
-                              platforms=['linux', 'macos'],
-                              labels=['basictests', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    # supported on linux and mac.
+    test_jobs += _generate_jobs(
+        languages=['c++'],
+        configs=['dbg', 'opt'],
+        platforms=['linux', 'macos'],
+        labels=['basictests', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  test_jobs += _generate_jobs(languages=['grpc-node', 'ruby', 'php'],
-                              configs=['dbg', 'opt'],
-                              platforms=['linux', 'macos'],
-                              labels=['basictests', 'multilang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    test_jobs += _generate_jobs(
+        languages=['grpc-node', 'ruby', 'php'],
+        configs=['dbg', 'opt'],
+        platforms=['linux', 'macos'],
+        labels=['basictests', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # supported on mac only.
-  test_jobs += _generate_jobs(languages=['objc'],
-                              configs=['dbg', 'opt'],
-                              platforms=['macos'],
-                              labels=['basictests', 'multilang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    # supported on mac only.
+    test_jobs += _generate_jobs(
+        languages=['objc'],
+        configs=['dbg', 'opt'],
+        platforms=['macos'],
+        labels=['basictests', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # sanitizers
-  test_jobs += _generate_jobs(languages=['c'],
-                              configs=['msan', 'asan', 'tsan', 'ubsan'],
-                              platforms=['linux'],
-                              labels=['sanitizers', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-  test_jobs += _generate_jobs(languages=['c++'],
-                              configs=['asan'],
-                              platforms=['linux'],
-                              labels=['sanitizers', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
-  test_jobs += _generate_jobs(languages=['c++'],
-                              configs=['tsan'],
-                              platforms=['linux'],
-                              labels=['sanitizers', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)
+    # sanitizers
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['msan', 'asan', 'tsan', 'ubsan'],
+        platforms=['linux'],
+        labels=['sanitizers', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    test_jobs += _generate_jobs(
+        languages=['c++'],
+        configs=['asan'],
+        platforms=['linux'],
+        labels=['sanitizers', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    test_jobs += _generate_jobs(
+        languages=['c++'],
+        configs=['tsan'],
+        platforms=['linux'],
+        labels=['sanitizers', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_TSAN_RUNTESTS_TIMEOUT)
 
-  return test_jobs
+    return test_jobs
 
 
-def _create_portability_test_jobs(extra_args=[], inner_jobs=_DEFAULT_INNER_JOBS):
-  test_jobs = []
-  # portability C x86
-  test_jobs += _generate_jobs(languages=['c'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              arch='x86',
-                              compiler='default',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+def _create_portability_test_jobs(extra_args=[],
+                                  inner_jobs=_DEFAULT_INNER_JOBS):
+    test_jobs = []
+    # portability C x86
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['dbg'],
+        platforms=['linux'],
+        arch='x86',
+        compiler='default',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # portability C and C++ on x64
-  for compiler in ['gcc4.8', 'gcc5.3', 'gcc_musl',
-                   'clang3.5', 'clang3.6', 'clang3.7']:
-    test_jobs += _generate_jobs(languages=['c', 'c++'],
-                                configs=['dbg'],
-                                platforms=['linux'],
-                                arch='x64',
-                                compiler=compiler,
-                                labels=['portability', 'corelang'],
-                                extra_args=extra_args,
-                                inner_jobs=inner_jobs,
-                                timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    # portability C and C++ on x64
+    for compiler in [
+            'gcc4.8', 'gcc5.3', 'gcc_musl', 'clang3.5', 'clang3.6', 'clang3.7'
+    ]:
+        test_jobs += _generate_jobs(
+            languages=['c', 'c++'],
+            configs=['dbg'],
+            platforms=['linux'],
+            arch='x64',
+            compiler=compiler,
+            labels=['portability', 'corelang'],
+            extra_args=extra_args,
+            inner_jobs=inner_jobs,
+            timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  # portability C on Windows 64-bit (x86 is the default)
-  test_jobs += _generate_jobs(languages=['c'],
-                              configs=['dbg'],
-                              platforms=['windows'],
-                              arch='x64',
-                              compiler='default',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    # portability C on Windows 64-bit (x86 is the default)
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['dbg'],
+        platforms=['windows'],
+        arch='x64',
+        compiler='default',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  # portability C++ on Windows
-  # TODO(jtattermusch): some of the tests are failing, so we force --build_only
-  test_jobs += _generate_jobs(languages=['c++'],
-                              configs=['dbg'],
-                              platforms=['windows'],
-                              arch='default',
-                              compiler='default',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args + ['--build_only'],
-                              inner_jobs=inner_jobs)
+    # portability C++ on Windows
+    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+    test_jobs += _generate_jobs(
+        languages=['c++'],
+        configs=['dbg'],
+        platforms=['windows'],
+        arch='default',
+        compiler='default',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args + ['--build_only'],
+        inner_jobs=inner_jobs)
 
-  # portability C and C++ on Windows using VS2017 (build only)
-  # TODO(jtattermusch): some of the tests are failing, so we force --build_only
-  test_jobs += _generate_jobs(languages=['c', 'c++'],
-                              configs=['dbg'],
-                              platforms=['windows'],
-                              arch='x64',
-                              compiler='cmake_vs2017',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args + ['--build_only'],
-                              inner_jobs=inner_jobs)
+    # portability C and C++ on Windows using VS2017 (build only)
+    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+    test_jobs += _generate_jobs(
+        languages=['c', 'c++'],
+        configs=['dbg'],
+        platforms=['windows'],
+        arch='x64',
+        compiler='cmake_vs2017',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args + ['--build_only'],
+        inner_jobs=inner_jobs)
 
-  # C and C++ with the c-ares DNS resolver on Linux
-  test_jobs += _generate_jobs(languages=['c', 'c++'],
-                              configs=['dbg'], platforms=['linux'],
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args,
-                              extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    # C and C++ with the c-ares DNS resolver on Linux
+    test_jobs += _generate_jobs(
+        languages=['c', 'c++'],
+        configs=['dbg'],
+        platforms=['linux'],
+        labels=['portability', 'corelang'],
+        extra_args=extra_args,
+        extra_envs={'GRPC_DNS_RESOLVER': 'ares'},
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  # TODO(zyc): Turn on this test after adding c-ares support on windows.
-  # C with the c-ares DNS resolver on Windows
-  # test_jobs += _generate_jobs(languages=['c'],
-  #                             configs=['dbg'], platforms=['windows'],
-  #                             labels=['portability', 'corelang'],
-  #                             extra_args=extra_args,
-  #                             extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
+    # TODO(zyc): Turn on this test after adding c-ares support on windows.
+    # C with the c-ares DNS resolver on Windows
+    # test_jobs += _generate_jobs(languages=['c'],
+    #                             configs=['dbg'], platforms=['windows'],
+    #                             labels=['portability', 'corelang'],
+    #                             extra_args=extra_args,
+    #                             extra_envs={'GRPC_DNS_RESOLVER': 'ares'})
 
-  # C and C++ build with cmake on Linux
-  # TODO(jtattermusch): some of the tests are failing, so we force --build_only
-  # to make sure it's buildable at least.
-  test_jobs += _generate_jobs(languages=['c', 'c++'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              arch='default',
-                              compiler='cmake',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args + ['--build_only'],
-                              inner_jobs=inner_jobs)
+    # C and C++ build with cmake on Linux
+    # TODO(jtattermusch): some of the tests are failing, so we force --build_only
+    # to make sure it's buildable at least.
+    test_jobs += _generate_jobs(
+        languages=['c', 'c++'],
+        configs=['dbg'],
+        platforms=['linux'],
+        arch='default',
+        compiler='cmake',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args + ['--build_only'],
+        inner_jobs=inner_jobs)
 
-  test_jobs += _generate_jobs(languages=['python'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              arch='default',
-                              compiler='python_alpine',
-                              labels=['portability', 'multilang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    test_jobs += _generate_jobs(
+        languages=['python'],
+        configs=['dbg'],
+        platforms=['linux'],
+        arch='default',
+        compiler='python_alpine',
+        labels=['portability', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  test_jobs += _generate_jobs(languages=['csharp'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              arch='default',
-                              compiler='coreclr',
-                              labels=['portability', 'multilang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs)
+    test_jobs += _generate_jobs(
+        languages=['csharp'],
+        configs=['dbg'],
+        platforms=['linux'],
+        arch='default',
+        compiler='coreclr',
+        labels=['portability', 'multilang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs)
 
-  test_jobs += _generate_jobs(languages=['c'],
-                              configs=['dbg'],
-                              platforms=['linux'],
-                              iomgr_platform='uv',
-                              labels=['portability', 'corelang'],
-                              extra_args=extra_args,
-                              inner_jobs=inner_jobs,
-                              timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
+    test_jobs += _generate_jobs(
+        languages=['c'],
+        configs=['dbg'],
+        platforms=['linux'],
+        iomgr_platform='uv',
+        labels=['portability', 'corelang'],
+        extra_args=extra_args,
+        inner_jobs=inner_jobs,
+        timeout_seconds=_CPP_RUNTESTS_TIMEOUT)
 
-  return test_jobs
+    return test_jobs
 
 
 def _allowed_labels():
-  """Returns a list of existing job labels."""
-  all_labels = set()
-  for job in _create_test_jobs() + _create_portability_test_jobs():
-    for label in job.labels:
-      all_labels.add(label)
-  return sorted(all_labels)
+    """Returns a list of existing job labels."""
+    all_labels = set()
+    for job in _create_test_jobs() + _create_portability_test_jobs():
+        for label in job.labels:
+            all_labels.add(label)
+    return sorted(all_labels)
 
 
 def _runs_per_test_type(arg_str):
-  """Auxiliary function to parse the "runs_per_test" flag."""
-  try:
-    n = int(arg_str)
-    if n <= 0: raise ValueError
-    return n
-  except:
-    msg = '\'{}\' is not a positive integer'.format(arg_str)
-    raise argparse.ArgumentTypeError(msg)
+    """Auxiliary function to parse the "runs_per_test" flag."""
+    try:
+        n = int(arg_str)
+        if n <= 0: raise ValueError
+        return n
+    except:
+        msg = '\'{}\' is not a positive integer'.format(arg_str)
+        raise argparse.ArgumentTypeError(msg)
 
 
 if __name__ == "__main__":
-  argp = argparse.ArgumentParser(description='Run a matrix of run_tests.py tests.')
-  argp.add_argument('-j', '--jobs',
-                    default=multiprocessing.cpu_count()/_DEFAULT_INNER_JOBS,
-                    type=int,
-                    help='Number of concurrent run_tests.py instances.')
-  argp.add_argument('-f', '--filter',
-                    choices=_allowed_labels(),
-                    nargs='+',
-                    default=[],
-                    help='Filter targets to run by label with AND semantics.')
-  argp.add_argument('--exclude',
-                    choices=_allowed_labels(),
-                    nargs='+',
-                    default=[],
-                    help='Exclude targets with any of given labels.')
-  argp.add_argument('--build_only',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Pass --build_only flag to run_tests.py instances.')
-  argp.add_argument('--force_default_poller', default=False, action='store_const', const=True,
-                    help='Pass --force_default_poller to run_tests.py instances.')
-  argp.add_argument('--dry_run',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Only print what would be run.')
-  argp.add_argument('--filter_pr_tests',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Filters out tests irrelevant to pull request changes.')
-  argp.add_argument('--base_branch',
-                    default='origin/master',
-                    type=str,
-                    help='Branch that pull request is requesting to merge into')
-  argp.add_argument('--inner_jobs',
-                    default=_DEFAULT_INNER_JOBS,
-                    type=int,
-                    help='Number of jobs in each run_tests.py instance')
-  argp.add_argument('-n', '--runs_per_test', default=1, type=_runs_per_test_type,
-                    help='How many times to run each tests. >1 runs implies ' +
-                    'omitting passing test from the output & reports.')
-  argp.add_argument('--max_time', default=-1, type=int,
-                    help='Maximum amount of time to run tests for' +
-                         '(other tests will be skipped)')
-  argp.add_argument('--internal_ci',
-                    default=False,
-                    action='store_const',
-                    const=True,
-                    help='Put reports into subdirectories to improve presentation of '
-                    'results by Internal CI.')
-  argp.add_argument('--bq_result_table',
-                    default='',
-                    type=str,
-                    nargs='?',
-                    help='Upload test results to a specified BQ table.')
-  args = argp.parse_args()
+    argp = argparse.ArgumentParser(
+        description='Run a matrix of run_tests.py tests.')
+    argp.add_argument(
+        '-j',
+        '--jobs',
+        default=multiprocessing.cpu_count() / _DEFAULT_INNER_JOBS,
+        type=int,
+        help='Number of concurrent run_tests.py instances.')
+    argp.add_argument(
+        '-f',
+        '--filter',
+        choices=_allowed_labels(),
+        nargs='+',
+        default=[],
+        help='Filter targets to run by label with AND semantics.')
+    argp.add_argument(
+        '--exclude',
+        choices=_allowed_labels(),
+        nargs='+',
+        default=[],
+        help='Exclude targets with any of given labels.')
+    argp.add_argument(
+        '--build_only',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Pass --build_only flag to run_tests.py instances.')
+    argp.add_argument(
+        '--force_default_poller',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Pass --force_default_poller to run_tests.py instances.')
+    argp.add_argument(
+        '--dry_run',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Only print what would be run.')
+    argp.add_argument(
+        '--filter_pr_tests',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Filters out tests irrelevant to pull request changes.')
+    argp.add_argument(
+        '--base_branch',
+        default='origin/master',
+        type=str,
+        help='Branch that pull request is requesting to merge into')
+    argp.add_argument(
+        '--inner_jobs',
+        default=_DEFAULT_INNER_JOBS,
+        type=int,
+        help='Number of jobs in each run_tests.py instance')
+    argp.add_argument(
+        '-n',
+        '--runs_per_test',
+        default=1,
+        type=_runs_per_test_type,
+        help='How many times to run each test. >1 runs implies ' +
+        'omitting passing tests from the output & reports.')
+    argp.add_argument(
+        '--max_time',
+        default=-1,
+        type=int,
+        help='Maximum amount of time to run tests for ' +
+        '(other tests will be skipped)')
+    argp.add_argument(
+        '--internal_ci',
+        default=False,
+        action='store_const',
+        const=True,
+        help='Put reports into subdirectories to improve presentation of '
+        'results by Internal CI.')
+    argp.add_argument(
+        '--bq_result_table',
+        default='',
+        type=str,
+        nargs='?',
+        help='Upload test results to a specified BQ table.')
+    args = argp.parse_args()
 
-  if args.internal_ci:
-    _report_filename = _report_filename_internal_ci  # override the function
+    if args.internal_ci:
+        _report_filename = _report_filename_internal_ci  # override the function
 
-  extra_args = []
-  if args.build_only:
-    extra_args.append('--build_only')
-  if args.force_default_poller:
-    extra_args.append('--force_default_poller')
-  if args.runs_per_test > 1:
-    extra_args.append('-n')
-    extra_args.append('%s' % args.runs_per_test)
-    extra_args.append('--quiet_success')
-  if args.max_time > 0:
-    extra_args.extend(('--max_time', '%d' % args.max_time))
-  if args.bq_result_table:
-    extra_args.append('--bq_result_table')
-    extra_args.append('%s' % args.bq_result_table)
-    extra_args.append('--measure_cpu_costs')
-    extra_args.append('--disable_auto_set_flakes')
+    extra_args = []
+    if args.build_only:
+        extra_args.append('--build_only')
+    if args.force_default_poller:
+        extra_args.append('--force_default_poller')
+    if args.runs_per_test > 1:
+        extra_args.append('-n')
+        extra_args.append('%s' % args.runs_per_test)
+        extra_args.append('--quiet_success')
+    if args.max_time > 0:
+        extra_args.extend(('--max_time', '%d' % args.max_time))
+    if args.bq_result_table:
+        extra_args.append('--bq_result_table')
+        extra_args.append('%s' % args.bq_result_table)
+        extra_args.append('--measure_cpu_costs')
+        extra_args.append('--disable_auto_set_flakes')
 
-  all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
-             _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
+    all_jobs = _create_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs) + \
+               _create_portability_test_jobs(extra_args=extra_args, inner_jobs=args.inner_jobs)
 
-  jobs = []
-  for job in all_jobs:
-    if not args.filter or all(filter in job.labels for filter in args.filter):
-      if not any(exclude_label in job.labels for exclude_label in args.exclude):
-        jobs.append(job)
+    jobs = []
+    for job in all_jobs:
+        if not args.filter or all(
+                filter in job.labels for filter in args.filter):
+            if not any(exclude_label in job.labels
+                       for exclude_label in args.exclude):
+                jobs.append(job)
 
-  if not jobs:
-    jobset.message('FAILED', 'No test suites match given criteria.',
-                   do_newline=True)
-    sys.exit(1)
+    if not jobs:
+        jobset.message(
+            'FAILED', 'No test suites match given criteria.', do_newline=True)
+        sys.exit(1)
 
-  print('IMPORTANT: The changes you are testing need to be locally committed')
-  print('because only the committed changes in the current branch will be')
-  print('copied to the docker environment or into subworkspaces.')
+    print('IMPORTANT: The changes you are testing need to be locally committed')
+    print('because only the committed changes in the current branch will be')
+    print('copied to the docker environment or into subworkspaces.')
 
-  skipped_jobs = []
+    skipped_jobs = []
 
-  if args.filter_pr_tests:
-    print('Looking for irrelevant tests to skip...')
-    relevant_jobs = filter_tests(jobs, args.base_branch)
-    if len(relevant_jobs) == len(jobs):
-      print('No tests will be skipped.')
-    else:
-      print('These tests will be skipped:')
-      skipped_jobs = list(set(jobs) - set(relevant_jobs))
-      # Sort by shortnames to make printing of skipped tests consistent
-      skipped_jobs.sort(key=lambda job: job.shortname)
-      for job in list(skipped_jobs):
-        print('  %s' % job.shortname)
-    jobs = relevant_jobs
+    if args.filter_pr_tests:
+        print('Looking for irrelevant tests to skip...')
+        relevant_jobs = filter_tests(jobs, args.base_branch)
+        if len(relevant_jobs) == len(jobs):
+            print('No tests will be skipped.')
+        else:
+            print('These tests will be skipped:')
+            skipped_jobs = list(set(jobs) - set(relevant_jobs))
+            # Sort by shortnames to make printing of skipped tests consistent
+            skipped_jobs.sort(key=lambda job: job.shortname)
+            for job in list(skipped_jobs):
+                print('  %s' % job.shortname)
+        jobs = relevant_jobs
 
-  print('Will run these tests:')
-  for job in jobs:
+    print('Will run these tests:')
+    for job in jobs:
+        if args.dry_run:
+            print('  %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
+        else:
+            print('  %s' % job.shortname)
+    print
+
     if args.dry_run:
-      print('  %s: "%s"' % (job.shortname, ' '.join(job.cmdline)))
+        print('--dry_run was used, exiting')
+        sys.exit(1)
+
+    jobset.message('START', 'Running test matrix.', do_newline=True)
+    num_failures, resultset = jobset.run(
+        jobs, newline_on_success=True, travis=True, maxjobs=args.jobs)
+    # Merge skipped tests into results to show skipped tests on report.xml
+    if skipped_jobs:
+        ignored_num_skipped_failures, skipped_results = jobset.run(
+            skipped_jobs, skip_jobs=True)
+        resultset.update(skipped_results)
+    report_utils.render_junit_xml_report(
+        resultset,
+        _report_filename('aggregate_tests'),
+        suite_name='aggregate_tests')
+
+    if num_failures == 0:
+        jobset.message(
+            'SUCCESS',
+            'All run_tests.py instances finished successfully.',
+            do_newline=True)
     else:
-      print('  %s' % job.shortname)
-  print
-
-  if args.dry_run:
-    print('--dry_run was used, exiting')
-    sys.exit(1)
-
-  jobset.message('START', 'Running test matrix.', do_newline=True)
-  num_failures, resultset = jobset.run(jobs,
-                                       newline_on_success=True,
-                                       travis=True,
-                                       maxjobs=args.jobs)
-  # Merge skipped tests into results to show skipped tests on report.xml
-  if skipped_jobs:
-    ignored_num_skipped_failures, skipped_results = jobset.run(
-        skipped_jobs, skip_jobs=True)
-    resultset.update(skipped_results)
-  report_utils.render_junit_xml_report(resultset, _report_filename('aggregate_tests'),
-                                       suite_name='aggregate_tests')
-
-  if num_failures == 0:
-    jobset.message('SUCCESS', 'All run_tests.py instance finished successfully.',
-                   do_newline=True)
-  else:
-    jobset.message('FAILED', 'Some run_tests.py instance have failed.',
-                   do_newline=True)
-    sys.exit(1)
+        jobset.message(
+            'FAILED',
+            'Some run_tests.py instances have failed.',
+            do_newline=True)
+        sys.exit(1)
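A minimal sketch of the job-selection logic reformatted above, assuming only that each job exposes a labels list as in run_tests_matrix.py; select_jobs is a hypothetical helper name, shown here to make the AND semantics of --filter and the any-match semantics of --exclude explicit:

def select_jobs(all_jobs, filters, excludes):
    """Keep jobs carrying every --filter label and none of the --exclude labels."""
    selected = []
    for job in all_jobs:
        if filters and not all(f in job.labels for f in filters):
            continue  # --filter uses AND semantics over labels
        if any(e in job.labels for e in excludes):
            continue  # --exclude drops a job on any matching label
        selected.append(job)
    return selected

Called as select_jobs(all_jobs, args.filter, args.exclude), this yields the same jobs list the loop above builds.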
diff --git a/tools/run_tests/sanity/check_bazel_workspace.py b/tools/run_tests/sanity/check_bazel_workspace.py
index c7c4e86..62a6229 100755
--- a/tools/run_tests/sanity/check_bazel_workspace.py
+++ b/tools/run_tests/sanity/check_bazel_workspace.py
@@ -27,8 +27,14 @@
 git_hash_pattern = re.compile('[0-9a-f]{40}')
 
 # Parse git hashes from submodules
-git_submodules = subprocess.check_output('git submodule', shell=True).strip().split('\n')
-git_submodule_hashes = {re.search(git_hash_pattern, s).group() for s in git_submodules}
+git_submodules = subprocess.check_output(
+    'git submodule', shell=True).strip().split('\n')
+git_submodule_hashes = {
+    re.search(git_hash_pattern, s).group()
+    for s in git_submodules
+}
+
+_BAZEL_TOOLCHAINS_DEP_NAME = 'com_github_bazelbuild_bazeltoolchains'
 
 _GRPC_DEP_NAMES = [
     'boringssl',
@@ -39,6 +45,7 @@
     'com_github_google_benchmark',
     'com_github_cares_cares',
     'com_google_absl',
+    _BAZEL_TOOLCHAINS_DEP_NAME,
 ]
 
 
@@ -63,6 +70,9 @@
         return []
 
     def archive(self, **args):
+        if args['name'] == _BAZEL_TOOLCHAINS_DEP_NAME:
+            self.names_and_urls[args['name']] = 'dont care'
+            return
         self.names_and_urls[args['name']] = args['url']
 
 
@@ -82,7 +92,11 @@
     assert name in names_and_urls.keys()
 assert len(_GRPC_DEP_NAMES) == len(names_and_urls.keys())
 
-archive_urls = [names_and_urls[name] for name in names_and_urls.keys()]
+# bazeltoolchains is an exception to this sanity check:
+# we don't require that it has a corresponding git submodule.
+names_without_bazeltoolchains = names_and_urls.keys()
+names_without_bazeltoolchains.remove(_BAZEL_TOOLCHAINS_DEP_NAME)
+archive_urls = [names_and_urls[name] for name in names_without_bazeltoolchains]
 workspace_git_hashes = {
     re.search(git_hash_pattern, url).group()
     for url in archive_urls
@@ -96,7 +110,9 @@
 # the workspace, but not necessarily conversely. E.g. Bloaty is a dependency
 # not used by any of the targets built by Bazel.
 if len(workspace_git_hashes - git_submodule_hashes) > 0:
-    print("Found discrepancies between git submodules and Bazel WORKSPACE dependencies")
+    print(
+        "Found discrepancies between git submodules and Bazel WORKSPACE dependencies"
+    )
     sys.exit(1)
 
 # Also check that we can override each dependency
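A minimal sketch of the hash comparison this sanity check performs, assuming both the git submodule listing and the WORKSPACE archive URLs embed full 40-character hashes; find_hash_mismatches is a hypothetical helper, not part of the patch:

import re

_GIT_HASH = re.compile('[0-9a-f]{40}')

def find_hash_mismatches(archive_urls, submodule_lines):
    """Return WORKSPACE hashes with no matching git submodule hash."""
    workspace_hashes = {_GIT_HASH.search(url).group() for url in archive_urls}
    submodule_hashes = {_GIT_HASH.search(line).group() for line in submodule_lines}
    return workspace_hashes - submodule_hashes

An empty result corresponds to the check passing; bazeltoolchains is removed from the archive URLs before the comparison, as in the hunk above.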
diff --git a/tools/run_tests/sanity/check_clang_tidy.sh b/tools/run_tests/sanity/check_clang_tidy.sh
index 42ab511..6c4caa1 100755
--- a/tools/run_tests/sanity/check_clang_tidy.sh
+++ b/tools/run_tests/sanity/check_clang_tidy.sh
@@ -16,6 +16,6 @@
 set -e
 
 make buildtests \
-  -j `python -c 'import multiprocessing; print multiprocessing.cpu_count()'`
-find src/core src/cpp test/core test/cpp -name '*.h' -or -name '*.cc' | \
-  xargs tools/distrib/run_clang_tidy.py $*
+  -j "$(python -c 'import multiprocessing; print multiprocessing.cpu_count()')"
+find src/core src/cpp test/core test/cpp \( -name '*.h' -or -name '*.cc' \) -print0 \
+  | xargs -0 tools/distrib/run_clang_tidy.py "$@"
diff --git a/tools/run_tests/sanity/check_owners.sh b/tools/run_tests/sanity/check_owners.sh
index b681fed..de0e092 100755
--- a/tools/run_tests/sanity/check_owners.sh
+++ b/tools/run_tests/sanity/check_owners.sh
@@ -18,12 +18,12 @@
 
 export TEST=true
 
-cd `dirname $0`/../../..
+cd "$(dirname "$0")/../../.."
 
 owners=.github/CODEOWNERS
-want_owners=`mktemp /tmp/submXXXXXX`
+want_owners=$(mktemp /tmp/submXXXXXX)
 
-tools/mkowners/mkowners.py -o $want_owners
-diff -u $owners $want_owners
+tools/mkowners/mkowners.py -o "$want_owners"
+diff -u "$owners" "$want_owners"
 
-rm $want_owners
+rm "$want_owners"
diff --git a/tools/run_tests/sanity/check_shellcheck.sh b/tools/run_tests/sanity/check_shellcheck.sh
new file mode 100755
index 0000000..f2cba18
--- /dev/null
+++ b/tools/run_tests/sanity/check_shellcheck.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+# Copyright 2017 gRPC authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -e
+
+ROOT="$(dirname "$0")/../../.."
+
+DIRS=(
+    'tools/run_tests/helper_scripts'
+    'tools/run_tests/sanity'
+)
+
+for dir in "${DIRS[@]}"; do
+  find "$ROOT/$dir/" -name "*.sh" -type f -print0 | xargs -n1 -0 shellcheck
+done
diff --git a/tools/run_tests/sanity/check_sources_and_headers.py b/tools/run_tests/sanity/check_sources_and_headers.py
index cae175c..6a704eb 100755
--- a/tools/run_tests/sanity/check_sources_and_headers.py
+++ b/tools/run_tests/sanity/check_sources_and_headers.py
@@ -21,71 +21,109 @@
 import sys
 
 root = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../../..'))
-with open(os.path.join(root, 'tools', 'run_tests', 'generated', 'sources_and_headers.json')) as f:
-  js = json.loads(f.read())
+with open(
+        os.path.join(root, 'tools', 'run_tests', 'generated',
+                     'sources_and_headers.json')) as f:
+    js = json.loads(f.read())
 
 re_inc1 = re.compile(r'^#\s*include\s*"([^"]*)"')
 assert re_inc1.match('#include "foo"').group(1) == 'foo'
 re_inc2 = re.compile(r'^#\s*include\s*<((grpc|grpc\+\+)/[^"]*)>')
 assert re_inc2.match('#include <grpc++/foo>').group(1) == 'grpc++/foo'
 
+
 def get_target(name):
-  for target in js:
-    if target['name'] == name:
-      return target
-  assert False, 'no target %s' % name
+    for target in js:
+        if target['name'] == name:
+            return target
+    assert False, 'no target %s' % name
+
+
+def get_headers_transitive():
+    """Computes set of headers transitively provided by each target"""
+    target_headers_transitive = {}
+    for target in js:
+        target_name = target['name']
+        assert not target_headers_transitive.has_key(target_name)
+        target_headers_transitive[target_name] = set(target['headers'])
+
+    # Make sure each target's transitive headers contain those
+    # of their dependencies. If not, add them and continue doing
+    # so until we get a full pass over all targets without any updates.
+    closure_changed = True
+    while closure_changed:
+        closure_changed = False
+        for target in js:
+            target_name = target['name']
+            for dep in target['deps']:
+                headers = target_headers_transitive[target_name]
+                old_count = len(headers)
+                headers.update(target_headers_transitive[dep])
+                if old_count != len(headers):
+                    closure_changed = True
+    return target_headers_transitive
+
+
+# precompute transitive closure of headers provided by each target
+target_headers_transitive = get_headers_transitive()
+
 
 def target_has_header(target, name):
-  if name.startswith('absl/'): return True
-  # print target['name'], name
-  if name in target['headers']:
-    return True
-  for dep in target['deps']:
-    if target_has_header(get_target(dep), name):
-      return True
-  if name in ['src/core/lib/profiling/stap_probes.h',
-              'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h']:
-    return True
-  return False
+    if name.startswith('absl/'): return True
+    # print target['name'], name
+    if name in target['headers']:
+        return True
+    for dep in target['deps']:
+        if target_has_header(get_target(dep), name):
+            return True
+    if name in [
+            'src/core/lib/profiling/stap_probes.h',
+            'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h'
+    ]:
+        return True
+    return False
+
 
 def produces_object(name):
-  return os.path.splitext(name)[1] in ['.c', '.cc']
+    return os.path.splitext(name)[1] in ['.c', '.cc']
+
 
 c_ish = {}
 obj_producer_to_source = {'c': c_ish, 'c++': c_ish, 'csharp': {}}
 
 errors = 0
 for target in js:
-  if not target['third_party']:
-    for fn in target['src']:
-      with open(os.path.join(root, fn)) as f:
-        src = f.read().splitlines()
-      for line in src:
-        m = re_inc1.match(line)
-        if m:
-          if not target_has_header(target, m.group(1)):
-            print (
-              'target %s (%s) does not name header %s as a dependency' % (
-                target['name'], fn, m.group(1)))
-            errors += 1
-        m = re_inc2.match(line)
-        if m:
-          if not target_has_header(target, 'include/' + m.group(1)):
-            print (
-              'target %s (%s) does not name header %s as a dependency' % (
-                target['name'], fn, m.group(1)))
-            errors += 1
-  if target['type'] in ['lib', 'filegroup']:
-    for fn in target['src']:
-      language = target['language']
-      if produces_object(fn):
-        obj_base = os.path.splitext(os.path.basename(fn))[0]
-        if obj_base in obj_producer_to_source[language]:
-          if obj_producer_to_source[language][obj_base] != fn:
-            print (
-              'target %s (%s) produces an aliased object file with %s' % (
-                target['name'], fn, obj_producer_to_source[language][obj_base]))
-        else:
-          obj_producer_to_source[language][obj_base] = fn
+    if not target['third_party']:
+        for fn in target['src']:
+            with open(os.path.join(root, fn)) as f:
+                src = f.read().splitlines()
+            for line in src:
+                m = re_inc1.match(line)
+                if m:
+                    if not target_has_header(target, m.group(1)):
+                        print(
+                            'target %s (%s) does not name header %s as a dependency'
+                            % (target['name'], fn, m.group(1)))
+                        errors += 1
+                m = re_inc2.match(line)
+                if m:
+                    if not target_has_header(target, 'include/' + m.group(1)):
+                        print(
+                            'target %s (%s) does not name header %s as a dependency'
+                            % (target['name'], fn, m.group(1)))
+                        errors += 1
+    if target['type'] in ['lib', 'filegroup']:
+        for fn in target['src']:
+            language = target['language']
+            if produces_object(fn):
+                obj_base = os.path.splitext(os.path.basename(fn))[0]
+                if obj_base in obj_producer_to_source[language]:
+                    if obj_producer_to_source[language][obj_base] != fn:
+                        print(
+                            'target %s (%s) produces an aliased object file with %s'
+                            % (target['name'], fn,
+                               obj_producer_to_source[language][obj_base]))
+                else:
+                    obj_producer_to_source[language][obj_base] = fn
 
 assert errors == 0
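get_headers_transitive() above iterates to a fixed point so that each target's header set also contains everything provided by its dependencies; the loop terminates because header sets only grow and are bounded by the union of all headers. A minimal sketch of how such a precomputed closure could be consulted, assuming the same target/headers layout; target_has_header_fast is a hypothetical name, not what this patch wires in:

def target_has_header_fast(target, name, target_headers_transitive):
    """Set-membership lookup against the precomputed transitive closure."""
    if name.startswith('absl/'):
        return True  # absl headers are provided externally
    return name in target_headers_transitive[target['name']]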
diff --git a/tools/run_tests/sanity/check_submodules.sh b/tools/run_tests/sanity/check_submodules.sh
index b573d21..ca604c6 100755
--- a/tools/run_tests/sanity/check_submodules.sh
+++ b/tools/run_tests/sanity/check_submodules.sh
@@ -19,13 +19,13 @@
 
 export TEST=true
 
-cd `dirname $0`/../../..
+cd "$(dirname "$0")/../../.."
 
-submodules=`mktemp /tmp/submXXXXXX`
-want_submodules=`mktemp /tmp/submXXXXXX`
+submodules=$(mktemp /tmp/submXXXXXX)
+want_submodules=$(mktemp /tmp/submXXXXXX)
 
-git submodule | awk '{ print $1 }' | sort > $submodules
-cat << EOF | awk '{ print $1 }' | sort > $want_submodules
+git submodule | awk '{ print $1 }' | sort > "$submodules"
+cat << EOF | awk '{ print $1 }' | sort > "$want_submodules"
  5b7683f49e1e9223cf9927b24f6fd3d6bd82e3f8 third_party/benchmark (v1.2.0)
  be2ee342d3781ddb954f91f8a7e660c6f59e87e5 third_party/boringssl (heads/chromium-stable)
  886e7d75368e3f4fab3f4d0d3584e4abfc557755 third_party/boringssl-with-bazel (version_for_cocoapods_7.0-857-g886e7d7)
@@ -38,6 +38,6 @@
  cc4bed2d74f7c8717e31f9579214ab52a9c9c610 third_party/abseil-cpp
 EOF
 
-diff -u $submodules $want_submodules
+diff -u "$submodules" "$want_submodules"
 
-rm $submodules $want_submodules
+rm "$submodules" "$want_submodules"
diff --git a/tools/run_tests/sanity/check_test_filtering.py b/tools/run_tests/sanity/check_test_filtering.py
index ff4ecba..ebbb1a9 100755
--- a/tools/run_tests/sanity/check_test_filtering.py
+++ b/tools/run_tests/sanity/check_test_filtering.py
@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
 import os
 import sys
 import unittest
@@ -25,108 +24,139 @@
 from run_tests_matrix import _create_test_jobs, _create_portability_test_jobs
 import python_utils.filter_pull_request_tests as filter_pull_request_tests
 
-_LIST_OF_LANGUAGE_LABELS = ['c', 'c++', 'csharp', 'grpc-node', 'objc', 'php', 'php7', 'python', 'ruby']
+_LIST_OF_LANGUAGE_LABELS = [
+    'c', 'c++', 'csharp', 'grpc-node', 'objc', 'php', 'php7', 'python', 'ruby'
+]
 _LIST_OF_PLATFORM_LABELS = ['linux', 'macos', 'windows']
 
+
 class TestFilteringTest(unittest.TestCase):
 
-  def generate_all_tests(self):
-    all_jobs = _create_test_jobs() + _create_portability_test_jobs()
-    self.assertIsNotNone(all_jobs)
-    return all_jobs
+    def generate_all_tests(self):
+        all_jobs = _create_test_jobs() + _create_portability_test_jobs()
+        self.assertIsNotNone(all_jobs)
+        return all_jobs
 
-  def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
-    """
+    def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
+        """
     Default args should filter no tests because changed_files is empty and
     default labels should be able to match all jobs
     :param changed_files: mock list of changed_files from pull request
     :param labels: list of job labels that should be skipped
     """
-    all_jobs = self.generate_all_tests()
-    # Replacing _get_changed_files function to allow specifying changed files in filter_tests function
-    def _get_changed_files(foo):
-      return changed_files
-    filter_pull_request_tests._get_changed_files = _get_changed_files
-    print()
-    filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
+        all_jobs = self.generate_all_tests()
 
-    # Make sure sanity tests aren't being filtered out
-    sanity_tests_in_all_jobs = 0
-    sanity_tests_in_filtered_jobs = 0
-    for job in all_jobs:
-      if "sanity" in job.labels:
-        sanity_tests_in_all_jobs += 1
-    all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
-    for job in filtered_jobs:
-      if "sanity" in job.labels:
-        sanity_tests_in_filtered_jobs += 1
-    filtered_jobs = [job for job in filtered_jobs if "sanity" not in job.labels]
-    self.assertEquals(sanity_tests_in_all_jobs, sanity_tests_in_filtered_jobs)
+        # Replacing _get_changed_files function to allow specifying changed files in filter_tests function
+        def _get_changed_files(foo):
+            return changed_files
 
-    for label in labels:
-      for job in filtered_jobs:
-        self.assertNotIn(label, job.labels)
+        filter_pull_request_tests._get_changed_files = _get_changed_files
+        print()
+        filtered_jobs = filter_pull_request_tests.filter_tests(all_jobs, "test")
 
-    jobs_matching_labels = 0
-    for label in labels:
-      for job in all_jobs:
-        if (label in job.labels):
-          jobs_matching_labels += 1
-    self.assertEquals(len(filtered_jobs), len(all_jobs) - jobs_matching_labels)
+        # Make sure sanity tests aren't being filtered out
+        sanity_tests_in_all_jobs = 0
+        sanity_tests_in_filtered_jobs = 0
+        for job in all_jobs:
+            if "sanity" in job.labels:
+                sanity_tests_in_all_jobs += 1
+        all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
+        for job in filtered_jobs:
+            if "sanity" in job.labels:
+                sanity_tests_in_filtered_jobs += 1
+        filtered_jobs = [
+            job for job in filtered_jobs if "sanity" not in job.labels
+        ]
+        self.assertEquals(sanity_tests_in_all_jobs,
+                          sanity_tests_in_filtered_jobs)
 
-  def test_individual_language_filters(self):
-    # Changing unlisted file should trigger all languages
-    self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
-    # Changing core should trigger all tests
-    self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
-    # Testing individual languages
-    self.test_filtering(['test/core/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                                filter_pull_request_tests._CORE_TEST_SUITE.labels +
-                                                filter_pull_request_tests._CPP_TEST_SUITE.labels])
-    self.test_filtering(['src/cpp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                              filter_pull_request_tests._CPP_TEST_SUITE.labels])
-    self.test_filtering(['src/csharp/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                                 filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
-    self.test_filtering(['src/objective-c/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                                      filter_pull_request_tests._OBJC_TEST_SUITE.labels])
-    self.test_filtering(['src/php/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                              filter_pull_request_tests._PHP_TEST_SUITE.labels])
-    self.test_filtering(['src/python/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                                 filter_pull_request_tests._PYTHON_TEST_SUITE.labels])
-    self.test_filtering(['src/ruby/foo.bar'], [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                                               filter_pull_request_tests._RUBY_TEST_SUITE.labels])
+        for label in labels:
+            for job in filtered_jobs:
+                self.assertNotIn(label, job.labels)
 
-  def test_combined_language_filters(self):
-    self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'],
-                        [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                         filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._CORE_TEST_SUITE.labels])
-    self.test_filtering(['src/cpp/foo.bar', "src/csharp/foo.bar"],
-                        [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                         filter_pull_request_tests._CPP_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._CSHARP_TEST_SUITE.labels])
-    self.test_filtering(['src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar", "src/ruby/foo.bar"],
-                        [label for label in _LIST_OF_LANGUAGE_LABELS if label not in
-                         filter_pull_request_tests._OBJC_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._PHP_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._PYTHON_TEST_SUITE.labels and label not in
-                         filter_pull_request_tests._RUBY_TEST_SUITE.labels])
+        jobs_matching_labels = 0
+        for label in labels:
+            for job in all_jobs:
+                if (label in job.labels):
+                    jobs_matching_labels += 1
+        self.assertEquals(
+            len(filtered_jobs),
+            len(all_jobs) - jobs_matching_labels)
 
-  def test_platform_filter(self):
-    self.test_filtering(['vsprojects/foo.bar'], [label for label in _LIST_OF_PLATFORM_LABELS if label not in
-                                                 filter_pull_request_tests._WINDOWS_TEST_SUITE.labels])
+    def test_individual_language_filters(self):
+        # Changing unlisted file should trigger all languages
+        self.test_filtering(['ffffoo/bar.baz'], [_LIST_OF_LANGUAGE_LABELS])
+        # Changing core should trigger all tests
+        self.test_filtering(['src/core/foo.bar'], [_LIST_OF_LANGUAGE_LABELS])
+        # Testing individual languages
+        self.test_filtering(['test/core/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CORE_TEST_SUITE.labels +
+            filter_pull_request_tests._CPP_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/cpp/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/csharp/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/objective-c/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._OBJC_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/php/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._PHP_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/python/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/ruby/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
+        ])
 
-  def test_whitelist(self):
-    whitelist = filter_pull_request_tests._WHITELIST_DICT
-    files_that_should_trigger_all_tests = ['src/core/foo.bar',
-                                           'some_file_not_on_the_white_list',
-                                           'BUILD',
-                                           'etc/roots.pem',
-                                           'Makefile',
-                                           'tools/foo']
-    for key in whitelist.keys():
-      for file_name in files_that_should_trigger_all_tests:
-        self.assertFalse(re.match(key, file_name))
+    def test_combined_language_filters(self):
+        self.test_filtering(['src/cpp/foo.bar', 'test/core/foo.bar'], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels and
+            label not in filter_pull_request_tests._CORE_TEST_SUITE.labels
+        ])
+        self.test_filtering(['src/cpp/foo.bar', "src/csharp/foo.bar"], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._CPP_TEST_SUITE.labels and
+            label not in filter_pull_request_tests._CSHARP_TEST_SUITE.labels
+        ])
+        self.test_filtering([
+            'src/objective-c/foo.bar', 'src/php/foo.bar', "src/python/foo.bar",
+            "src/ruby/foo.bar"
+        ], [
+            label for label in _LIST_OF_LANGUAGE_LABELS
+            if label not in filter_pull_request_tests._OBJC_TEST_SUITE.labels
+            and label not in filter_pull_request_tests._PHP_TEST_SUITE.labels
+            and label not in filter_pull_request_tests._PYTHON_TEST_SUITE.labels
+            and label not in filter_pull_request_tests._RUBY_TEST_SUITE.labels
+        ])
+
+    def test_platform_filter(self):
+        self.test_filtering(['vsprojects/foo.bar'], [
+            label for label in _LIST_OF_PLATFORM_LABELS
+            if label not in filter_pull_request_tests._WINDOWS_TEST_SUITE.labels
+        ])
+
+    def test_whitelist(self):
+        whitelist = filter_pull_request_tests._WHITELIST_DICT
+        files_that_should_trigger_all_tests = [
+            'src/core/foo.bar', 'some_file_not_on_the_white_list', 'BUILD',
+            'etc/roots.pem', 'Makefile', 'tools/foo'
+        ]
+        for key in whitelist.keys():
+            for file_name in files_that_should_trigger_all_tests:
+                self.assertFalse(re.match(key, file_name))
+
 
 if __name__ == '__main__':
-  unittest.main(verbosity=2)
+    unittest.main(verbosity=2)
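The test above swaps out _get_changed_files so each invocation of test_filtering can simulate an arbitrary pull request. A minimal sketch of the same monkeypatch pattern in isolation; simulate_pr is a hypothetical helper and assumes the filter_pull_request_tests module imported above:

import python_utils.filter_pull_request_tests as filter_pull_request_tests

def simulate_pr(all_jobs, changed_files):
    """Run filter_tests as if `changed_files` were touched by the PR."""
    filter_pull_request_tests._get_changed_files = lambda base_branch: changed_files
    return filter_pull_request_tests.filter_tests(all_jobs, 'test')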
diff --git a/tools/run_tests/sanity/check_tracer_sanity.py b/tools/run_tests/sanity/check_tracer_sanity.py
index 997ec79..c4c7653 100755
--- a/tools/run_tests/sanity/check_tracer_sanity.py
+++ b/tools/run_tests/sanity/check_tracer_sanity.py
@@ -26,21 +26,22 @@
 tracers = []
 pattern = re.compile("GRPC_TRACER_INITIALIZER\((true|false), \"(.*)\"\)")
 for root, dirs, files in os.walk('src/core'):
-  for filename in files:
-    path = os.path.join(root, filename)
-    if os.path.splitext(path)[1] != '.c': continue
-    with open(path) as f:
-      text = f.read()
-    for o in pattern.findall(text):
-      tracers.append(o[1])
+    for filename in files:
+        path = os.path.join(root, filename)
+        if os.path.splitext(path)[1] != '.c': continue
+        with open(path) as f:
+            text = f.read()
+        for o in pattern.findall(text):
+            tracers.append(o[1])
 
 with open('doc/environment_variables.md') as f:
- text = f.read()
+    text = f.read()
 
 for t in tracers:
     if t not in text:
-        print("ERROR: tracer \"%s\" is not mentioned in doc/environment_variables.md" % t)
+        print(
+            "ERROR: tracer \"%s\" is not mentioned in doc/environment_variables.md"
+            % t)
         errors += 1
 
-
 assert errors == 0
diff --git a/tools/run_tests/sanity/check_unsecure.sh b/tools/run_tests/sanity/check_unsecure.sh
index 8584cbe..cca1235 100755
--- a/tools/run_tests/sanity/check_unsecure.sh
+++ b/tools/run_tests/sanity/check_unsecure.sh
@@ -18,10 +18,10 @@
 # Make sure that there is no path from known unsecure libraries and targets
 # to an SSL library. Any failure among these will make the script fail.
 
-test `bazel query 'somepath("//:grpc_unsecure", "//external:libssl")' 2>/dev/null | wc -l` -eq 0 || exit 1
-test `bazel query 'somepath("//:grpc++_unsecure", "//external:libssl")' 2>/dev/null | wc -l` -eq 0 || exit 1
-test `bazel query 'somepath("//:grpc++_codegen_proto", "//external:libssl")' 2>/dev/null | wc -l` -eq 0 || exit 1
-test `bazel query 'somepath("//test/cpp/microbenchmarks:helpers", "//external:libssl")' 2>/dev/null | wc -l` -eq 0 || exit 1
+test "$(bazel query 'somepath("//:grpc_unsecure", "//external:libssl")' 2>/dev/null | wc -l)" -eq 0 || exit 1
+test "$(bazel query 'somepath("//:grpc++_unsecure", "//external:libssl")' 2>/dev/null | wc -l)" -eq 0 || exit 1
+test "$(bazel query 'somepath("//:grpc++_codegen_proto", "//external:libssl")' 2>/dev/null | wc -l)" -eq 0 || exit 1
+test "$(bazel query 'somepath("//test/cpp/microbenchmarks:helpers", "//external:libssl")' 2>/dev/null | wc -l)" -eq 0 || exit 1
 
 exit 0
 
diff --git a/tools/run_tests/sanity/check_version.py b/tools/run_tests/sanity/check_version.py
index b9b6bab..6154b26 100755
--- a/tools/run_tests/sanity/check_version.py
+++ b/tools/run_tests/sanity/check_version.py
@@ -31,56 +31,56 @@
 from expand_version import Version
 
 try:
-  branch_name = subprocess.check_output(
-    'git rev-parse --abbrev-ref HEAD',
-    shell=True)
+    branch_name = subprocess.check_output(
+        'git rev-parse --abbrev-ref HEAD', shell=True)
 except:
-  print('WARNING: not a git repository')
-  branch_name = None
+    print('WARNING: not a git repository')
+    branch_name = None
 
 if branch_name is not None:
-  m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
-  if m:
-    print('RELEASE branch')
-    # version number should align with the branched version
-    check_version = lambda version: (
-      version.major == int(m.group(1)) and
-      version.minor == int(m.group(2)))
-    warning = 'Version key "%%s" value "%%s" should have a major version %s and minor version %s' % (m.group(1), m.group(2))
-  elif re.match(r'^debian/.*$', branch_name):
-    # no additional version checks for debian branches
-    check_version = lambda version: True
-  else:
-    # all other branches should have a -dev tag
-    check_version = lambda version: version.tag == 'dev'
-    warning = 'Version key "%s" value "%s" should have a -dev tag'
+    m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
+    if m:
+        print('RELEASE branch')
+        # version number should align with the branched version
+        check_version = lambda version: (
+          version.major == int(m.group(1)) and
+          version.minor == int(m.group(2)))
+        warning = 'Version key "%%s" value "%%s" should have a major version %s and minor version %s' % (
+            m.group(1), m.group(2))
+    elif re.match(r'^debian/.*$', branch_name):
+        # no additional version checks for debian branches
+        check_version = lambda version: True
+    else:
+        # all other branches should have a -dev tag
+        check_version = lambda version: version.tag == 'dev'
+        warning = 'Version key "%s" value "%s" should have a -dev tag'
 else:
-  check_version = lambda version: True
+    check_version = lambda version: True
 
 with open('build.yaml', 'r') as f:
-  build_yaml = yaml.load(f.read())
+    build_yaml = yaml.load(f.read())
 
 settings = build_yaml['settings']
 
 top_version = Version(settings['version'])
 if not check_version(top_version):
-  errors += 1
-  print(warning % ('version', top_version))
+    errors += 1
+    print(warning % ('version', top_version))
 
 for tag, value in settings.iteritems():
-  if re.match(r'^[a-z]+_version$', tag):
-    value = Version(value)
-    if tag != 'core_version':
-      if value.major != top_version.major:
-        errors += 1
-        print('major version mismatch on %s: %d vs %d' % (tag, value.major,
-                                                          top_version.major))
-      if value.minor != top_version.minor:
-        errors += 1
-        print('minor version mismatch on %s: %d vs %d' % (tag, value.minor,
-                                                          top_version.minor))
-    if not check_version(value):
-      errors += 1
-      print(warning % (tag, value))
+    if re.match(r'^[a-z]+_version$', tag):
+        value = Version(value)
+        if tag != 'core_version':
+            if value.major != top_version.major:
+                errors += 1
+                print('major version mismatch on %s: %d vs %d' %
+                      (tag, value.major, top_version.major))
+            if value.minor != top_version.minor:
+                errors += 1
+                print('minor version mismatch on %s: %d vs %d' %
+                      (tag, value.minor, top_version.minor))
+        if not check_version(value):
+            errors += 1
+            print(warning % (tag, value))
 
 sys.exit(errors)
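check_version.py chooses its validation rule from the branch name: release-X_Y branches must match the branched major/minor, debian branches are unconstrained, and every other branch must carry a -dev tag. A minimal sketch of that dispatch as a standalone function, assuming a Version object exposing major, minor and tag as in expand_version; make_version_check is a hypothetical name:

import re

def make_version_check(branch_name):
    """Return a predicate implementing the branch-dependent rules above."""
    if branch_name is None:
        return lambda v: True  # not a git checkout: nothing to enforce
    m = re.match(r'^release-([0-9]+)_([0-9]+)$', branch_name)
    if m:
        # Release branches pin the major/minor version.
        return lambda v: (v.major == int(m.group(1)) and
                          v.minor == int(m.group(2)))
    if re.match(r'^debian/.*$', branch_name):
        return lambda v: True  # no additional checks for debian branches
    # All other branches must carry a -dev tag.
    return lambda v: v.tag == 'dev'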
diff --git a/tools/run_tests/sanity/core_banned_functions.py b/tools/run_tests/sanity/core_banned_functions.py
index 1f13905..9ee2896 100755
--- a/tools/run_tests/sanity/core_banned_functions.py
+++ b/tools/run_tests/sanity/core_banned_functions.py
@@ -36,26 +36,28 @@
     'grpc_wsa_error(': ['src/core/lib/iomgr/error.c'],
     'grpc_log_if_error(': ['src/core/lib/iomgr/error.c'],
     'grpc_slice_malloc(': ['src/core/lib/slice/slice.c'],
-    'grpc_closure_create(' : ['src/core/lib/iomgr/closure.c'],
-    'grpc_closure_init(' : ['src/core/lib/iomgr/closure.c'],
-    'grpc_closure_sched(' : ['src/core/lib/iomgr/closure.c'],
-    'grpc_closure_run(' : ['src/core/lib/iomgr/closure.c'],
-    'grpc_closure_list_sched(' : ['src/core/lib/iomgr/closure.c'],
-    'gpr_getenv_silent(' : ['src/core/lib/support/log.c', 'src/core/lib/support/env_linux.c', 
-                            'src/core/lib/support/env_posix.c', 'src/core/lib/support/env_windows.c'],
+    'grpc_closure_create(': ['src/core/lib/iomgr/closure.c'],
+    'grpc_closure_init(': ['src/core/lib/iomgr/closure.c'],
+    'grpc_closure_sched(': ['src/core/lib/iomgr/closure.c'],
+    'grpc_closure_run(': ['src/core/lib/iomgr/closure.c'],
+    'grpc_closure_list_sched(': ['src/core/lib/iomgr/closure.c'],
+    'gpr_getenv_silent(': [
+        'src/core/lib/support/log.c', 'src/core/lib/support/env_linux.c',
+        'src/core/lib/support/env_posix.c', 'src/core/lib/support/env_windows.c'
+    ],
 }
 
 errors = 0
 for root, dirs, files in os.walk('src/core'):
-  for filename in files:
-    path = os.path.join(root, filename)
-    if os.path.splitext(path)[1] != '.c': continue
-    with open(path) as f:
-      text = f.read()
-    for banned, exceptions in BANNED_EXCEPT.items():
-      if path in exceptions: continue
-      if banned in text:
-        print('Illegal use of "%s" in %s' % (banned, path))
-        errors += 1
+    for filename in files:
+        path = os.path.join(root, filename)
+        if os.path.splitext(path)[1] != '.c': continue
+        with open(path) as f:
+            text = f.read()
+        for banned, exceptions in BANNED_EXCEPT.items():
+            if path in exceptions: continue
+            if banned in text:
+                print('Illegal use of "%s" in %s' % (banned, path))
+                errors += 1
 
 assert errors == 0
diff --git a/tools/run_tests/sanity/core_untyped_structs.sh b/tools/run_tests/sanity/core_untyped_structs.sh
index 792dd68..af31909 100755
--- a/tools/run_tests/sanity/core_untyped_structs.sh
+++ b/tools/run_tests/sanity/core_untyped_structs.sh
@@ -15,7 +15,7 @@
 
 set -e
 
-cd `dirname $0`/../../..
+cd "$(dirname "$0")/../../.."
 
 #
 # Make sure that all core struct/unions have a name or are typedef'ed
diff --git a/tools/run_tests/sanity/sanity_tests.yaml b/tools/run_tests/sanity/sanity_tests.yaml
index 3ce864a..dab991a 100644
--- a/tools/run_tests/sanity/sanity_tests.yaml
+++ b/tools/run_tests/sanity/sanity_tests.yaml
@@ -3,6 +3,7 @@
 - script: tools/run_tests/sanity/check_cache_mk.sh
 - script: tools/run_tests/sanity/check_owners.sh
 - script: tools/run_tests/sanity/check_sources_and_headers.py
+- script: tools/run_tests/sanity/check_shellcheck.sh
 - script: tools/run_tests/sanity/check_submodules.sh
 - script: tools/run_tests/sanity/check_test_filtering.py
 - script: tools/run_tests/sanity/check_tracer_sanity.py
diff --git a/tools/run_tests/start_port_server.py b/tools/run_tests/start_port_server.py
index 3628750..0eeceb4 100755
--- a/tools/run_tests/start_port_server.py
+++ b/tools/run_tests/start_port_server.py
@@ -13,7 +13,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """
 Wrapper around port server starting code.
 
diff --git a/tools/run_tests/task_runner.py b/tools/run_tests/task_runner.py
index a065bb8..794db6e 100755
--- a/tools/run_tests/task_runner.py
+++ b/tools/run_tests/task_runner.py
@@ -12,7 +12,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
 """Runs selected gRPC test/build tasks."""
 
 from __future__ import print_function
@@ -32,52 +31,54 @@
 _TARGETS += distribtest_targets.targets()
 _TARGETS += package_targets.targets()
 
+
 def _create_build_map():
-  """Maps task names and labels to list of tasks to be built."""
-  target_build_map = dict([(target.name, [target])
-                           for target in _TARGETS])
-  if len(_TARGETS) > len(target_build_map.keys()):
-    raise Exception('Target names need to be unique')
+    """Maps task names and labels to list of tasks to be built."""
+    target_build_map = dict([(target.name, [target]) for target in _TARGETS])
+    if len(_TARGETS) > len(target_build_map.keys()):
+        raise Exception('Target names need to be unique')
 
-  label_build_map = {}
-  label_build_map['all'] = [t for t in _TARGETS]  # to build all targets
-  for target in _TARGETS:
-    for label in target.labels:
-      if label in label_build_map:
-        label_build_map[label].append(target)
-      else:
-        label_build_map[label] = [target]
+    label_build_map = {}
+    label_build_map['all'] = [t for t in _TARGETS]  # to build all targets
+    for target in _TARGETS:
+        for label in target.labels:
+            if label in label_build_map:
+                label_build_map[label].append(target)
+            else:
+                label_build_map[label] = [target]
 
-  if set(target_build_map.keys()).intersection(label_build_map.keys()):
-    raise Exception('Target names need to be distinct from label names')
-  return dict( target_build_map.items() + label_build_map.items())
+    if set(target_build_map.keys()).intersection(label_build_map.keys()):
+        raise Exception('Target names need to be distinct from label names')
+    return dict(target_build_map.items() + label_build_map.items())
 
 
 _BUILD_MAP = _create_build_map()
 
 argp = argparse.ArgumentParser(description='Runs build/test targets.')
-argp.add_argument('-b', '--build',
-                  choices=sorted(_BUILD_MAP.keys()),
-                  nargs='+',
-                  default=['all'],
-                  help='Target name or target label to build.')
-argp.add_argument('-f', '--filter',
-                  choices=sorted(_BUILD_MAP.keys()),
-                  nargs='+',
-                  default=[],
-                  help='Filter targets to build with AND semantics.')
+argp.add_argument(
+    '-b',
+    '--build',
+    choices=sorted(_BUILD_MAP.keys()),
+    nargs='+',
+    default=['all'],
+    help='Target name or target label to build.')
+argp.add_argument(
+    '-f',
+    '--filter',
+    choices=sorted(_BUILD_MAP.keys()),
+    nargs='+',
+    default=[],
+    help='Filter targets to build with AND semantics.')
 argp.add_argument('-j', '--jobs', default=multiprocessing.cpu_count(), type=int)
-argp.add_argument('-t', '--travis',
-                  default=False,
-                  action='store_const',
-                  const=True)
+argp.add_argument(
+    '-t', '--travis', default=False, action='store_const', const=True)
 
 args = argp.parse_args()
 
 # Figure out which targets to build
 targets = []
 for label in args.build:
-  targets += _BUILD_MAP[label]
+    targets += _BUILD_MAP[label]
 
 # Among targets selected by -b, filter out those that don't match the filter
 targets = [t for t in targets if all(f in t.labels for f in args.filter)]
@@ -86,30 +87,29 @@
 # Execute pre-build phase
 prebuild_jobs = []
 for target in targets:
-  prebuild_jobs += target.pre_build_jobspecs()
+    prebuild_jobs += target.pre_build_jobspecs()
 if prebuild_jobs:
-  num_failures, _ = jobset.run(
-    prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
-  if num_failures != 0:
-    jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
-    sys.exit(1)
+    num_failures, _ = jobset.run(
+        prebuild_jobs, newline_on_success=True, maxjobs=args.jobs)
+    if num_failures != 0:
+        jobset.message('FAILED', 'Pre-build phase failed.', do_newline=True)
+        sys.exit(1)
 
 build_jobs = []
 for target in targets:
-  build_jobs.append(target.build_jobspec())
+    build_jobs.append(target.build_jobspec())
 if not build_jobs:
-  print('Nothing to build.')
-  sys.exit(1)
+    print('Nothing to build.')
+    sys.exit(1)
 
 jobset.message('START', 'Building targets.', do_newline=True)
 num_failures, resultset = jobset.run(
     build_jobs, newline_on_success=True, maxjobs=args.jobs)
-report_utils.render_junit_xml_report(resultset, 'report_taskrunner_sponge_log.xml',
-                                     suite_name='tasks')
+report_utils.render_junit_xml_report(
+    resultset, 'report_taskrunner_sponge_log.xml', suite_name='tasks')
 if num_failures == 0:
-  jobset.message('SUCCESS', 'All targets built successfully.',
-                 do_newline=True)
+    jobset.message(
+        'SUCCESS', 'All targets built successfully.', do_newline=True)
 else:
-  jobset.message('FAILED', 'Failed to build targets.',
-                 do_newline=True)
-  sys.exit(1)
+    jobset.message('FAILED', 'Failed to build targets.', do_newline=True)
+    sys.exit(1)