Merge branch 'master' of https://github.com/grpc/grpc into bazel-take-2
diff --git a/examples/python/helloworld/greeter_client.py b/examples/python/helloworld/greeter_client.py
index 44d42c1..281a68f 100644
--- a/examples/python/helloworld/greeter_client.py
+++ b/examples/python/helloworld/greeter_client.py
@@ -34,11 +34,12 @@
 import grpc
 
 import helloworld_pb2
+import helloworld_pb2_grpc
 
 
 def run():
   channel = grpc.insecure_channel('localhost:50051')
-  stub = helloworld_pb2.GreeterStub(channel)
+  stub = helloworld_pb2_grpc.GreeterStub(channel)
   response = stub.SayHello(helloworld_pb2.HelloRequest(name='you'))
   print("Greeter client received: " + response.message)
 
diff --git a/examples/python/helloworld/greeter_server.py b/examples/python/helloworld/greeter_server.py
index 37d8bd4..0afc21d 100644
--- a/examples/python/helloworld/greeter_server.py
+++ b/examples/python/helloworld/greeter_server.py
@@ -35,11 +35,12 @@
 import grpc
 
 import helloworld_pb2
+import helloworld_pb2_grpc
 
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
 
 
-class Greeter(helloworld_pb2.GreeterServicer):
+class Greeter(helloworld_pb2_grpc.GreeterServicer):
 
   def SayHello(self, request, context):
     return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)
@@ -47,7 +48,7 @@
 
 def serve():
   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
-  helloworld_pb2.add_GreeterServicer_to_server(Greeter(), server)
+  helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
   server.add_insecure_port('[::]:50051')
   server.start()
   try:
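
Both greeter files changed only because the generated service code moved: message classes stay in helloworld_pb2, while stubs, servicers, and the registration helper now come from helloworld_pb2_grpc. The split files themselves are generator output; something like the following regenerates them (a sketch assuming grpcio-tools is installed and run from the example directory, mirroring the example's run_codegen.py script):

from grpc.tools import protoc

# --python_out emits helloworld_pb2.py (messages plus the deprecated shims
# shown below); --grpc_python_out emits the new helloworld_pb2_grpc.py.
protoc.main((
    '',
    '-I../../protos',
    '--python_out=.',
    '--grpc_python_out=.',
    '../../protos/helloworld.proto',
))
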
diff --git a/examples/python/helloworld/helloworld_pb2.py b/examples/python/helloworld/helloworld_pb2.py
index 3ce33fb..6665b1f 100644
--- a/examples/python/helloworld/helloworld_pb2.py
+++ b/examples/python/helloworld/helloworld_pb2.py
@@ -107,98 +107,123 @@
 
 DESCRIPTOR.has_options = True
 DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
-import grpc
-from grpc.beta import implementations as beta_implementations
-from grpc.beta import interfaces as beta_interfaces
-from grpc.framework.common import cardinality
-from grpc.framework.interfaces.face import utilities as face_utilities
+try:
+  # THESE ELEMENTS WILL BE DEPRECATED.
+  # Please use the generated *_pb2_grpc.py files instead.
+  import grpc
+  from grpc.framework.common import cardinality
+  from grpc.framework.interfaces.face import utilities as face_utilities
+  from grpc.beta import implementations as beta_implementations
+  from grpc.beta import interfaces as beta_interfaces
 
 
-class GreeterStub(object):
-  """The greeting service definition.
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
+  class GreeterStub(object):
+    """The greeting service definition.
     """
-    self.SayHello = channel.unary_unary(
-        '/helloworld.Greeter/SayHello',
-        request_serializer=HelloRequest.SerializeToString,
-        response_deserializer=HelloReply.FromString,
-        )
+
+    def __init__(self, channel):
+      """Constructor.
+
+      Args:
+        channel: A grpc.Channel.
+      """
+      self.SayHello = channel.unary_unary(
+          '/helloworld.Greeter/SayHello',
+          request_serializer=HelloRequest.SerializeToString,
+          response_deserializer=HelloReply.FromString,
+          )
 
 
-class GreeterServicer(object):
-  """The greeting service definition.
-  """
-
-  def SayHello(self, request, context):
-    """Sends a greeting
+  class GreeterServicer(object):
+    """The greeting service definition.
     """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
+
+    def SayHello(self, request, context):
+      """Sends a greeting
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
 
 
-def add_GreeterServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'SayHello': grpc.unary_unary_rpc_method_handler(
-          servicer.SayHello,
-          request_deserializer=HelloRequest.FromString,
-          response_serializer=HelloReply.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'helloworld.Greeter', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
+  def add_GreeterServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+        'SayHello': grpc.unary_unary_rpc_method_handler(
+            servicer.SayHello,
+            request_deserializer=HelloRequest.FromString,
+            response_serializer=HelloReply.SerializeToString,
+        ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+        'helloworld.Greeter', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
 
 
-class BetaGreeterServicer(object):
-  """The greeting service definition.
-  """
-  def SayHello(self, request, context):
-    """Sends a greeting
+  class BetaGreeterServicer(object):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This class was generated
+    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.
+
+    The greeting service definition.
     """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+    def SayHello(self, request, context):
+      """Sends a greeting
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
 
 
-class BetaGreeterStub(object):
-  """The greeting service definition.
-  """
-  def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
-    """Sends a greeting
+  class BetaGreeterStub(object):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This class was generated
+    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.
+
+    The greeting service definition.
     """
-    raise NotImplementedError()
-  SayHello.future = None
+    def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
+      """Sends a greeting
+      """
+      raise NotImplementedError()
+    SayHello.future = None
 
 
-def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
-  request_deserializers = {
-    ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString,
-  }
-  response_serializers = {
-    ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString,
-  }
-  method_implementations = {
-    ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello),
-  }
-  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
-  return beta_implementations.server(method_implementations, options=server_options)
+  def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This function was
+    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
+    request_deserializers = {
+      ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString,
+    }
+    response_serializers = {
+      ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString,
+    }
+    method_implementations = {
+      ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello),
+    }
+    server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+    return beta_implementations.server(method_implementations, options=server_options)
 
 
-def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
-  request_serializers = {
-    ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString,
-  }
-  response_deserializers = {
-    ('helloworld.Greeter', 'SayHello'): HelloReply.FromString,
-  }
-  cardinalities = {
-    'SayHello': cardinality.Cardinality.UNARY_UNARY,
-  }
-  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
-  return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options)
+  def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This function was
+    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
+    request_serializers = {
+      ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString,
+    }
+    response_deserializers = {
+      ('helloworld.Greeter', 'SayHello'): HelloReply.FromString,
+    }
+    cardinalities = {
+      'SayHello': cardinality.Cardinality.UNARY_UNARY,
+    }
+    stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+    return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options)
+except ImportError:
+  pass
 # @@protoc_insertion_point(module_scope)
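
The try/except ImportError wrapper above is the backward-compatibility half of the split: every legacy service element stays in the _pb2 file but is defined only when the grpc runtime (including its beta modules) is importable, so the module still loads for message-only use. Reduced to its shape (a sketch; HelloRequest and HelloReply stand in for the message classes generated earlier in the file):

try:
  # Legacy stubs are defined only when the grpc runtime is available.
  import grpc

  class GreeterStub(object):
    def __init__(self, channel):
      self.SayHello = channel.unary_unary(
          '/helloworld.Greeter/SayHello',
          request_serializer=HelloRequest.SerializeToString,
          response_deserializer=HelloReply.FromString,
          )
except ImportError:
  # Without grpc installed the module still exposes its message classes.
  pass
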
diff --git a/examples/python/helloworld/helloworld_pb2_grpc.py b/examples/python/helloworld/helloworld_pb2_grpc.py
new file mode 100644
index 0000000..682dc36
--- /dev/null
+++ b/examples/python/helloworld/helloworld_pb2_grpc.py
@@ -0,0 +1,47 @@
+import grpc
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+import helloworld_pb2 as helloworld__pb2
+
+
+class GreeterStub(object):
+  """The greeting service definition.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.SayHello = channel.unary_unary(
+        '/helloworld.Greeter/SayHello',
+        request_serializer=helloworld__pb2.HelloRequest.SerializeToString,
+        response_deserializer=helloworld__pb2.HelloReply.FromString,
+        )
+
+
+class GreeterServicer(object):
+  """The greeting service definition.
+  """
+
+  def SayHello(self, request, context):
+    """Sends a greeting
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_GreeterServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'SayHello': grpc.unary_unary_rpc_method_handler(
+          servicer.SayHello,
+          request_deserializer=helloworld__pb2.HelloRequest.FromString,
+          response_serializer=helloworld__pb2.HelloReply.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'helloworld.Greeter', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
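
The generated GreeterServicer is a base class whose methods answer UNIMPLEMENTED; an application subclasses it and hands an instance to add_GreeterServicer_to_server, which wires each method into a generic handler under the 'helloworld.Greeter' service name. A minimal serving sketch (mirroring greeter_server.py above):

from concurrent import futures

import grpc

import helloworld_pb2
import helloworld_pb2_grpc

class Greeter(helloworld_pb2_grpc.GreeterServicer):

  def SayHello(self, request, context):
    # Overrides the UNIMPLEMENTED default from the generated base class.
    return helloworld_pb2.HelloReply(message='Hello, %s!' % request.name)

server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port('[::]:50051')
server.start()
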
diff --git a/examples/python/multiplex/helloworld_pb2.py b/examples/python/multiplex/helloworld_pb2.py
index 3ce33fb..6665b1f 100644
--- a/examples/python/multiplex/helloworld_pb2.py
+++ b/examples/python/multiplex/helloworld_pb2.py
@@ -107,98 +107,123 @@
 
 DESCRIPTOR.has_options = True
 DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
-import grpc
-from grpc.beta import implementations as beta_implementations
-from grpc.beta import interfaces as beta_interfaces
-from grpc.framework.common import cardinality
-from grpc.framework.interfaces.face import utilities as face_utilities
+try:
+  # THESE ELEMENTS WILL BE DEPRECATED.
+  # Please use the generated *_pb2_grpc.py files instead.
+  import grpc
+  from grpc.framework.common import cardinality
+  from grpc.framework.interfaces.face import utilities as face_utilities
+  from grpc.beta import implementations as beta_implementations
+  from grpc.beta import interfaces as beta_interfaces
 
 
-class GreeterStub(object):
-  """The greeting service definition.
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
+  class GreeterStub(object):
+    """The greeting service definition.
     """
-    self.SayHello = channel.unary_unary(
-        '/helloworld.Greeter/SayHello',
-        request_serializer=HelloRequest.SerializeToString,
-        response_deserializer=HelloReply.FromString,
-        )
+
+    def __init__(self, channel):
+      """Constructor.
+
+      Args:
+        channel: A grpc.Channel.
+      """
+      self.SayHello = channel.unary_unary(
+          '/helloworld.Greeter/SayHello',
+          request_serializer=HelloRequest.SerializeToString,
+          response_deserializer=HelloReply.FromString,
+          )
 
 
-class GreeterServicer(object):
-  """The greeting service definition.
-  """
-
-  def SayHello(self, request, context):
-    """Sends a greeting
+  class GreeterServicer(object):
+    """The greeting service definition.
     """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
+
+    def SayHello(self, request, context):
+      """Sends a greeting
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
 
 
-def add_GreeterServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'SayHello': grpc.unary_unary_rpc_method_handler(
-          servicer.SayHello,
-          request_deserializer=HelloRequest.FromString,
-          response_serializer=HelloReply.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'helloworld.Greeter', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
+  def add_GreeterServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+        'SayHello': grpc.unary_unary_rpc_method_handler(
+            servicer.SayHello,
+            request_deserializer=HelloRequest.FromString,
+            response_serializer=HelloReply.SerializeToString,
+        ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+        'helloworld.Greeter', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
 
 
-class BetaGreeterServicer(object):
-  """The greeting service definition.
-  """
-  def SayHello(self, request, context):
-    """Sends a greeting
+  class BetaGreeterServicer(object):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This class was generated
+    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.
+
+    The greeting service definition.
     """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+    def SayHello(self, request, context):
+      """Sends a greeting
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
 
 
-class BetaGreeterStub(object):
-  """The greeting service definition.
-  """
-  def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
-    """Sends a greeting
+  class BetaGreeterStub(object):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This class was generated
+    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.
+
+    The greeting service definition.
     """
-    raise NotImplementedError()
-  SayHello.future = None
+    def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
+      """Sends a greeting
+      """
+      raise NotImplementedError()
+    SayHello.future = None
 
 
-def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
-  request_deserializers = {
-    ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString,
-  }
-  response_serializers = {
-    ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString,
-  }
-  method_implementations = {
-    ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello),
-  }
-  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
-  return beta_implementations.server(method_implementations, options=server_options)
+  def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This function was
+    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
+    request_deserializers = {
+      ('helloworld.Greeter', 'SayHello'): HelloRequest.FromString,
+    }
+    response_serializers = {
+      ('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString,
+    }
+    method_implementations = {
+      ('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello),
+    }
+    server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+    return beta_implementations.server(method_implementations, options=server_options)
 
 
-def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
-  request_serializers = {
-    ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString,
-  }
-  response_deserializers = {
-    ('helloworld.Greeter', 'SayHello'): HelloReply.FromString,
-  }
-  cardinalities = {
-    'SayHello': cardinality.Cardinality.UNARY_UNARY,
-  }
-  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
-  return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options)
+  def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This function was
+    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
+    request_serializers = {
+      ('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString,
+    }
+    response_deserializers = {
+      ('helloworld.Greeter', 'SayHello'): HelloReply.FromString,
+    }
+    cardinalities = {
+      'SayHello': cardinality.Cardinality.UNARY_UNARY,
+    }
+    stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+    return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options)
+except ImportError:
+  pass
 # @@protoc_insertion_point(module_scope)
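
The retained beta_create_* helpers exist only so code written against grpcio<0.15.0 keeps importing; new code should use the GA surface. A hedged sketch of the correspondence on the client side:

# Beta API (deprecated, kept for transition only):
#   from grpc.beta import implementations
#   channel = implementations.insecure_channel('localhost', 50051)
#   stub = helloworld_pb2.beta_create_Greeter_stub(channel)
# GA replacement:
import grpc
import helloworld_pb2_grpc

channel = grpc.insecure_channel('localhost:50051')
stub = helloworld_pb2_grpc.GreeterStub(channel)
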
diff --git a/examples/python/multiplex/helloworld_pb2_grpc.py b/examples/python/multiplex/helloworld_pb2_grpc.py
new file mode 100644
index 0000000..682dc36
--- /dev/null
+++ b/examples/python/multiplex/helloworld_pb2_grpc.py
@@ -0,0 +1,47 @@
+import grpc
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+import helloworld_pb2 as helloworld__pb2
+
+
+class GreeterStub(object):
+  """The greeting service definition.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.SayHello = channel.unary_unary(
+        '/helloworld.Greeter/SayHello',
+        request_serializer=helloworld__pb2.HelloRequest.SerializeToString,
+        response_deserializer=helloworld__pb2.HelloReply.FromString,
+        )
+
+
+class GreeterServicer(object):
+  """The greeting service definition.
+  """
+
+  def SayHello(self, request, context):
+    """Sends a greeting
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_GreeterServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'SayHello': grpc.unary_unary_rpc_method_handler(
+          servicer.SayHello,
+          request_deserializer=helloworld__pb2.HelloRequest.FromString,
+          response_serializer=helloworld__pb2.HelloReply.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'helloworld.Greeter', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
diff --git a/examples/python/multiplex/multiplex_client.py b/examples/python/multiplex/multiplex_client.py
index 2e81629..b2d2021 100644
--- a/examples/python/multiplex/multiplex_client.py
+++ b/examples/python/multiplex/multiplex_client.py
@@ -37,7 +37,9 @@
 import grpc
 
 import helloworld_pb2
+import helloworld_pb2_grpc
 import route_guide_pb2
+import route_guide_pb2_grpc
 import route_guide_resources
 
 
@@ -120,8 +122,8 @@
 
 def run():
   channel = grpc.insecure_channel('localhost:50051')
-  greeter_stub = helloworld_pb2.GreeterStub(channel)
-  route_guide_stub = route_guide_pb2.RouteGuideStub(channel)
+  greeter_stub = helloworld_pb2_grpc.GreeterStub(channel)
+  route_guide_stub = route_guide_pb2_grpc.RouteGuideStub(channel)
   greeter_response = greeter_stub.SayHello(
       helloworld_pb2.HelloRequest(name='you'))
   print("Greeter client received: " + greeter_response.message)
diff --git a/examples/python/multiplex/multiplex_server.py b/examples/python/multiplex/multiplex_server.py
index 32a4ee4..b8b32e7 100644
--- a/examples/python/multiplex/multiplex_server.py
+++ b/examples/python/multiplex/multiplex_server.py
@@ -36,7 +36,9 @@
 import grpc
 
 import helloworld_pb2
+import helloworld_pb2_grpc
 import route_guide_pb2
+import route_guide_pb2_grpc
 import route_guide_resources
 
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
@@ -70,13 +72,13 @@
   return R * c
 
 
-class _GreeterServicer(helloworld_pb2.GreeterServicer):
+class _GreeterServicer(helloworld_pb2_grpc.GreeterServicer):
 
   def SayHello(self, request, context):
     return helloworld_pb2.HelloReply(message='Hello, {}!'.format(request.name))
 
 
-class _RouteGuideServicer(route_guide_pb2.RouteGuideServicer):
+class _RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
   """Provides methods that implement functionality of route guide server."""
 
   def __init__(self):
@@ -133,8 +135,8 @@
 
 def serve():
   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
-  helloworld_pb2.add_GreeterServicer_to_server(_GreeterServicer(), server)
-  route_guide_pb2.add_RouteGuideServicer_to_server(
+  helloworld_pb2_grpc.add_GreeterServicer_to_server(_GreeterServicer(), server)
+  route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
       _RouteGuideServicer(), server)
   server.add_insecure_port('[::]:50051')
   server.start()
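
Symmetrically, one grpc.server can host both services: each add_*Servicer_to_server call installs a generic handler keyed by its fully qualified service name ('helloworld.Greeter', 'routeguide.RouteGuide'), and incoming RPCs are dispatched by method path. A reduced sketch (servicer classes as defined in multiplex_server.py above):

from concurrent import futures

import grpc

import helloworld_pb2_grpc
import route_guide_pb2_grpc

server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
helloworld_pb2_grpc.add_GreeterServicer_to_server(_GreeterServicer(), server)
route_guide_pb2_grpc.add_RouteGuideServicer_to_server(_RouteGuideServicer(), server)
server.add_insecure_port('[::]:50051')
server.start()
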
diff --git a/examples/python/multiplex/route_guide_pb2.py b/examples/python/multiplex/route_guide_pb2.py
index 924e186..e6775eb 100644
--- a/examples/python/multiplex/route_guide_pb2.py
+++ b/examples/python/multiplex/route_guide_pb2.py
@@ -277,240 +277,265 @@
 
 DESCRIPTOR.has_options = True
 DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG'))
-import grpc
-from grpc.beta import implementations as beta_implementations
-from grpc.beta import interfaces as beta_interfaces
-from grpc.framework.common import cardinality
-from grpc.framework.interfaces.face import utilities as face_utilities
+try:
+  # THESE ELEMENTS WILL BE DEPRECATED.
+  # Please use the generated *_pb2_grpc.py files instead.
+  import grpc
+  from grpc.framework.common import cardinality
+  from grpc.framework.interfaces.face import utilities as face_utilities
+  from grpc.beta import implementations as beta_implementations
+  from grpc.beta import interfaces as beta_interfaces
 
 
-class RouteGuideStub(object):
-  """Interface exported by the server.
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
+  class RouteGuideStub(object):
+    """Interface exported by the server.
     """
-    self.GetFeature = channel.unary_unary(
-        '/routeguide.RouteGuide/GetFeature',
-        request_serializer=Point.SerializeToString,
-        response_deserializer=Feature.FromString,
-        )
-    self.ListFeatures = channel.unary_stream(
-        '/routeguide.RouteGuide/ListFeatures',
-        request_serializer=Rectangle.SerializeToString,
-        response_deserializer=Feature.FromString,
-        )
-    self.RecordRoute = channel.stream_unary(
-        '/routeguide.RouteGuide/RecordRoute',
-        request_serializer=Point.SerializeToString,
-        response_deserializer=RouteSummary.FromString,
-        )
-    self.RouteChat = channel.stream_stream(
-        '/routeguide.RouteGuide/RouteChat',
-        request_serializer=RouteNote.SerializeToString,
-        response_deserializer=RouteNote.FromString,
-        )
+
+    def __init__(self, channel):
+      """Constructor.
+
+      Args:
+        channel: A grpc.Channel.
+      """
+      self.GetFeature = channel.unary_unary(
+          '/routeguide.RouteGuide/GetFeature',
+          request_serializer=Point.SerializeToString,
+          response_deserializer=Feature.FromString,
+          )
+      self.ListFeatures = channel.unary_stream(
+          '/routeguide.RouteGuide/ListFeatures',
+          request_serializer=Rectangle.SerializeToString,
+          response_deserializer=Feature.FromString,
+          )
+      self.RecordRoute = channel.stream_unary(
+          '/routeguide.RouteGuide/RecordRoute',
+          request_serializer=Point.SerializeToString,
+          response_deserializer=RouteSummary.FromString,
+          )
+      self.RouteChat = channel.stream_stream(
+          '/routeguide.RouteGuide/RouteChat',
+          request_serializer=RouteNote.SerializeToString,
+          response_deserializer=RouteNote.FromString,
+          )
 
 
-class RouteGuideServicer(object):
-  """Interface exported by the server.
-  """
-
-  def GetFeature(self, request, context):
-    """A simple RPC.
-
-    Obtains the feature at a given position.
-
-    A feature with an empty name is returned if there's no feature at the given
-    position.
+  class RouteGuideServicer(object):
+    """Interface exported by the server.
     """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
 
-  def ListFeatures(self, request, context):
-    """A server-to-client streaming RPC.
+    def GetFeature(self, request, context):
+      """A simple RPC.
 
-    Obtains the Features available within the given Rectangle.  Results are
-    streamed rather than returned at once (e.g. in a response message with a
-    repeated field), as the rectangle may cover a large area and contain a
-    huge number of features.
+      Obtains the feature at a given position.
+
+      A feature with an empty name is returned if there's no feature at the given
+      position.
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
+
+    def ListFeatures(self, request, context):
+      """A server-to-client streaming RPC.
+
+      Obtains the Features available within the given Rectangle.  Results are
+      streamed rather than returned at once (e.g. in a response message with a
+      repeated field), as the rectangle may cover a large area and contain a
+      huge number of features.
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
+
+    def RecordRoute(self, request_iterator, context):
+      """A client-to-server streaming RPC.
+
+      Accepts a stream of Points on a route being traversed, returning a
+      RouteSummary when traversal is completed.
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
+
+    def RouteChat(self, request_iterator, context):
+      """A Bidirectional streaming RPC.
+
+      Accepts a stream of RouteNotes sent while a route is being traversed,
+      while receiving other RouteNotes (e.g. from other users).
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
+
+
+  def add_RouteGuideServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+        'GetFeature': grpc.unary_unary_rpc_method_handler(
+            servicer.GetFeature,
+            request_deserializer=Point.FromString,
+            response_serializer=Feature.SerializeToString,
+        ),
+        'ListFeatures': grpc.unary_stream_rpc_method_handler(
+            servicer.ListFeatures,
+            request_deserializer=Rectangle.FromString,
+            response_serializer=Feature.SerializeToString,
+        ),
+        'RecordRoute': grpc.stream_unary_rpc_method_handler(
+            servicer.RecordRoute,
+            request_deserializer=Point.FromString,
+            response_serializer=RouteSummary.SerializeToString,
+        ),
+        'RouteChat': grpc.stream_stream_rpc_method_handler(
+            servicer.RouteChat,
+            request_deserializer=RouteNote.FromString,
+            response_serializer=RouteNote.SerializeToString,
+        ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+        'routeguide.RouteGuide', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+
+
+  class BetaRouteGuideServicer(object):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This class was generated
+    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.
+
+    Interface exported by the server.
     """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
+    def GetFeature(self, request, context):
+      """A simple RPC.
 
-  def RecordRoute(self, request_iterator, context):
-    """A client-to-server streaming RPC.
+      Obtains the feature at a given position.
 
-    Accepts a stream of Points on a route being traversed, returning a
-    RouteSummary when traversal is completed.
+      A feature with an empty name is returned if there's no feature at the given
+      position.
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+    def ListFeatures(self, request, context):
+      """A server-to-client streaming RPC.
+
+      Obtains the Features available within the given Rectangle.  Results are
+      streamed rather than returned at once (e.g. in a response message with a
+      repeated field), as the rectangle may cover a large area and contain a
+      huge number of features.
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+    def RecordRoute(self, request_iterator, context):
+      """A client-to-server streaming RPC.
+
+      Accepts a stream of Points on a route being traversed, returning a
+      RouteSummary when traversal is completed.
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+    def RouteChat(self, request_iterator, context):
+      """A Bidirectional streaming RPC.
+
+      Accepts a stream of RouteNotes sent while a route is being traversed,
+      while receiving other RouteNotes (e.g. from other users).
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+
+  class BetaRouteGuideStub(object):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This class was generated
+    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.
+
+    Interface exported by the server.
     """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
+    def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
+      """A simple RPC.
 
-  def RouteChat(self, request_iterator, context):
-    """A Bidirectional streaming RPC.
+      Obtains the feature at a given position.
 
-    Accepts a stream of RouteNotes sent while a route is being traversed,
-    while receiving other RouteNotes (e.g. from other users).
-    """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
+      A feature with an empty name is returned if there's no feature at the given
+      position.
+      """
+      raise NotImplementedError()
+    GetFeature.future = None
+    def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
+      """A server-to-client streaming RPC.
+
+      Obtains the Features available within the given Rectangle.  Results are
+      streamed rather than returned at once (e.g. in a response message with a
+      repeated field), as the rectangle may cover a large area and contain a
+      huge number of features.
+      """
+      raise NotImplementedError()
+    def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
+      """A client-to-server streaming RPC.
+
+      Accepts a stream of Points on a route being traversed, returning a
+      RouteSummary when traversal is completed.
+      """
+      raise NotImplementedError()
+    RecordRoute.future = None
+    def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
+      """A Bidirectional streaming RPC.
+
+      Accepts a stream of RouteNotes sent while a route is being traversed,
+      while receiving other RouteNotes (e.g. from other users).
+      """
+      raise NotImplementedError()
 
 
-def add_RouteGuideServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'GetFeature': grpc.unary_unary_rpc_method_handler(
-          servicer.GetFeature,
-          request_deserializer=Point.FromString,
-          response_serializer=Feature.SerializeToString,
-      ),
-      'ListFeatures': grpc.unary_stream_rpc_method_handler(
-          servicer.ListFeatures,
-          request_deserializer=Rectangle.FromString,
-          response_serializer=Feature.SerializeToString,
-      ),
-      'RecordRoute': grpc.stream_unary_rpc_method_handler(
-          servicer.RecordRoute,
-          request_deserializer=Point.FromString,
-          response_serializer=RouteSummary.SerializeToString,
-      ),
-      'RouteChat': grpc.stream_stream_rpc_method_handler(
-          servicer.RouteChat,
-          request_deserializer=RouteNote.FromString,
-          response_serializer=RouteNote.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'routeguide.RouteGuide', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
+  def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This function was
+    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
+    request_deserializers = {
+      ('routeguide.RouteGuide', 'GetFeature'): Point.FromString,
+      ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString,
+      ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString,
+      ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
+    }
+    response_serializers = {
+      ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString,
+      ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString,
+      ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString,
+      ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
+    }
+    method_implementations = {
+      ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature),
+      ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures),
+      ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute),
+      ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat),
+    }
+    server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+    return beta_implementations.server(method_implementations, options=server_options)
 
 
-class BetaRouteGuideServicer(object):
-  """Interface exported by the server.
-  """
-  def GetFeature(self, request, context):
-    """A simple RPC.
+  def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+    """The Beta API is deprecated for 0.15.0 and later.
 
-    Obtains the feature at a given position.
-
-    A feature with an empty name is returned if there's no feature at the given
-    position.
-    """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
-  def ListFeatures(self, request, context):
-    """A server-to-client streaming RPC.
-
-    Obtains the Features available within the given Rectangle.  Results are
-    streamed rather than returned at once (e.g. in a response message with a
-    repeated field), as the rectangle may cover a large area and contain a
-    huge number of features.
-    """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
-  def RecordRoute(self, request_iterator, context):
-    """A client-to-server streaming RPC.
-
-    Accepts a stream of Points on a route being traversed, returning a
-    RouteSummary when traversal is completed.
-    """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
-  def RouteChat(self, request_iterator, context):
-    """A Bidirectional streaming RPC.
-
-    Accepts a stream of RouteNotes sent while a route is being traversed,
-    while receiving other RouteNotes (e.g. from other users).
-    """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
-
-
-class BetaRouteGuideStub(object):
-  """Interface exported by the server.
-  """
-  def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
-    """A simple RPC.
-
-    Obtains the feature at a given position.
-
-    A feature with an empty name is returned if there's no feature at the given
-    position.
-    """
-    raise NotImplementedError()
-  GetFeature.future = None
-  def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
-    """A server-to-client streaming RPC.
-
-    Obtains the Features available within the given Rectangle.  Results are
-    streamed rather than returned at once (e.g. in a response message with a
-    repeated field), as the rectangle may cover a large area and contain a
-    huge number of features.
-    """
-    raise NotImplementedError()
-  def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
-    """A client-to-server streaming RPC.
-
-    Accepts a stream of Points on a route being traversed, returning a
-    RouteSummary when traversal is completed.
-    """
-    raise NotImplementedError()
-  RecordRoute.future = None
-  def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
-    """A Bidirectional streaming RPC.
-
-    Accepts a stream of RouteNotes sent while a route is being traversed,
-    while receiving other RouteNotes (e.g. from other users).
-    """
-    raise NotImplementedError()
-
-
-def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
-  request_deserializers = {
-    ('routeguide.RouteGuide', 'GetFeature'): Point.FromString,
-    ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString,
-    ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString,
-    ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
-  }
-  response_serializers = {
-    ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString,
-    ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString,
-    ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString,
-    ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
-  }
-  method_implementations = {
-    ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature),
-    ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures),
-    ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute),
-    ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat),
-  }
-  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
-  return beta_implementations.server(method_implementations, options=server_options)
-
-
-def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
-  request_serializers = {
-    ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString,
-    ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString,
-    ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString,
-    ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
-  }
-  response_deserializers = {
-    ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString,
-    ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString,
-    ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString,
-    ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
-  }
-  cardinalities = {
-    'GetFeature': cardinality.Cardinality.UNARY_UNARY,
-    'ListFeatures': cardinality.Cardinality.UNARY_STREAM,
-    'RecordRoute': cardinality.Cardinality.STREAM_UNARY,
-    'RouteChat': cardinality.Cardinality.STREAM_STREAM,
-  }
-  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
-  return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options)
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This function was
+    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0"""
+    request_serializers = {
+      ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString,
+      ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString,
+      ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString,
+      ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
+    }
+    response_deserializers = {
+      ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString,
+      ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString,
+      ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString,
+      ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
+    }
+    cardinalities = {
+      'GetFeature': cardinality.Cardinality.UNARY_UNARY,
+      'ListFeatures': cardinality.Cardinality.UNARY_STREAM,
+      'RecordRoute': cardinality.Cardinality.STREAM_UNARY,
+      'RouteChat': cardinality.Cardinality.STREAM_STREAM,
+    }
+    stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+    return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options)
+except ImportError:
+  pass
 # @@protoc_insertion_point(module_scope)
diff --git a/examples/python/multiplex/route_guide_pb2_grpc.py b/examples/python/multiplex/route_guide_pb2_grpc.py
new file mode 100644
index 0000000..27b24c7
--- /dev/null
+++ b/examples/python/multiplex/route_guide_pb2_grpc.py
@@ -0,0 +1,114 @@
+import grpc
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+import route_guide_pb2 as route__guide__pb2
+
+
+class RouteGuideStub(object):
+  """Interface exported by the server.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.GetFeature = channel.unary_unary(
+        '/routeguide.RouteGuide/GetFeature',
+        request_serializer=route__guide__pb2.Point.SerializeToString,
+        response_deserializer=route__guide__pb2.Feature.FromString,
+        )
+    self.ListFeatures = channel.unary_stream(
+        '/routeguide.RouteGuide/ListFeatures',
+        request_serializer=route__guide__pb2.Rectangle.SerializeToString,
+        response_deserializer=route__guide__pb2.Feature.FromString,
+        )
+    self.RecordRoute = channel.stream_unary(
+        '/routeguide.RouteGuide/RecordRoute',
+        request_serializer=route__guide__pb2.Point.SerializeToString,
+        response_deserializer=route__guide__pb2.RouteSummary.FromString,
+        )
+    self.RouteChat = channel.stream_stream(
+        '/routeguide.RouteGuide/RouteChat',
+        request_serializer=route__guide__pb2.RouteNote.SerializeToString,
+        response_deserializer=route__guide__pb2.RouteNote.FromString,
+        )
+
+
+class RouteGuideServicer(object):
+  """Interface exported by the server.
+  """
+
+  def GetFeature(self, request, context):
+    """A simple RPC.
+
+    Obtains the feature at a given position.
+
+    A feature with an empty name is returned if there's no feature at the given
+    position.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def ListFeatures(self, request, context):
+    """A server-to-client streaming RPC.
+
+    Obtains the Features available within the given Rectangle.  Results are
+    streamed rather than returned at once (e.g. in a response message with a
+    repeated field), as the rectangle may cover a large area and contain a
+    huge number of features.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def RecordRoute(self, request_iterator, context):
+    """A client-to-server streaming RPC.
+
+    Accepts a stream of Points on a route being traversed, returning a
+    RouteSummary when traversal is completed.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def RouteChat(self, request_iterator, context):
+    """A Bidirectional streaming RPC.
+
+    Accepts a stream of RouteNotes sent while a route is being traversed,
+    while receiving other RouteNotes (e.g. from other users).
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_RouteGuideServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'GetFeature': grpc.unary_unary_rpc_method_handler(
+          servicer.GetFeature,
+          request_deserializer=route__guide__pb2.Point.FromString,
+          response_serializer=route__guide__pb2.Feature.SerializeToString,
+      ),
+      'ListFeatures': grpc.unary_stream_rpc_method_handler(
+          servicer.ListFeatures,
+          request_deserializer=route__guide__pb2.Rectangle.FromString,
+          response_serializer=route__guide__pb2.Feature.SerializeToString,
+      ),
+      'RecordRoute': grpc.stream_unary_rpc_method_handler(
+          servicer.RecordRoute,
+          request_deserializer=route__guide__pb2.Point.FromString,
+          response_serializer=route__guide__pb2.RouteSummary.SerializeToString,
+      ),
+      'RouteChat': grpc.stream_stream_rpc_method_handler(
+          servicer.RouteChat,
+          request_deserializer=route__guide__pb2.RouteNote.FromString,
+          response_serializer=route__guide__pb2.RouteNote.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'routeguide.RouteGuide', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
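
route_guide_pb2_grpc.py exercises all four RPC cardinalities: unary-unary (GetFeature), unary-stream (ListFeatures), stream-unary (RecordRoute), and stream-stream (RouteChat). On the caller's side the streaming directions map onto Python iterators; a sketch assuming a connected RouteGuideStub named stub and illustrative coordinate values:

import route_guide_pb2

# Unary-unary: one request in, one response out.
feature = stub.GetFeature(route_guide_pb2.Point(latitude=0, longitude=0))

# Unary-stream: one request in, iterate the streamed responses.
rect = route_guide_pb2.Rectangle(
    lo=route_guide_pb2.Point(latitude=400000000, longitude=-750000000),
    hi=route_guide_pb2.Point(latitude=420000000, longitude=-730000000))
for feature in stub.ListFeatures(rect):
  print(feature.name)

# Stream-unary: send an iterator of requests, receive one summary.
points = iter([route_guide_pb2.Point(latitude=0, longitude=0)])
summary = stub.RecordRoute(points)

# Stream-stream: send an iterator of notes, iterate the responses.
notes = iter([route_guide_pb2.RouteNote(message='hello')])
for received in stub.RouteChat(notes):
  print(received.message)
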
diff --git a/examples/python/multiplex/run_codegen.py b/examples/python/multiplex/run_codegen.py
old mode 100755
new mode 100644
diff --git a/examples/python/route_guide/route_guide_client.py b/examples/python/route_guide/route_guide_client.py
index 8a80ed8..d295523 100644
--- a/examples/python/route_guide/route_guide_client.py
+++ b/examples/python/route_guide/route_guide_client.py
@@ -37,6 +37,7 @@
 import grpc
 
 import route_guide_pb2
+import route_guide_pb2_grpc
 import route_guide_resources
 
 
@@ -116,7 +117,7 @@
 
 def run():
   channel = grpc.insecure_channel('localhost:50051')
-  stub = route_guide_pb2.RouteGuideStub(channel)
+  stub = route_guide_pb2_grpc.RouteGuideStub(channel)
   print("-------------- GetFeature --------------")
   guide_get_feature(stub)
   print("-------------- ListFeatures --------------")
diff --git a/examples/python/route_guide/route_guide_pb2.py b/examples/python/route_guide/route_guide_pb2.py
index 924e186..e6775eb 100644
--- a/examples/python/route_guide/route_guide_pb2.py
+++ b/examples/python/route_guide/route_guide_pb2.py
@@ -277,240 +277,265 @@
 
 DESCRIPTOR.has_options = True
 DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.routeguideB\017RouteGuideProtoP\001\242\002\003RTG'))
-import grpc
-from grpc.beta import implementations as beta_implementations
-from grpc.beta import interfaces as beta_interfaces
-from grpc.framework.common import cardinality
-from grpc.framework.interfaces.face import utilities as face_utilities
+try:
+  # THESE ELEMENTS WILL BE DEPRECATED.
+  # Please use the generated *_pb2_grpc.py files instead.
+  import grpc
+  from grpc.framework.common import cardinality
+  from grpc.framework.interfaces.face import utilities as face_utilities
+  from grpc.beta import implementations as beta_implementations
+  from grpc.beta import interfaces as beta_interfaces
 
 
-class RouteGuideStub(object):
-  """Interface exported by the server.
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
+  class RouteGuideStub(object):
+    """Interface exported by the server.
     """
-    self.GetFeature = channel.unary_unary(
-        '/routeguide.RouteGuide/GetFeature',
-        request_serializer=Point.SerializeToString,
-        response_deserializer=Feature.FromString,
-        )
-    self.ListFeatures = channel.unary_stream(
-        '/routeguide.RouteGuide/ListFeatures',
-        request_serializer=Rectangle.SerializeToString,
-        response_deserializer=Feature.FromString,
-        )
-    self.RecordRoute = channel.stream_unary(
-        '/routeguide.RouteGuide/RecordRoute',
-        request_serializer=Point.SerializeToString,
-        response_deserializer=RouteSummary.FromString,
-        )
-    self.RouteChat = channel.stream_stream(
-        '/routeguide.RouteGuide/RouteChat',
-        request_serializer=RouteNote.SerializeToString,
-        response_deserializer=RouteNote.FromString,
-        )
+
+    def __init__(self, channel):
+      """Constructor.
+
+      Args:
+        channel: A grpc.Channel.
+      """
+      self.GetFeature = channel.unary_unary(
+          '/routeguide.RouteGuide/GetFeature',
+          request_serializer=Point.SerializeToString,
+          response_deserializer=Feature.FromString,
+          )
+      self.ListFeatures = channel.unary_stream(
+          '/routeguide.RouteGuide/ListFeatures',
+          request_serializer=Rectangle.SerializeToString,
+          response_deserializer=Feature.FromString,
+          )
+      self.RecordRoute = channel.stream_unary(
+          '/routeguide.RouteGuide/RecordRoute',
+          request_serializer=Point.SerializeToString,
+          response_deserializer=RouteSummary.FromString,
+          )
+      self.RouteChat = channel.stream_stream(
+          '/routeguide.RouteGuide/RouteChat',
+          request_serializer=RouteNote.SerializeToString,
+          response_deserializer=RouteNote.FromString,
+          )
 
 
-class RouteGuideServicer(object):
-  """Interface exported by the server.
-  """
-
-  def GetFeature(self, request, context):
-    """A simple RPC.
-
-    Obtains the feature at a given position.
-
-    A feature with an empty name is returned if there's no feature at the given
-    position.
+  class RouteGuideServicer(object):
+    """Interface exported by the server.
     """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
 
-  def ListFeatures(self, request, context):
-    """A server-to-client streaming RPC.
+    def GetFeature(self, request, context):
+      """A simple RPC.
 
-    Obtains the Features available within the given Rectangle.  Results are
-    streamed rather than returned at once (e.g. in a response message with a
-    repeated field), as the rectangle may cover a large area and contain a
-    huge number of features.
+      Obtains the feature at a given position.
+
+      A feature with an empty name is returned if there's no feature at the given
+      position.
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
+
+    def ListFeatures(self, request, context):
+      """A server-to-client streaming RPC.
+
+      Obtains the Features available within the given Rectangle.  Results are
+      streamed rather than returned at once (e.g. in a response message with a
+      repeated field), as the rectangle may cover a large area and contain a
+      huge number of features.
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
+
+    def RecordRoute(self, request_iterator, context):
+      """A client-to-server streaming RPC.
+
+      Accepts a stream of Points on a route being traversed, returning a
+      RouteSummary when traversal is completed.
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
+
+    def RouteChat(self, request_iterator, context):
+      """A Bidirectional streaming RPC.
+
+      Accepts a stream of RouteNotes sent while a route is being traversed,
+      while receiving other RouteNotes (e.g. from other users).
+      """
+      context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+      context.set_details('Method not implemented!')
+      raise NotImplementedError('Method not implemented!')
+
+
+  def add_RouteGuideServicer_to_server(servicer, server):
+    rpc_method_handlers = {
+        'GetFeature': grpc.unary_unary_rpc_method_handler(
+            servicer.GetFeature,
+            request_deserializer=Point.FromString,
+            response_serializer=Feature.SerializeToString,
+        ),
+        'ListFeatures': grpc.unary_stream_rpc_method_handler(
+            servicer.ListFeatures,
+            request_deserializer=Rectangle.FromString,
+            response_serializer=Feature.SerializeToString,
+        ),
+        'RecordRoute': grpc.stream_unary_rpc_method_handler(
+            servicer.RecordRoute,
+            request_deserializer=Point.FromString,
+            response_serializer=RouteSummary.SerializeToString,
+        ),
+        'RouteChat': grpc.stream_stream_rpc_method_handler(
+            servicer.RouteChat,
+            request_deserializer=RouteNote.FromString,
+            response_serializer=RouteNote.SerializeToString,
+        ),
+    }
+    generic_handler = grpc.method_handlers_generic_handler(
+        'routeguide.RouteGuide', rpc_method_handlers)
+    server.add_generic_rpc_handlers((generic_handler,))
+
+
+  class BetaRouteGuideServicer(object):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This class was generated
+    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.
+
+    Interface exported by the server.
     """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
+    def GetFeature(self, request, context):
+      """A simple RPC.
 
-  def RecordRoute(self, request_iterator, context):
-    """A client-to-server streaming RPC.
+      Obtains the feature at a given position.
 
-    Accepts a stream of Points on a route being traversed, returning a
-    RouteSummary when traversal is completed.
+      A feature with an empty name is returned if there's no feature at the given
+      position.
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+    def ListFeatures(self, request, context):
+      """A server-to-client streaming RPC.
+
+      Obtains the Features available within the given Rectangle.  Results are
+      streamed rather than returned at once (e.g. in a response message with a
+      repeated field), as the rectangle may cover a large area and contain a
+      huge number of features.
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+    def RecordRoute(self, request_iterator, context):
+      """A client-to-server streaming RPC.
+
+      Accepts a stream of Points on a route being traversed, returning a
+      RouteSummary when traversal is completed.
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+    def RouteChat(self, request_iterator, context):
+      """A Bidirectional streaming RPC.
+
+      Accepts a stream of RouteNotes sent while a route is being traversed,
+      while receiving other RouteNotes (e.g. from other users).
+      """
+      context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
+
+
+  class BetaRouteGuideStub(object):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This class was generated
+    only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0.
+
+    Interface exported by the server.
     """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
+    def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
+      """A simple RPC.
 
-  def RouteChat(self, request_iterator, context):
-    """A Bidirectional streaming RPC.
+      Obtains the feature at a given position.
 
-    Accepts a stream of RouteNotes sent while a route is being traversed,
-    while receiving other RouteNotes (e.g. from other users).
-    """
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
+      A feature with an empty name is returned if there's no feature at the given
+      position.
+      """
+      raise NotImplementedError()
+    GetFeature.future = None
+    def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
+      """A server-to-client streaming RPC.
+
+      Obtains the Features available within the given Rectangle.  Results are
+      streamed rather than returned at once (e.g. in a response message with a
+      repeated field), as the rectangle may cover a large area and contain a
+      huge number of features.
+      """
+      raise NotImplementedError()
+    def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
+      """A client-to-server streaming RPC.
+
+      Accepts a stream of Points on a route being traversed, returning a
+      RouteSummary when traversal is completed.
+      """
+      raise NotImplementedError()
+    RecordRoute.future = None
+    def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
+      """A Bidirectional streaming RPC.
+
+      Accepts a stream of RouteNotes sent while a route is being traversed,
+      while receiving other RouteNotes (e.g. from other users).
+      """
+      raise NotImplementedError()
 
 
-def add_RouteGuideServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'GetFeature': grpc.unary_unary_rpc_method_handler(
-          servicer.GetFeature,
-          request_deserializer=Point.FromString,
-          response_serializer=Feature.SerializeToString,
-      ),
-      'ListFeatures': grpc.unary_stream_rpc_method_handler(
-          servicer.ListFeatures,
-          request_deserializer=Rectangle.FromString,
-          response_serializer=Feature.SerializeToString,
-      ),
-      'RecordRoute': grpc.stream_unary_rpc_method_handler(
-          servicer.RecordRoute,
-          request_deserializer=Point.FromString,
-          response_serializer=RouteSummary.SerializeToString,
-      ),
-      'RouteChat': grpc.stream_stream_rpc_method_handler(
-          servicer.RouteChat,
-          request_deserializer=RouteNote.FromString,
-          response_serializer=RouteNote.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'routeguide.RouteGuide', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
+  def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
+    """The Beta API is deprecated for 0.15.0 and later.
+
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This function was
+    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
+    request_deserializers = {
+      ('routeguide.RouteGuide', 'GetFeature'): Point.FromString,
+      ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString,
+      ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString,
+      ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
+    }
+    response_serializers = {
+      ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString,
+      ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString,
+      ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString,
+      ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
+    }
+    method_implementations = {
+      ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature),
+      ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures),
+      ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute),
+      ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat),
+    }
+    server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
+    return beta_implementations.server(method_implementations, options=server_options)
 
 
-class BetaRouteGuideServicer(object):
-  """Interface exported by the server.
-  """
-  def GetFeature(self, request, context):
-    """A simple RPC.
+  def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
+    """The Beta API is deprecated for 0.15.0 and later.
 
-    Obtains the feature at a given position.
-
-    A feature with an empty name is returned if there's no feature at the given
-    position.
-    """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
-  def ListFeatures(self, request, context):
-    """A server-to-client streaming RPC.
-
-    Obtains the Features available within the given Rectangle.  Results are
-    streamed rather than returned at once (e.g. in a response message with a
-    repeated field), as the rectangle may cover a large area and contain a
-    huge number of features.
-    """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
-  def RecordRoute(self, request_iterator, context):
-    """A client-to-server streaming RPC.
-
-    Accepts a stream of Points on a route being traversed, returning a
-    RouteSummary when traversal is completed.
-    """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
-  def RouteChat(self, request_iterator, context):
-    """A Bidirectional streaming RPC.
-
-    Accepts a stream of RouteNotes sent while a route is being traversed,
-    while receiving other RouteNotes (e.g. from other users).
-    """
-    context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
-
-
-class BetaRouteGuideStub(object):
-  """Interface exported by the server.
-  """
-  def GetFeature(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
-    """A simple RPC.
-
-    Obtains the feature at a given position.
-
-    A feature with an empty name is returned if there's no feature at the given
-    position.
-    """
-    raise NotImplementedError()
-  GetFeature.future = None
-  def ListFeatures(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
-    """A server-to-client streaming RPC.
-
-    Obtains the Features available within the given Rectangle.  Results are
-    streamed rather than returned at once (e.g. in a response message with a
-    repeated field), as the rectangle may cover a large area and contain a
-    huge number of features.
-    """
-    raise NotImplementedError()
-  def RecordRoute(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
-    """A client-to-server streaming RPC.
-
-    Accepts a stream of Points on a route being traversed, returning a
-    RouteSummary when traversal is completed.
-    """
-    raise NotImplementedError()
-  RecordRoute.future = None
-  def RouteChat(self, request_iterator, timeout, metadata=None, with_call=False, protocol_options=None):
-    """A Bidirectional streaming RPC.
-
-    Accepts a stream of RouteNotes sent while a route is being traversed,
-    while receiving other RouteNotes (e.g. from other users).
-    """
-    raise NotImplementedError()
-
-
-def beta_create_RouteGuide_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
-  request_deserializers = {
-    ('routeguide.RouteGuide', 'GetFeature'): Point.FromString,
-    ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.FromString,
-    ('routeguide.RouteGuide', 'RecordRoute'): Point.FromString,
-    ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
-  }
-  response_serializers = {
-    ('routeguide.RouteGuide', 'GetFeature'): Feature.SerializeToString,
-    ('routeguide.RouteGuide', 'ListFeatures'): Feature.SerializeToString,
-    ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.SerializeToString,
-    ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
-  }
-  method_implementations = {
-    ('routeguide.RouteGuide', 'GetFeature'): face_utilities.unary_unary_inline(servicer.GetFeature),
-    ('routeguide.RouteGuide', 'ListFeatures'): face_utilities.unary_stream_inline(servicer.ListFeatures),
-    ('routeguide.RouteGuide', 'RecordRoute'): face_utilities.stream_unary_inline(servicer.RecordRoute),
-    ('routeguide.RouteGuide', 'RouteChat'): face_utilities.stream_stream_inline(servicer.RouteChat),
-  }
-  server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
-  return beta_implementations.server(method_implementations, options=server_options)
-
-
-def beta_create_RouteGuide_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
-  request_serializers = {
-    ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString,
-    ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString,
-    ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString,
-    ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
-  }
-  response_deserializers = {
-    ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString,
-    ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString,
-    ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString,
-    ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
-  }
-  cardinalities = {
-    'GetFeature': cardinality.Cardinality.UNARY_UNARY,
-    'ListFeatures': cardinality.Cardinality.UNARY_STREAM,
-    'RecordRoute': cardinality.Cardinality.STREAM_UNARY,
-    'RouteChat': cardinality.Cardinality.STREAM_STREAM,
-  }
-  stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
-  return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options)
+    It is recommended to use the GA API (classes and functions in this
+    file not marked beta) for all further purposes. This function was
+    generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0."""
+    request_serializers = {
+      ('routeguide.RouteGuide', 'GetFeature'): Point.SerializeToString,
+      ('routeguide.RouteGuide', 'ListFeatures'): Rectangle.SerializeToString,
+      ('routeguide.RouteGuide', 'RecordRoute'): Point.SerializeToString,
+      ('routeguide.RouteGuide', 'RouteChat'): RouteNote.SerializeToString,
+    }
+    response_deserializers = {
+      ('routeguide.RouteGuide', 'GetFeature'): Feature.FromString,
+      ('routeguide.RouteGuide', 'ListFeatures'): Feature.FromString,
+      ('routeguide.RouteGuide', 'RecordRoute'): RouteSummary.FromString,
+      ('routeguide.RouteGuide', 'RouteChat'): RouteNote.FromString,
+    }
+    cardinalities = {
+      'GetFeature': cardinality.Cardinality.UNARY_UNARY,
+      'ListFeatures': cardinality.Cardinality.UNARY_STREAM,
+      'RecordRoute': cardinality.Cardinality.STREAM_UNARY,
+      'RouteChat': cardinality.Cardinality.STREAM_STREAM,
+    }
+    stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
+    return beta_implementations.dynamic_stub(channel, 'routeguide.RouteGuide', cardinalities, options=stub_options)
+except ImportError:
+  pass
 # @@protoc_insertion_point(module_scope)
diff --git a/examples/python/route_guide/route_guide_pb2_grpc.py b/examples/python/route_guide/route_guide_pb2_grpc.py
new file mode 100644
index 0000000..27b24c7
--- /dev/null
+++ b/examples/python/route_guide/route_guide_pb2_grpc.py
@@ -0,0 +1,114 @@
+import grpc
+from grpc.framework.common import cardinality
+from grpc.framework.interfaces.face import utilities as face_utilities
+
+import route_guide_pb2 as route__guide__pb2
+
+
+class RouteGuideStub(object):
+  """Interface exported by the server.
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.GetFeature = channel.unary_unary(
+        '/routeguide.RouteGuide/GetFeature',
+        request_serializer=route__guide__pb2.Point.SerializeToString,
+        response_deserializer=route__guide__pb2.Feature.FromString,
+        )
+    self.ListFeatures = channel.unary_stream(
+        '/routeguide.RouteGuide/ListFeatures',
+        request_serializer=route__guide__pb2.Rectangle.SerializeToString,
+        response_deserializer=route__guide__pb2.Feature.FromString,
+        )
+    self.RecordRoute = channel.stream_unary(
+        '/routeguide.RouteGuide/RecordRoute',
+        request_serializer=route__guide__pb2.Point.SerializeToString,
+        response_deserializer=route__guide__pb2.RouteSummary.FromString,
+        )
+    self.RouteChat = channel.stream_stream(
+        '/routeguide.RouteGuide/RouteChat',
+        request_serializer=route__guide__pb2.RouteNote.SerializeToString,
+        response_deserializer=route__guide__pb2.RouteNote.FromString,
+        )
+
+
+class RouteGuideServicer(object):
+  """Interface exported by the server.
+  """
+
+  def GetFeature(self, request, context):
+    """A simple RPC.
+
+    Obtains the feature at a given position.
+
+    A feature with an empty name is returned if there's no feature at the given
+    position.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def ListFeatures(self, request, context):
+    """A server-to-client streaming RPC.
+
+    Obtains the Features available within the given Rectangle.  Results are
+    streamed rather than returned at once (e.g. in a response message with a
+    repeated field), as the rectangle may cover a large area and contain a
+    huge number of features.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def RecordRoute(self, request_iterator, context):
+    """A client-to-server streaming RPC.
+
+    Accepts a stream of Points on a route being traversed, returning a
+    RouteSummary when traversal is completed.
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def RouteChat(self, request_iterator, context):
+    """A Bidirectional streaming RPC.
+
+    Accepts a stream of RouteNotes sent while a route is being traversed,
+    while receiving other RouteNotes (e.g. from other users).
+    """
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_RouteGuideServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'GetFeature': grpc.unary_unary_rpc_method_handler(
+          servicer.GetFeature,
+          request_deserializer=route__guide__pb2.Point.FromString,
+          response_serializer=route__guide__pb2.Feature.SerializeToString,
+      ),
+      'ListFeatures': grpc.unary_stream_rpc_method_handler(
+          servicer.ListFeatures,
+          request_deserializer=route__guide__pb2.Rectangle.FromString,
+          response_serializer=route__guide__pb2.Feature.SerializeToString,
+      ),
+      'RecordRoute': grpc.stream_unary_rpc_method_handler(
+          servicer.RecordRoute,
+          request_deserializer=route__guide__pb2.Point.FromString,
+          response_serializer=route__guide__pb2.RouteSummary.SerializeToString,
+      ),
+      'RouteChat': grpc.stream_stream_rpc_method_handler(
+          servicer.RouteChat,
+          request_deserializer=route__guide__pb2.RouteNote.FromString,
+          response_serializer=route__guide__pb2.RouteNote.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'routeguide.RouteGuide', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
diff --git a/examples/python/route_guide/route_guide_server.py b/examples/python/route_guide/route_guide_server.py
index 3ffe678..bf49217 100644
--- a/examples/python/route_guide/route_guide_server.py
+++ b/examples/python/route_guide/route_guide_server.py
@@ -36,6 +36,7 @@
 import grpc
 
 import route_guide_pb2
+import route_guide_pb2_grpc
 import route_guide_resources
 
 _ONE_DAY_IN_SECONDS = 60 * 60 * 24
@@ -68,7 +69,7 @@
   R = 6371000  # metres
   return R * c
 
-class RouteGuideServicer(route_guide_pb2.RouteGuideServicer):
+class RouteGuideServicer(route_guide_pb2_grpc.RouteGuideServicer):
   """Provides methods that implement functionality of route guide server."""
 
   def __init__(self):
@@ -125,7 +126,7 @@
 
 def serve():
   server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
-  route_guide_pb2.add_RouteGuideServicer_to_server(
+  route_guide_pb2_grpc.add_RouteGuideServicer_to_server(
       RouteGuideServicer(), server)
   server.add_insecure_port('[::]:50051')
   server.start()
diff --git a/include/grpc++/resource_quota.h b/include/grpc++/resource_quota.h
index 75e04d4..68a5146 100644
--- a/include/grpc++/resource_quota.h
+++ b/include/grpc++/resource_quota.h
@@ -37,6 +37,7 @@
 struct grpc_resource_quota;
 
 #include <grpc++/impl/codegen/config.h>
+#include <grpc++/impl/codegen/grpc_library.h>
 
 namespace grpc {
 
@@ -44,7 +45,7 @@
 /// A ResourceQuota can be attached to a server (via ServerBuilder), or a client
 /// channel (via ChannelArguments). gRPC will attempt to keep memory used by
 /// all attached entities below the ResourceQuota bound.
-class ResourceQuota final {
+class ResourceQuota final : private GrpcLibraryCodegen {
  public:
   explicit ResourceQuota(const grpc::string& name);
   ResourceQuota();
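
Note on the change above: deriving ResourceQuota from GrpcLibraryCodegen ties each
quota object to the library's reference-counted init/shutdown, so a ResourceQuota
constructed before any channel or server no longer touches uninitialized global
state. A minimal C-core sketch of the invariant the base class maintains, assuming
the public grpc_resource_quota API; the name "example_quota" and the 16 MiB size
are illustrative:

#include <grpc/grpc.h>

int main(void) {
  grpc_init(); /* what the GrpcLibraryCodegen base acquires on construction */
  grpc_resource_quota *q = grpc_resource_quota_create("example_quota");
  grpc_resource_quota_resize(q, 16 * 1024 * 1024); /* cap buffer memory at 16 MiB */
  grpc_resource_quota_unref(q);
  grpc_shutdown(); /* what the base releases on destruction */
  return 0;
}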
diff --git a/include/grpc++/server_builder.h b/include/grpc++/server_builder.h
index 9252c6a..2ac2f0a 100644
--- a/include/grpc++/server_builder.h
+++ b/include/grpc++/server_builder.h
@@ -187,7 +187,7 @@
 
   struct SyncServerSettings {
     SyncServerSettings()
-        : num_cqs(GPR_MAX(gpr_cpu_num_cores(), 4)),
+        : num_cqs(1),
           min_pollers(1),
           max_pollers(INT_MAX),
           cq_timeout_msec(1000) {}
diff --git a/src/compiler/python_generator.cc b/src/compiler/python_generator.cc
index 0fac1b8..4841da8 100644
--- a/src/compiler/python_generator.cc
+++ b/src/compiler/python_generator.cc
@@ -724,6 +724,9 @@
     out = &out_printer;
 
     if (generate_in_pb2_grpc) {
+      out->Print(
+          "# Generated by the gRPC Python protocol compiler plugin. "
+          "DO NOT EDIT!\n");
       if (!PrintPreamble()) {
         return make_pair(false, "");
       }
diff --git a/src/core/ext/census/grpc_filter.c b/src/core/ext/census/grpc_filter.c
index 3e8acc8..8e4d473 100644
--- a/src/core/ext/census/grpc_filter.c
+++ b/src/core/ext/census/grpc_filter.c
@@ -154,7 +154,8 @@
   memset(d, 0, sizeof(*d));
   d->start_ts = args->start_time;
   /* TODO(hongyu): call census_tracing_start_op here. */
-  grpc_closure_init(&d->finish_recv, server_on_done_recv, elem);
+  grpc_closure_init(&d->finish_recv, server_on_done_recv, elem,
+                    grpc_schedule_on_exec_ctx);
   return GRPC_ERROR_NONE;
 }
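
Note: this hunk is the first instance of a migration repeated across the C core
below. grpc_closure_init now takes an explicit grpc_closure_scheduler, and
grpc_schedule_on_exec_ctx reproduces the old implicit behavior of running the
callback on the current exec_ctx. A sketch of the before/after shapes; on_done
and call_state are hypothetical names, not part of this patch:

/* Callback matching grpc_iomgr_cb_func, the type grpc_closure_init expects. */
static void on_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  call_state *st = arg; /* hypothetical per-call state */
  /* ... handle completion ... */
}

/* Old form: the scheduling policy was implicit. */
grpc_closure_init(&st->done_closure, on_done, st);

/* New form: the scheduler is fixed once, at initialization time;
 * grpc_schedule_on_exec_ctx keeps the previous semantics. */
grpc_closure_init(&st->done_closure, on_done, st, grpc_schedule_on_exec_ctx);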
 
diff --git a/src/core/ext/client_channel/channel_connectivity.c b/src/core/ext/client_channel/channel_connectivity.c
index 9797e66..b10f444 100644
--- a/src/core/ext/client_channel/channel_connectivity.c
+++ b/src/core/ext/client_channel/channel_connectivity.c
@@ -198,7 +198,8 @@
   grpc_cq_begin_op(cq, tag);
 
   gpr_mu_init(&w->mu);
-  grpc_closure_init(&w->on_complete, watch_complete, w);
+  grpc_closure_init(&w->on_complete, watch_complete, w,
+                    grpc_schedule_on_exec_ctx);
   w->phase = WAITING;
   w->state = last_observed_state;
   w->cq = cq;
diff --git a/src/core/ext/client_channel/client_channel.c b/src/core/ext/client_channel/client_channel.c
index 9d46338..a762b89 100644
--- a/src/core/ext/client_channel/client_channel.c
+++ b/src/core/ext/client_channel/client_channel.c
@@ -249,7 +249,8 @@
   GRPC_CHANNEL_STACK_REF(chand->owning_stack, "watch_lb_policy");
 
   w->chand = chand;
-  grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w);
+  grpc_closure_init(&w->on_changed, on_lb_policy_state_changed, w,
+                    grpc_schedule_on_exec_ctx);
   w->state = current_state;
   w->lb_policy = lb_policy;
   grpc_lb_policy_notify_on_state_change(exec_ctx, lb_policy, &w->state,
@@ -361,14 +362,12 @@
   }
   chand->method_params_table = method_params_table;
   if (lb_policy != NULL) {
-    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
-                               NULL);
+    grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
   } else if (chand->resolver == NULL /* disconnected */) {
     grpc_closure_list_fail_all(
         &chand->waiting_for_config_closures,
         GRPC_ERROR_CREATE_REFERENCING("Channel disconnected", &error, 1));
-    grpc_exec_ctx_enqueue_list(exec_ctx, &chand->waiting_for_config_closures,
-                               NULL);
+    grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
   }
   if (lb_policy != NULL && chand->exit_idle_when_lb_policy_arrives) {
     GRPC_LB_POLICY_REF(lb_policy, "exit_idle");
@@ -425,7 +424,7 @@
                                   grpc_transport_op *op) {
   channel_data *chand = elem->channel_data;
 
-  grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
 
   GPR_ASSERT(op->set_accept_stream == false);
   if (op->bind_pollset != NULL) {
@@ -444,9 +443,8 @@
 
   if (op->send_ping != NULL) {
     if (chand->lb_policy == NULL) {
-      grpc_exec_ctx_sched(exec_ctx, op->send_ping,
-                          GRPC_ERROR_CREATE("Ping with no load balancing"),
-                          NULL);
+      grpc_closure_sched(exec_ctx, op->send_ping,
+                         GRPC_ERROR_CREATE("Ping with no load balancing"));
     } else {
       grpc_lb_policy_ping_one(exec_ctx, chand->lb_policy, op->send_ping);
       op->bind_pollset = NULL;
@@ -465,8 +463,7 @@
       if (!chand->started_resolving) {
         grpc_closure_list_fail_all(&chand->waiting_for_config_closures,
                                    GRPC_ERROR_REF(op->disconnect_with_error));
-        grpc_exec_ctx_enqueue_list(exec_ctx,
-                                   &chand->waiting_for_config_closures, NULL);
+        grpc_closure_list_sched(exec_ctx, &chand->waiting_for_config_closures);
       }
       if (chand->lb_policy != NULL) {
         grpc_pollset_set_del_pollset_set(exec_ctx,
@@ -511,7 +508,8 @@
   gpr_mu_init(&chand->mu);
   chand->owning_stack = args->channel_stack;
   grpc_closure_init(&chand->on_resolver_result_changed,
-                    on_resolver_result_changed, chand);
+                    on_resolver_result_changed, chand,
+                    grpc_schedule_on_exec_ctx);
   chand->interested_parties = grpc_pollset_set_create();
   grpc_connectivity_state_init(&chand->state_tracker, GRPC_CHANNEL_IDLE,
                                "client_channel");
@@ -678,8 +676,9 @@
   calld->waiting_ops_count = 0;
   calld->waiting_ops_capacity = 0;
   GRPC_SUBCHANNEL_CALL_REF(a->call, "retry_ops");
-  grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(retry_ops, a),
-                      GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(
+      exec_ctx, grpc_closure_create(retry_ops, a, grpc_schedule_on_exec_ctx),
+      GRPC_ERROR_NONE);
 }
 
 static void subchannel_ready(grpc_exec_ctx *exec_ctx, void *arg,
@@ -761,14 +760,14 @@
   if (cpa->connected_subchannel == NULL) {
     /* cancelled, do nothing */
   } else if (error != GRPC_ERROR_NONE) {
-    grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error), NULL);
+    grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_REF(error));
   } else {
     call_data *calld = cpa->elem->call_data;
     gpr_mu_lock(&calld->mu);
     if (pick_subchannel(exec_ctx, cpa->elem, cpa->initial_metadata,
                         cpa->initial_metadata_flags, cpa->connected_subchannel,
                         cpa->on_ready, GRPC_ERROR_NONE)) {
-      grpc_exec_ctx_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, cpa->on_ready, GRPC_ERROR_NONE);
     }
     gpr_mu_unlock(&calld->mu);
   }
@@ -800,9 +799,9 @@
       cpa = closure->cb_arg;
       if (cpa->connected_subchannel == connected_subchannel) {
         cpa->connected_subchannel = NULL;
-        grpc_exec_ctx_sched(
+        grpc_closure_sched(
             exec_ctx, cpa->on_ready,
-            GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
+            GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
       }
     }
     gpr_mu_unlock(&chand->mu);
@@ -853,12 +852,12 @@
     cpa->connected_subchannel = connected_subchannel;
     cpa->on_ready = on_ready;
     cpa->elem = elem;
-    grpc_closure_init(&cpa->closure, continue_picking, cpa);
+    grpc_closure_init(&cpa->closure, continue_picking, cpa,
+                      grpc_schedule_on_exec_ctx);
     grpc_closure_list_append(&chand->waiting_for_config_closures, &cpa->closure,
                              GRPC_ERROR_NONE);
   } else {
-    grpc_exec_ctx_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, on_ready, GRPC_ERROR_CREATE("Disconnected"));
   }
   gpr_mu_unlock(&chand->mu);
 
@@ -943,7 +942,8 @@
       calld->connected_subchannel == NULL &&
       op->send_initial_metadata != NULL) {
     calld->creation_phase = GRPC_SUBCHANNEL_CALL_HOLDER_PICKING_SUBCHANNEL;
-    grpc_closure_init(&calld->next_step, subchannel_ready, elem);
+    grpc_closure_init(&calld->next_step, subchannel_ready, elem,
+                      grpc_schedule_on_exec_ctx);
     GRPC_CALL_STACK_REF(calld->owning_call, "pick_subchannel");
     /* If a subchannel is not available immediately, the polling entity from
        call_data should be provided to channel_data's interested_parties, so
@@ -1089,7 +1089,8 @@
     // get the service config data once the resolver returns.
     // Take a reference to the call stack to be owned by the callback.
     GRPC_CALL_STACK_REF(calld->owning_call, "read_service_config");
-    grpc_closure_init(&calld->read_service_config, read_service_config, elem);
+    grpc_closure_init(&calld->read_service_config, read_service_config, elem,
+                      grpc_schedule_on_exec_ctx);
     grpc_closure_list_append(&chand->waiting_for_config_closures,
                              &calld->read_service_config, GRPC_ERROR_NONE);
     gpr_mu_unlock(&chand->mu);
@@ -1202,7 +1203,8 @@
   w->pollset = pollset;
   w->on_complete = on_complete;
   grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties, pollset);
-  grpc_closure_init(&w->my_closure, on_external_watch_complete, w);
+  grpc_closure_init(&w->my_closure, on_external_watch_complete, w,
+                    grpc_schedule_on_exec_ctx);
   GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
                          "external_connectivity_watcher");
   gpr_mu_lock(&chand->mu);
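
Note: the companion rename visible throughout this file: grpc_exec_ctx_sched
becomes grpc_closure_sched, and grpc_exec_ctx_enqueue_list becomes
grpc_closure_list_sched. The trailing NULL workqueue argument disappears because
the closure now carries its scheduler. Side by side, as a sketch (closure and
waiting_closures are placeholder names):

/* Old calls: a workqueue hint (always NULL here) at every call site. */
grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
grpc_exec_ctx_enqueue_list(exec_ctx, &waiting_closures, NULL);

/* New calls: the closure's own scheduler decides where it runs. */
grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
grpc_closure_list_sched(exec_ctx, &waiting_closures);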
diff --git a/src/core/ext/client_channel/http_connect_handshaker.c b/src/core/ext/client_channel/http_connect_handshaker.c
index 76c78ee..a0fc95e 100644
--- a/src/core/ext/client_channel/http_connect_handshaker.c
+++ b/src/core/ext/client_channel/http_connect_handshaker.c
@@ -131,7 +131,7 @@
     handshaker->shutdown = true;
   }
   // Invoke callback.
-  grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL);
+  grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
 }
 
 // Callback invoked when finished writing HTTP CONNECT request.
@@ -229,7 +229,7 @@
     goto done;
   }
   // Success.  Invoke handshake-done callback.
-  grpc_exec_ctx_sched(exec_ctx, handshaker->on_handshake_done, error, NULL);
+  grpc_closure_sched(exec_ctx, handshaker->on_handshake_done, error);
 done:
   // Set shutdown to true so that subsequent calls to
   // http_connect_handshaker_shutdown() do nothing.
@@ -313,9 +313,9 @@
   handshaker->proxy_server = gpr_strdup(proxy_server);
   grpc_slice_buffer_init(&handshaker->write_buffer);
   grpc_closure_init(&handshaker->request_done_closure, on_write_done,
-                    handshaker);
+                    handshaker, grpc_schedule_on_exec_ctx);
   grpc_closure_init(&handshaker->response_read_closure, on_read_done,
-                    handshaker);
+                    handshaker, grpc_schedule_on_exec_ctx);
   grpc_http_parser_init(&handshaker->http_parser, GRPC_HTTP_RESPONSE,
                         &handshaker->http_response);
   return &handshaker->base;
diff --git a/src/core/ext/client_channel/subchannel.c b/src/core/ext/client_channel/subchannel.c
index f294e69..87f0ef2 100644
--- a/src/core/ext/client_channel/subchannel.c
+++ b/src/core/ext/client_channel/subchannel.c
@@ -293,8 +293,9 @@
   gpr_atm old_refs;
   old_refs = ref_mutate(c, -(gpr_atm)1, 1 REF_MUTATE_PURPOSE("WEAK_UNREF"));
   if (old_refs == 1) {
-    grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c),
-                        GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, grpc_closure_create(subchannel_destroy, c,
+                                                     grpc_schedule_on_exec_ctx),
+                       GRPC_ERROR_NONE);
   }
 }
 
@@ -330,7 +331,8 @@
   c->args = grpc_channel_args_copy(args->args);
   c->root_external_state_watcher.next = c->root_external_state_watcher.prev =
       &c->root_external_state_watcher;
-  grpc_closure_init(&c->connected, subchannel_connected, c);
+  grpc_closure_init(&c->connected, subchannel_connected, c,
+                    grpc_schedule_on_exec_ctx);
   grpc_connectivity_state_init(&c->state_tracker, GRPC_CHANNEL_IDLE,
                                "subchannel");
   int initial_backoff_ms =
@@ -505,7 +507,8 @@
     w->subchannel = c;
     w->pollset_set = interested_parties;
     w->notify = notify;
-    grpc_closure_init(&w->closure, on_external_state_watcher_done, w);
+    grpc_closure_init(&w->closure, on_external_state_watcher_done, w,
+                      grpc_schedule_on_exec_ctx);
     if (interested_parties != NULL) {
       grpc_pollset_set_add_pollset_set(exec_ctx, c->pollset_set,
                                        interested_parties);
@@ -626,7 +629,7 @@
   sw_subchannel->subchannel = c;
   sw_subchannel->connectivity_state = GRPC_CHANNEL_READY;
   grpc_closure_init(&sw_subchannel->closure, subchannel_on_child_state_changed,
-                    sw_subchannel);
+                    sw_subchannel, grpc_schedule_on_exec_ctx);
 
   if (c->disconnected) {
     gpr_free(sw_subchannel);
diff --git a/src/core/ext/client_config/message_size_filter.c b/src/core/ext/client_config/message_size_filter.c
index 770526d..f0b2074 100644
--- a/src/core/ext/client_config/message_size_filter.c
+++ b/src/core/ext/client_config/message_size_filter.c
@@ -124,7 +124,7 @@
     gpr_free(message_string);
   }
   // Invoke the next callback.
-  grpc_exec_ctx_sched(exec_ctx, calld->next_recv_message_ready, error, NULL);
+  grpc_closure_sched(exec_ctx, calld->next_recv_message_ready, error);
 }
 
 // Start transport stream op.
@@ -160,7 +160,8 @@
   channel_data* chand = elem->channel_data;
   call_data* calld = elem->call_data;
   calld->next_recv_message_ready = NULL;
-  grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem);
+  grpc_closure_init(&calld->recv_message_ready, recv_message_ready, elem,
+                    grpc_schedule_on_exec_ctx);
   // Get max sizes from channel data, then merge in per-method config values.
   // Note: Per-method config is only available on the client, so we
   // apply the max request size to the send limit and the max response
diff --git a/src/core/ext/lb_policy/grpclb/grpclb.c b/src/core/ext/lb_policy/grpclb/grpclb.c
index bed5e6c..bb0ee9d 100644
--- a/src/core/ext/lb_policy/grpclb/grpclb.c
+++ b/src/core/ext/lb_policy/grpclb/grpclb.c
@@ -180,8 +180,7 @@
   wrapped_rr_closure_arg *wc_arg = arg;
 
   GPR_ASSERT(wc_arg->wrapped_closure != NULL);
-  grpc_exec_ctx_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error),
-                      NULL);
+  grpc_closure_sched(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
 
   if (wc_arg->rr_policy != NULL) {
     /* if *target is NULL, no pick has been made by the RR policy (e.g., all
@@ -248,7 +247,8 @@
       pick_args->lb_token_mdelem_storage;
   pp->wrapped_on_complete_arg.free_when_done = pp;
   grpc_closure_init(&pp->wrapped_on_complete_arg.wrapper_closure,
-                    wrapped_rr_closure, &pp->wrapped_on_complete_arg);
+                    wrapped_rr_closure, &pp->wrapped_on_complete_arg,
+                    grpc_schedule_on_exec_ctx);
   *root = pp;
 }
 
@@ -268,7 +268,8 @@
   pping->wrapped_notify_arg.free_when_done = pping;
   pping->next = *root;
   grpc_closure_init(&pping->wrapped_notify_arg.wrapper_closure,
-                    wrapped_rr_closure, &pping->wrapped_notify_arg);
+                    wrapped_rr_closure, &pping->wrapped_notify_arg,
+                    grpc_schedule_on_exec_ctx);
   *root = pping;
 }
 
@@ -667,7 +668,7 @@
       gpr_malloc(sizeof(rr_connectivity_data));
   memset(rr_connectivity, 0, sizeof(rr_connectivity_data));
   grpc_closure_init(&rr_connectivity->on_change, glb_rr_connectivity_changed,
-                    rr_connectivity);
+                    rr_connectivity, grpc_schedule_on_exec_ctx);
   rr_connectivity->glb_policy = glb_policy;
   rr_connectivity->state = new_rr_state;
 
@@ -908,15 +909,15 @@
   while (pp != NULL) {
     pending_pick *next = pp->next;
     *pp->target = NULL;
-    grpc_exec_ctx_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
-                        GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+                       GRPC_ERROR_NONE);
     pp = next;
   }
 
   while (pping != NULL) {
     pending_ping *next = pping->next;
-    grpc_exec_ctx_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
-                        GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+                       GRPC_ERROR_NONE);
     pping = next;
   }
 }
@@ -932,9 +933,9 @@
     pending_pick *next = pp->next;
     if (pp->target == target) {
       *target = NULL;
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
-          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
     } else {
       pp->next = glb_policy->pending_picks;
       glb_policy->pending_picks = pp;
@@ -957,9 +958,9 @@
     pending_pick *next = pp->next;
     if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
-          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
     } else {
       pp->next = glb_policy->pending_picks;
       glb_policy->pending_picks = pp;
@@ -994,11 +995,10 @@
                     grpc_closure *on_complete) {
   if (pick_args->lb_token_mdelem_storage == NULL) {
     *target = NULL;
-    grpc_exec_ctx_sched(
+    grpc_closure_sched(
         exec_ctx, on_complete,
         GRPC_ERROR_CREATE("No mdelem storage for the LB token. Load reporting "
-                          "won't work without it. Failing"),
-        NULL);
+                          "won't work without it. Failing"));
     return 0;
   }
 
@@ -1017,7 +1017,8 @@
     wrapped_rr_closure_arg *wc_arg = gpr_malloc(sizeof(wrapped_rr_closure_arg));
     memset(wc_arg, 0, sizeof(wrapped_rr_closure_arg));
 
-    grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg);
+    grpc_closure_init(&wc_arg->wrapper_closure, wrapped_rr_closure, wc_arg,
+                      grpc_schedule_on_exec_ctx);
     wc_arg->rr_policy = glb_policy->rr_policy;
     wc_arg->target = target;
     wc_arg->wrapped_closure = on_complete;
@@ -1117,9 +1118,11 @@
   glb_policy->lb_call_status_details_capacity = 0;
 
   grpc_closure_init(&glb_policy->lb_on_server_status_received,
-                    lb_on_server_status_received, glb_policy);
+                    lb_on_server_status_received, glb_policy,
+                    grpc_schedule_on_exec_ctx);
   grpc_closure_init(&glb_policy->lb_on_response_received,
-                    lb_on_response_received, glb_policy);
+                    lb_on_response_received, glb_policy,
+                    grpc_schedule_on_exec_ctx);
 
   gpr_backoff_init(&glb_policy->lb_call_backoff_state,
                    GRPC_GRPCLB_INITIAL_CONNECT_BACKOFF_SECONDS,
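
Note: grpclb's pending picks and pings interpose on the round-robin child policy
by wrapping the caller's closure: wrapped_rr_closure performs bookkeeping (for
example releasing the RR policy ref) and then forwards the original error to the
wrapped closure. A stripped-down sketch of that pattern under the new closure
API; the struct layout and names below are illustrative, not the patch's exact
definitions:

typedef struct {
  grpc_closure wrapper;  /* handed to the inner policy */
  grpc_closure *wrapped; /* the caller's original on_complete */
  /* ... bookkeeping fields (policy ref, LB token storage, ...) ... */
} wrapped_arg;

static void wrapper_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  wrapped_arg *w = arg;
  /* bookkeeping runs here, then control returns to the caller */
  grpc_closure_sched(exec_ctx, w->wrapped, GRPC_ERROR_REF(error));
}

/* Wiring, given a heap-allocated wrapped_arg *w: */
grpc_closure_init(&w->wrapper, wrapper_cb, w, grpc_schedule_on_exec_ctx);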
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
index afecb71..e352e03 100644
--- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c
@@ -1,35 +1,3 @@
-/*
- *
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
 /* Automatically generated nanopb constant definitions */
 /* Generated by nanopb-0.3.7-dev */
 
diff --git a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
index e36d096..725aa7e 100644
--- a/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
+++ b/src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h
@@ -1,35 +1,3 @@
-/*
- *
- * Copyright 2016, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
 /* Automatically generated nanopb header */
 /* Generated by nanopb-0.3.7-dev */
 
diff --git a/src/core/ext/lb_policy/pick_first/pick_first.c b/src/core/ext/lb_policy/pick_first/pick_first.c
index b9cfe6b..821becf 100644
--- a/src/core/ext/lb_policy/pick_first/pick_first.c
+++ b/src/core/ext/lb_policy/pick_first/pick_first.c
@@ -120,7 +120,7 @@
   while (pp != NULL) {
     pending_pick *next = pp->next;
     *pp->target = NULL;
-    grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
     gpr_free(pp);
     pp = next;
   }
@@ -138,9 +138,9 @@
     pending_pick *next = pp->next;
     if (pp->target == target) {
       *target = NULL;
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, pp->on_complete,
-          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
       gpr_free(pp);
     } else {
       pp->next = p->pending_picks;
@@ -165,9 +165,9 @@
     pending_pick *next = pp->next;
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, pp->on_complete,
-          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick Cancelled", &error, 1));
       gpr_free(pp);
     } else {
       pp->next = p->pending_picks;
@@ -306,14 +306,15 @@
         /* drop the pick list: we are connected now */
         GRPC_LB_POLICY_WEAK_REF(&p->base, "destroy_subchannels");
         gpr_atm_rel_store(&p->selected, (gpr_atm)selected);
-        grpc_exec_ctx_sched(exec_ctx,
-                            grpc_closure_create(destroy_subchannels, p),
-                            GRPC_ERROR_NONE, NULL);
+        grpc_closure_sched(exec_ctx,
+                           grpc_closure_create(destroy_subchannels, p,
+                                               grpc_schedule_on_exec_ctx),
+                           GRPC_ERROR_NONE);
         /* update any calls that were waiting for a pick */
         while ((pp = p->pending_picks)) {
           p->pending_picks = pp->next;
           *pp->target = GRPC_CONNECTED_SUBCHANNEL_REF(selected, "picked");
-          grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+          grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
           gpr_free(pp);
         }
         grpc_connected_subchannel_notify_on_state_change(
@@ -366,8 +367,7 @@
           while ((pp = p->pending_picks)) {
             p->pending_picks = pp->next;
             *pp->target = NULL;
-            grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE,
-                                NULL);
+            grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
             gpr_free(pp);
           }
           GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &p->base,
@@ -419,8 +419,7 @@
   if (selected) {
     grpc_connected_subchannel_ping(exec_ctx, selected, closure);
   } else {
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("Not connected"));
   }
 }
 
@@ -485,7 +484,8 @@
   p->num_subchannels = subchannel_idx;
 
   grpc_lb_policy_init(&p->base, &pick_first_lb_policy_vtable);
-  grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p);
+  grpc_closure_init(&p->connectivity_changed, pf_connectivity_changed, p,
+                    grpc_schedule_on_exec_ctx);
   gpr_mu_init(&p->mu);
   return &p->base;
 }
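
Note: a related idiom in this file: pick_first schedules destroy_subchannels
through a heap-allocated one-shot closure instead of calling it inline, plausibly
so the old subchannel list is not torn down from inside a connectivity callback
that one of those subchannels delivered. The same lines as above, with an
explanatory comment added here:

/* Defer teardown: run destroy_subchannels(p) later on this exec_ctx rather
 * than re-entering subchannel state from the current callback. */
grpc_closure_sched(exec_ctx,
                   grpc_closure_create(destroy_subchannels, p,
                                       grpc_schedule_on_exec_ctx),
                   GRPC_ERROR_NONE);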
diff --git a/src/core/ext/lb_policy/round_robin/round_robin.c b/src/core/ext/lb_policy/round_robin/round_robin.c
index f030547..47f20a1 100644
--- a/src/core/ext/lb_policy/round_robin/round_robin.c
+++ b/src/core/ext/lb_policy/round_robin/round_robin.c
@@ -321,8 +321,8 @@
   while ((pp = p->pending_picks)) {
     p->pending_picks = pp->next;
     *pp->target = NULL;
-    grpc_exec_ctx_sched(exec_ctx, pp->on_complete,
-                        GRPC_ERROR_CREATE("Channel Shutdown"), NULL);
+    grpc_closure_sched(exec_ctx, pp->on_complete,
+                       GRPC_ERROR_CREATE("Channel Shutdown"));
     gpr_free(pp);
   }
   grpc_connectivity_state_set(
@@ -348,9 +348,9 @@
     pending_pick *next = pp->next;
     if (pp->target == target) {
       *target = NULL;
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, pp->on_complete,
-          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
       gpr_free(pp);
     } else {
       pp->next = p->pending_picks;
@@ -376,9 +376,9 @@
     if ((pp->initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
       *pp->target = NULL;
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, pp->on_complete,
-          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1), NULL);
+          GRPC_ERROR_CREATE_REFERENCING("Pick cancelled", &error, 1));
       gpr_free(pp);
     } else {
       pp->next = p->pending_picks;
@@ -581,7 +581,7 @@
                   "[RR CONN CHANGED] TARGET <-- SUBCHANNEL %p (NODE %p)",
                   (void *)selected->subchannel, (void *)selected);
         }
-        grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+        grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
         gpr_free(pp);
       }
       update_lb_connectivity_status(exec_ctx, sd, error);
@@ -634,7 +634,7 @@
         while ((pp = p->pending_picks)) {
           p->pending_picks = pp->next;
           *pp->target = NULL;
-          grpc_exec_ctx_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE, NULL);
+          grpc_closure_sched(exec_ctx, pp->on_complete, GRPC_ERROR_NONE);
           gpr_free(pp);
         }
       }
@@ -684,8 +684,8 @@
     GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, target, "rr_picked");
   } else {
     gpr_mu_unlock(&p->mu);
-    grpc_exec_ctx_sched(exec_ctx, closure,
-                        GRPC_ERROR_CREATE("Round Robin not connected"), NULL);
+    grpc_closure_sched(exec_ctx, closure,
+                       GRPC_ERROR_CREATE("Round Robin not connected"));
   }
 }
 
@@ -749,7 +749,7 @@
       }
       ++subchannel_idx;
       grpc_closure_init(&sd->connectivity_changed_closure,
-                        rr_connectivity_changed, sd);
+                        rr_connectivity_changed, sd, grpc_schedule_on_exec_ctx);
     }
   }
   if (subchannel_idx == 0) {
diff --git a/src/core/ext/load_reporting/load_reporting_filter.c b/src/core/ext/load_reporting/load_reporting_filter.c
index 18bb826..1403eb0 100644
--- a/src/core/ext/load_reporting/load_reporting_filter.c
+++ b/src/core/ext/load_reporting/load_reporting_filter.c
@@ -114,7 +114,8 @@
   memset(calld, 0, sizeof(call_data));
 
   calld->id = (intptr_t)args->call_stack;
-  grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem);
+  grpc_closure_init(&calld->on_initial_md_ready, on_initial_md_ready, elem,
+                    grpc_schedule_on_exec_ctx);
 
   /* TODO(dgq): do something with the data
   channel_data *chand = elem->channel_data;
diff --git a/src/core/ext/resolver/dns/native/dns_resolver.c b/src/core/ext/resolver/dns/native/dns_resolver.c
index 2675fa9..efbb2be 100644
--- a/src/core/ext/resolver/dns/native/dns_resolver.c
+++ b/src/core/ext/resolver/dns/native/dns_resolver.c
@@ -112,8 +112,8 @@
   }
   if (r->next_completion != NULL) {
     *r->target_result = NULL;
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion,
-                        GRPC_ERROR_CREATE("Resolver Shutdown"), NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion,
+                       GRPC_ERROR_CREATE("Resolver Shutdown"));
     r->next_completion = NULL;
   }
   gpr_mu_unlock(&r->mu);
@@ -219,9 +219,10 @@
   GPR_ASSERT(!r->resolving);
   r->resolving = true;
   r->addresses = NULL;
-  grpc_resolve_address(exec_ctx, r->name_to_resolve, r->default_port,
-                       r->interested_parties,
-                       grpc_closure_create(dns_on_resolved, r), &r->addresses);
+  grpc_resolve_address(
+      exec_ctx, r->name_to_resolve, r->default_port, r->interested_parties,
+      grpc_closure_create(dns_on_resolved, r, grpc_schedule_on_exec_ctx),
+      &r->addresses);
 }
 
 static void dns_maybe_finish_next_locked(grpc_exec_ctx *exec_ctx,
@@ -231,7 +232,7 @@
     *r->target_result = r->resolved_result == NULL
                             ? NULL
                             : grpc_channel_args_copy(r->resolved_result);
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = NULL;
     r->published_version = r->resolved_version;
   }
diff --git a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
index 88808c6..55ea502 100644
--- a/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
+++ b/src/core/ext/resolver/sockaddr/sockaddr_resolver.c
@@ -89,7 +89,7 @@
   gpr_mu_lock(&r->mu);
   if (r->next_completion != NULL) {
     *r->target_result = NULL;
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = NULL;
   }
   gpr_mu_unlock(&r->mu);
@@ -123,7 +123,7 @@
     grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
     *r->target_result =
         grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = NULL;
   }
 }
diff --git a/src/core/ext/transport/chttp2/client/chttp2_connector.c b/src/core/ext/transport/chttp2/client/chttp2_connector.c
index dfa0808..be0d426 100644
--- a/src/core/ext/transport/chttp2/client/chttp2_connector.c
+++ b/src/core/ext/transport/chttp2/client/chttp2_connector.c
@@ -140,7 +140,7 @@
   }
   grpc_closure *notify = c->notify;
   c->notify = NULL;
-  grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+  grpc_closure_sched(exec_ctx, notify, error);
   grpc_handshake_manager_destroy(exec_ctx, c->handshake_mgr);
   c->handshake_mgr = NULL;
   gpr_mu_unlock(&c->mu);
@@ -179,7 +179,7 @@
     memset(c->result, 0, sizeof(*c->result));
     grpc_closure *notify = c->notify;
     c->notify = NULL;
-    grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+    grpc_closure_sched(exec_ctx, notify, error);
     gpr_mu_unlock(&c->mu);
     chttp2_connector_unref(exec_ctx, arg);
   } else {
@@ -202,7 +202,7 @@
     memset(c->result, 0, sizeof(*c->result));
     grpc_closure *notify = c->notify;
     c->notify = NULL;
-    grpc_exec_ctx_sched(exec_ctx, notify, error, NULL);
+    grpc_closure_sched(exec_ctx, notify, error);
     if (c->endpoint != NULL) grpc_endpoint_shutdown(exec_ctx, c->endpoint);
     gpr_mu_unlock(&c->mu);
     chttp2_connector_unref(exec_ctx, arg);
@@ -210,7 +210,7 @@
     GPR_ASSERT(c->endpoint != NULL);
     if (!GRPC_SLICE_IS_EMPTY(c->args.initial_connect_string)) {
       grpc_closure_init(&c->initial_string_sent, on_initial_connect_string_sent,
-                        c);
+                        c, grpc_schedule_on_exec_ctx);
       grpc_slice_buffer_init(&c->initial_string_buffer);
       grpc_slice_buffer_add(&c->initial_string_buffer,
                             c->args.initial_connect_string);
@@ -236,7 +236,7 @@
   c->result = result;
   GPR_ASSERT(c->endpoint == NULL);
   chttp2_connector_ref(con);  // Ref taken for callback.
-  grpc_closure_init(&c->connected, connected, c);
+  grpc_closure_init(&c->connected, connected, c, grpc_schedule_on_exec_ctx);
   GPR_ASSERT(!c->connecting);
   c->connecting = true;
   grpc_tcp_client_connect(exec_ctx, &c->connected, &c->endpoint,
diff --git a/src/core/ext/transport/chttp2/server/chttp2_server.c b/src/core/ext/transport/chttp2/server/chttp2_server.c
index f085771..9af62d3 100644
--- a/src/core/ext/transport/chttp2/server/chttp2_server.c
+++ b/src/core/ext/transport/chttp2/server/chttp2_server.c
@@ -281,7 +281,8 @@
   state = gpr_malloc(sizeof(*state));
   memset(state, 0, sizeof(*state));
   grpc_closure_init(&state->tcp_server_shutdown_complete,
-                    tcp_server_shutdown_complete, state);
+                    tcp_server_shutdown_complete, state,
+                    grpc_schedule_on_exec_ctx);
   err = grpc_tcp_server_create(exec_ctx, &state->tcp_server_shutdown_complete,
                                args, &tcp_server);
   if (err != GRPC_ERROR_NONE) {
diff --git a/src/core/ext/transport/chttp2/transport/chttp2_transport.c b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
index 6bc0548..488c3b9 100644
--- a/src/core/ext/transport/chttp2/transport/chttp2_transport.c
+++ b/src/core/ext/transport/chttp2/transport/chttp2_transport.c
@@ -73,20 +73,14 @@
 static void write_action_begin_locked(grpc_exec_ctx *exec_ctx, void *t,
                                       grpc_error *error);
 static void write_action(grpc_exec_ctx *exec_ctx, void *t, grpc_error *error);
-static void write_action_end(grpc_exec_ctx *exec_ctx, void *t,
-                             grpc_error *error);
 static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *t,
                                     grpc_error *error);
 
-static void read_action_begin(grpc_exec_ctx *exec_ctx, void *t,
-                              grpc_error *error);
 static void read_action_locked(grpc_exec_ctx *exec_ctx, void *t,
                                grpc_error *error);
 
 static void complete_fetch_locked(grpc_exec_ctx *exec_ctx, void *gs,
                                   grpc_error *error);
-static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
-                           grpc_error *error);
 /** Set a transport level setting, and push it to our peer */
 static void push_setting(grpc_exec_ctx *exec_ctx, grpc_chttp2_transport *t,
                          grpc_chttp2_setting_id id, uint32_t value);
@@ -112,12 +106,8 @@
                                                 void *byte_stream,
                                                 grpc_error *error_ignored);
 
-static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
-                             grpc_error *error);
 static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
                                     grpc_error *error);
-static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *t,
-                                  grpc_error *error);
 static void destructive_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *t,
                                          grpc_error *error);
 
@@ -166,8 +156,8 @@
      and maybe they hold resources that need to be freed */
   while (t->pings.next != &t->pings) {
     grpc_chttp2_outstanding_ping *ping = t->pings.next;
-    grpc_exec_ctx_sched(exec_ctx, ping->on_recv,
-                        GRPC_ERROR_CREATE("Transport closed"), NULL);
+    grpc_closure_sched(exec_ctx, ping->on_recv,
+                       GRPC_ERROR_CREATE("Transport closed"));
     ping->next->prev = ping->prev;
     ping->prev->next = ping->next;
     gpr_free(ping);
@@ -246,18 +236,15 @@
   grpc_slice_buffer_init(&t->outbuf);
   grpc_chttp2_hpack_compressor_init(&t->hpack_compressor);
 
-  grpc_closure_init(&t->write_action_begin_locked, write_action_begin_locked,
-                    t);
-  grpc_closure_init(&t->write_action, write_action, t);
-  grpc_closure_init(&t->write_action_end, write_action_end, t);
-  grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t);
-  grpc_closure_init(&t->read_action_begin, read_action_begin, t);
-  grpc_closure_init(&t->read_action_locked, read_action_locked, t);
-  grpc_closure_init(&t->benign_reclaimer, benign_reclaimer, t);
-  grpc_closure_init(&t->destructive_reclaimer, destructive_reclaimer, t);
-  grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t);
+  grpc_closure_init(&t->write_action, write_action, t,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&t->read_action_locked, read_action_locked, t,
+                    grpc_combiner_scheduler(t->combiner, false));
+  grpc_closure_init(&t->benign_reclaimer_locked, benign_reclaimer_locked, t,
+                    grpc_combiner_scheduler(t->combiner, false));
   grpc_closure_init(&t->destructive_reclaimer_locked,
-                    destructive_reclaimer_locked, t);
+                    destructive_reclaimer_locked, t,
+                    grpc_combiner_scheduler(t->combiner, false));
 
   grpc_chttp2_goaway_parser_init(&t->goaway_parser);
   grpc_chttp2_hpack_parser_init(&t->hpack_parser);
@@ -395,9 +382,10 @@
 
 static void destroy_transport(grpc_exec_ctx *exec_ctx, grpc_transport *gt) {
   grpc_chttp2_transport *t = (grpc_chttp2_transport *)gt;
-  grpc_combiner_execute(exec_ctx, t->combiner,
-                        grpc_closure_create(destroy_transport_locked, t),
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(exec_ctx, grpc_closure_create(
+                                   destroy_transport_locked, t,
+                                   grpc_combiner_scheduler(t->combiner, false)),
+                     GRPC_ERROR_NONE);
 }
 
 static void close_transport_locked(grpc_exec_ctx *exec_ctx,
@@ -471,8 +459,8 @@
   grpc_chttp2_data_parser_init(&s->data_parser);
   grpc_slice_buffer_init(&s->flow_controlled_buffer);
   s->deadline = gpr_inf_future(GPR_CLOCK_MONOTONIC);
-  grpc_closure_init(&s->complete_fetch, complete_fetch, s);
-  grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s);
+  grpc_closure_init(&s->complete_fetch_locked, complete_fetch_locked, s,
+                    grpc_schedule_on_exec_ctx);
 
   GRPC_CHTTP2_REF_TRANSPORT(t, "stream");
 
@@ -547,9 +535,10 @@
   grpc_chttp2_stream *s = (grpc_chttp2_stream *)gs;
 
   s->destroy_stream_arg = and_free_memory;
-  grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s);
-  grpc_combiner_execute(exec_ctx, t->combiner, &s->destroy_stream,
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(
+      exec_ctx, grpc_closure_init(&s->destroy_stream, destroy_stream_locked, s,
+                                  grpc_combiner_scheduler(t->combiner, false)),
+      GRPC_ERROR_NONE);
   GPR_TIMER_END("destroy_stream", 0);
 }
 
@@ -600,7 +589,7 @@
                                  write_state_name(st), reason));
   t->write_state = st;
   if (st == GRPC_CHTTP2_WRITE_STATE_IDLE) {
-    grpc_exec_ctx_enqueue_list(exec_ctx, &t->run_after_write, NULL);
+    grpc_closure_list_sched(exec_ctx, &t->run_after_write);
     if (t->close_transport_on_writes_finished != NULL) {
       grpc_error *err = t->close_transport_on_writes_finished;
       t->close_transport_on_writes_finished = NULL;
@@ -618,9 +607,12 @@
     case GRPC_CHTTP2_WRITE_STATE_IDLE:
       set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING, reason);
       GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
-      grpc_combiner_execute_finally(exec_ctx, t->combiner,
-                                    &t->write_action_begin_locked,
-                                    GRPC_ERROR_NONE, covered_by_poller);
+      grpc_closure_sched(
+          exec_ctx,
+          grpc_closure_init(
+              &t->write_action_begin_locked, write_action_begin_locked, t,
+              grpc_combiner_finally_scheduler(t->combiner, covered_by_poller)),
+          GRPC_ERROR_NONE);
       break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING:
       set_write_state(
@@ -662,7 +654,7 @@
   if (!t->closed && grpc_chttp2_begin_write(exec_ctx, t)) {
     set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
                     "begin writing");
-    grpc_exec_ctx_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &t->write_action, GRPC_ERROR_NONE);
   } else {
     set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_IDLE,
                     "begin writing nothing");
@@ -674,19 +666,13 @@
 static void write_action(grpc_exec_ctx *exec_ctx, void *gt, grpc_error *error) {
   grpc_chttp2_transport *t = gt;
   GPR_TIMER_BEGIN("write_action", 0);
-  grpc_endpoint_write(exec_ctx, t->ep, &t->outbuf, &t->write_action_end);
+  grpc_endpoint_write(
+      exec_ctx, t->ep, &t->outbuf,
+      grpc_closure_init(&t->write_action_end_locked, write_action_end_locked, t,
+                        grpc_combiner_scheduler(t->combiner, false)));
   GPR_TIMER_END("write_action", 0);
 }
 
-static void write_action_end(grpc_exec_ctx *exec_ctx, void *gt,
-                             grpc_error *error) {
-  grpc_chttp2_transport *t = gt;
-  GPR_TIMER_BEGIN("write_action_end", 0);
-  grpc_combiner_execute(exec_ctx, t->combiner, &t->write_action_end_locked,
-                        GRPC_ERROR_REF(error), false);
-  GPR_TIMER_END("write_action_end", 0);
-}
-
 static void write_action_end_locked(grpc_exec_ctx *exec_ctx, void *tp,
                                     grpc_error *error) {
   GPR_TIMER_BEGIN("terminate_writing_with_lock", 0);
@@ -716,18 +702,24 @@
       set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
                       "continue writing [!covered]");
       GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
-      grpc_combiner_execute_finally(exec_ctx, t->combiner,
-                                    &t->write_action_begin_locked,
-                                    GRPC_ERROR_NONE, false);
+      grpc_closure_run(
+          exec_ctx,
+          grpc_closure_init(
+              &t->write_action_begin_locked, write_action_begin_locked, t,
+              grpc_combiner_finally_scheduler(t->combiner, false)),
+          GRPC_ERROR_NONE);
       break;
     case GRPC_CHTTP2_WRITE_STATE_WRITING_WITH_MORE_AND_COVERED_BY_POLLER:
       GPR_TIMER_MARK("state=writing_stale_with_poller", 0);
       set_write_state(exec_ctx, t, GRPC_CHTTP2_WRITE_STATE_WRITING,
                       "continue writing [covered]");
       GRPC_CHTTP2_REF_TRANSPORT(t, "writing");
-      grpc_combiner_execute_finally(exec_ctx, t->combiner,
-                                    &t->write_action_begin_locked,
-                                    GRPC_ERROR_NONE, true);
+      grpc_closure_run(
+          exec_ctx,
+          grpc_closure_init(&t->write_action_begin_locked,
+                            write_action_begin_locked, t,
+                            grpc_combiner_finally_scheduler(t->combiner, true)),
+          GRPC_ERROR_NONE);
       break;
   }
 
@@ -965,15 +957,6 @@
   }
 }
 
-static void complete_fetch(grpc_exec_ctx *exec_ctx, void *gs,
-                           grpc_error *error) {
-  grpc_chttp2_stream *s = gs;
-  grpc_chttp2_transport *t = s->t;
-  grpc_combiner_execute(exec_ctx, t->combiner, &s->complete_fetch_locked,
-                        GRPC_ERROR_REF(error),
-                        s->complete_fetch_covered_by_poller);
-}
-
 static void do_nothing(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {}
 
 static void log_metadata(const grpc_metadata_batch *md_batch, uint32_t id,
@@ -1009,7 +992,8 @@
 
   grpc_closure *on_complete = op->on_complete;
   if (on_complete == NULL) {
-    on_complete = grpc_closure_create(do_nothing, NULL);
+    on_complete =
+        grpc_closure_create(do_nothing, NULL, grpc_schedule_on_exec_ctx);
   }
 
   /* use final_data as a barrier until enqueue time; the initial counter is
@@ -1212,13 +1196,15 @@
     gpr_free(str);
   }
 
-  grpc_closure_init(&op->transport_private.closure, perform_stream_op_locked,
-                    op);
   op->transport_private.args[0] = gt;
   op->transport_private.args[1] = gs;
   GRPC_CHTTP2_STREAM_REF(s, "perform_stream_op");
-  grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
-                        GRPC_ERROR_NONE, op->covered_by_poller);
+  grpc_closure_sched(
+      exec_ctx,
+      grpc_closure_init(
+          &op->transport_private.closure, perform_stream_op_locked, op,
+          grpc_combiner_scheduler(t->combiner, op->covered_by_poller)),
+      GRPC_ERROR_NONE);
   GPR_TIMER_END("perform_stream_op", 0);
 }
 
@@ -1247,7 +1233,7 @@
   grpc_chttp2_outstanding_ping *ping;
   for (ping = t->pings.next; ping != &t->pings; ping = ping->next) {
     if (0 == memcmp(opaque_8bytes, ping->id, 8)) {
-      grpc_exec_ctx_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, ping->on_recv, GRPC_ERROR_NONE);
       ping->next->prev = ping->prev;
       ping->prev->next = ping->next;
       gpr_free(ping);
@@ -1321,11 +1307,12 @@
   char *msg = grpc_transport_op_string(op);
   gpr_free(msg);
   op->transport_private.args[0] = gt;
-  grpc_closure_init(&op->transport_private.closure, perform_transport_op_locked,
-                    op);
   GRPC_CHTTP2_REF_TRANSPORT(t, "transport_op");
-  grpc_combiner_execute(exec_ctx, t->combiner, &op->transport_private.closure,
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(
+      exec_ctx, grpc_closure_init(&op->transport_private.closure,
+                                  perform_transport_op_locked, op,
+                                  grpc_combiner_scheduler(t->combiner, false)),
+      GRPC_ERROR_NONE);
 }
 
 /*******************************************************************************
@@ -1801,19 +1788,6 @@
  * INPUT PROCESSING - PARSING
  */
 
-static void read_action_begin(grpc_exec_ctx *exec_ctx, void *tp,
-                              grpc_error *error) {
-  /* Control flow:
-     reading_action_locked ->
-       (parse_unlocked -> post_parse_locked)? ->
-       post_reading_action_locked */
-  GPR_TIMER_BEGIN("reading_action", 0);
-  grpc_chttp2_transport *t = tp;
-  grpc_combiner_execute(exec_ctx, t->combiner, &t->read_action_locked,
-                        GRPC_ERROR_REF(error), false);
-  GPR_TIMER_END("reading_action", 0);
-}
-
 static grpc_error *try_http_parsing(grpc_exec_ctx *exec_ctx,
                                     grpc_chttp2_transport *t) {
   grpc_http_parser parser;
@@ -1913,7 +1887,8 @@
   grpc_slice_buffer_reset_and_unref(&t->read_buffer);
 
   if (keep_reading) {
-    grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_begin);
+    grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer,
+                       &t->read_action_locked);
     GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "keep_reading");
   } else {
     GRPC_CHTTP2_UNREF_TRANSPORT(exec_ctx, t, "reading_action");
@@ -2050,10 +2025,12 @@
   bs->next_action.slice = slice;
   bs->next_action.max_size_hint = max_size_hint;
   bs->next_action.on_complete = on_complete;
-  grpc_closure_init(&bs->next_action.closure, incoming_byte_stream_next_locked,
-                    bs);
-  grpc_combiner_execute(exec_ctx, bs->transport->combiner,
-                        &bs->next_action.closure, GRPC_ERROR_NONE, false);
+  grpc_closure_sched(
+      exec_ctx,
+      grpc_closure_init(
+          &bs->next_action.closure, incoming_byte_stream_next_locked, bs,
+          grpc_combiner_scheduler(bs->transport->combiner, false)),
+      GRPC_ERROR_NONE);
   GPR_TIMER_END("incoming_byte_stream_next", 0);
   return 0;
 }
@@ -2075,10 +2052,12 @@
   GPR_TIMER_BEGIN("incoming_byte_stream_destroy", 0);
   grpc_chttp2_incoming_byte_stream *bs =
       (grpc_chttp2_incoming_byte_stream *)byte_stream;
-  grpc_closure_init(&bs->destroy_action, incoming_byte_stream_destroy_locked,
-                    bs);
-  grpc_combiner_execute(exec_ctx, bs->transport->combiner, &bs->destroy_action,
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(
+      exec_ctx,
+      grpc_closure_init(
+          &bs->destroy_action, incoming_byte_stream_destroy_locked, bs,
+          grpc_combiner_scheduler(bs->transport->combiner, false)),
+      GRPC_ERROR_NONE);
   GPR_TIMER_END("incoming_byte_stream_destroy", 0);
 }
 
@@ -2086,7 +2065,7 @@
     grpc_exec_ctx *exec_ctx, grpc_chttp2_incoming_byte_stream *bs,
     grpc_error *error) {
   GPR_ASSERT(error != GRPC_ERROR_NONE);
-  grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error), NULL);
+  grpc_closure_sched(exec_ctx, bs->on_next, GRPC_ERROR_REF(error));
   bs->on_next = NULL;
   GRPC_ERROR_UNREF(bs->error);
   bs->error = error;
@@ -2103,7 +2082,7 @@
     bs->remaining_bytes -= (uint32_t)GRPC_SLICE_LENGTH(slice);
     if (bs->on_next != NULL) {
       *bs->next = slice;
-      grpc_exec_ctx_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, bs->on_next, GRPC_ERROR_NONE);
       bs->on_next = NULL;
     } else {
       grpc_slice_buffer_add(&bs->slices, slice);
@@ -2171,7 +2150,7 @@
     GRPC_CHTTP2_REF_TRANSPORT(t, "benign_reclaimer");
     grpc_resource_user_post_reclaimer(exec_ctx,
                                       grpc_endpoint_get_resource_user(t->ep),
-                                      false, &t->benign_reclaimer);
+                                      false, &t->benign_reclaimer_locked);
   }
 }
 
@@ -2182,24 +2161,10 @@
     GRPC_CHTTP2_REF_TRANSPORT(t, "destructive_reclaimer");
     grpc_resource_user_post_reclaimer(exec_ctx,
                                       grpc_endpoint_get_resource_user(t->ep),
-                                      true, &t->destructive_reclaimer);
+                                      true, &t->destructive_reclaimer_locked);
   }
 }
 
-static void benign_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
-                             grpc_error *error) {
-  grpc_chttp2_transport *t = arg;
-  grpc_combiner_execute(exec_ctx, t->combiner, &t->benign_reclaimer_locked,
-                        GRPC_ERROR_REF(error), false);
-}
-
-static void destructive_reclaimer(grpc_exec_ctx *exec_ctx, void *arg,
-                                  grpc_error *error) {
-  grpc_chttp2_transport *t = arg;
-  grpc_combiner_execute(exec_ctx, t->combiner, &t->destructive_reclaimer_locked,
-                        GRPC_ERROR_REF(error), false);
-}
-
 static void benign_reclaimer_locked(grpc_exec_ctx *exec_ctx, void *arg,
                                     grpc_error *error) {
   grpc_chttp2_transport *t = arg;
@@ -2380,5 +2345,5 @@
     grpc_slice_buffer_move_into(read_buffer, &t->read_buffer);
     gpr_free(read_buffer);
   }
-  read_action_begin(exec_ctx, t, GRPC_ERROR_NONE);
+  grpc_closure_sched(exec_ctx, &t->read_action_locked, GRPC_ERROR_NONE);
 }
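
The chttp2_transport changes are where the refactor pays off: read_action_begin, write_action_end, complete_fetch, benign_reclaimer and destructive_reclaimer existed only to bounce back into the combiner via grpc_combiner_execute, and all five are deleted. Because the *_locked closures are now initialized with grpc_combiner_scheduler, they can be handed directly to the endpoint or resource user. A condensed sketch of the shape of the change, mirroring the read path above:

  /* before: a trampoline whose only job was to re-dispatch under the lock */
  static void read_action_begin(grpc_exec_ctx *exec_ctx, void *tp,
                                grpc_error *error) {
    grpc_chttp2_transport *t = tp;
    grpc_combiner_execute(exec_ctx, t->combiner, &t->read_action_locked,
                          GRPC_ERROR_REF(error), false);
  }
  grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_begin);

  /* after: the closure knows its combiner, so no trampoline is needed */
  grpc_closure_init(&t->read_action_locked, read_action_locked, t,
                    grpc_combiner_scheduler(t->combiner, false));
  grpc_endpoint_read(exec_ctx, t->ep, &t->read_buffer, &t->read_action_locked);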
diff --git a/src/core/ext/transport/chttp2/transport/hpack_parser.c b/src/core/ext/transport/chttp2/transport/hpack_parser.c
index 6a9200b..64ce07b 100644
--- a/src/core/ext/transport/chttp2/transport/hpack_parser.c
+++ b/src/core/ext/transport/chttp2/transport/hpack_parser.c
@@ -1634,10 +1634,11 @@
              however -- it might be that we receive a RST_STREAM following this
              and can avoid the extra write */
           GRPC_CHTTP2_STREAM_REF(s, "final_rst");
-          grpc_combiner_execute_finally(
-              exec_ctx, t->combiner,
-              grpc_closure_create(force_client_rst_stream, s), GRPC_ERROR_NONE,
-              false);
+          grpc_closure_sched(
+              exec_ctx, grpc_closure_create(force_client_rst_stream, s,
+                                            grpc_combiner_finally_scheduler(
+                                                t->combiner, false)),
+              GRPC_ERROR_NONE);
         }
         grpc_chttp2_mark_stream_closed(exec_ctx, t, s, true, false,
                                        GRPC_ERROR_NONE);
diff --git a/src/core/ext/transport/chttp2/transport/internal.h b/src/core/ext/transport/chttp2/transport/internal.h
index b727965..a52acba 100644
--- a/src/core/ext/transport/chttp2/transport/internal.h
+++ b/src/core/ext/transport/chttp2/transport/internal.h
@@ -212,10 +212,8 @@
 
   grpc_closure write_action_begin_locked;
   grpc_closure write_action;
-  grpc_closure write_action_end;
   grpc_closure write_action_end_locked;
 
-  grpc_closure read_action_begin;
   grpc_closure read_action_locked;
 
   /** incoming read bytes */
@@ -336,10 +334,8 @@
   /** have we scheduled a destructive cleanup? */
   bool destructive_reclaimer_registered;
   /** benign cleanup closure */
-  grpc_closure benign_reclaimer;
   grpc_closure benign_reclaimer_locked;
   /** destructive cleanup closure */
-  grpc_closure destructive_reclaimer;
   grpc_closure destructive_reclaimer_locked;
 };
 
diff --git a/src/core/ext/transport/cronet/transport/cronet_transport.c b/src/core/ext/transport/cronet/transport/cronet_transport.c
index afc59f4..df0a769 100644
--- a/src/core/ext/transport/cronet/transport/cronet_transport.c
+++ b/src/core/ext/transport/cronet/transport/cronet_transport.c
@@ -849,17 +849,17 @@
                            OP_RECV_INITIAL_METADATA)) {
     CRONET_LOG(GPR_DEBUG, "running: %p  OP_RECV_INITIAL_METADATA", oas);
     if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
-                          GRPC_ERROR_CANCELLED, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+                         GRPC_ERROR_CANCELLED);
     } else if (stream_state->state_callback_received[OP_FAILED]) {
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, stream_op->recv_initial_metadata_ready,
-          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
     } else {
       grpc_chttp2_incoming_metadata_buffer_publish(
           &oas->s->state.rs.initial_metadata, stream_op->recv_initial_metadata);
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
-                          GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_initial_metadata_ready,
+                         GRPC_ERROR_NONE);
     }
     stream_state->state_op_done[OP_RECV_INITIAL_METADATA] = true;
     result = ACTION_TAKEN_NO_CALLBACK;
@@ -910,22 +910,22 @@
     CRONET_LOG(GPR_DEBUG, "running: %p  OP_RECV_MESSAGE", oas);
     if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
       CRONET_LOG(GPR_DEBUG, "Stream is cancelled.");
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
-                          GRPC_ERROR_CANCELLED, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+                         GRPC_ERROR_CANCELLED);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       result = ACTION_TAKEN_NO_CALLBACK;
     } else if (stream_state->state_callback_received[OP_FAILED]) {
       CRONET_LOG(GPR_DEBUG, "Stream failed.");
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, stream_op->recv_message_ready,
-          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       result = ACTION_TAKEN_NO_CALLBACK;
     } else if (stream_state->rs.read_stream_closed == true) {
       /* No more data will be received */
       CRONET_LOG(GPR_DEBUG, "read stream closed");
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
-                          GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+                         GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
       result = ACTION_TAKEN_NO_CALLBACK;
@@ -958,8 +958,8 @@
                                         &stream_state->rs.read_slice_buffer, 0);
           *((grpc_byte_buffer **)stream_op->recv_message) =
               (grpc_byte_buffer *)&stream_state->rs.sbs;
-          grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
-                              GRPC_ERROR_NONE, NULL);
+          grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+                             GRPC_ERROR_NONE);
           stream_state->state_op_done[OP_RECV_MESSAGE] = true;
           oas->state.state_op_done[OP_RECV_MESSAGE] = true;
           result = ACTION_TAKEN_NO_CALLBACK;
@@ -993,8 +993,8 @@
                                     &stream_state->rs.read_slice_buffer, 0);
       *((grpc_byte_buffer **)stream_op->recv_message) =
           (grpc_byte_buffer *)&stream_state->rs.sbs;
-      grpc_exec_ctx_sched(exec_ctx, stream_op->recv_message_ready,
-                          GRPC_ERROR_NONE, NULL);
+      grpc_closure_sched(exec_ctx, stream_op->recv_message_ready,
+                         GRPC_ERROR_NONE);
       stream_state->state_op_done[OP_RECV_MESSAGE] = true;
       oas->state.state_op_done[OP_RECV_MESSAGE] = true;
       /* Do an extra read to trigger on_succeeded() callback in case connection
@@ -1055,18 +1055,17 @@
                            OP_ON_COMPLETE)) {
     CRONET_LOG(GPR_DEBUG, "running: %p  OP_ON_COMPLETE", oas);
     if (stream_state->state_op_done[OP_CANCEL_ERROR]) {
-      grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete,
-                          GRPC_ERROR_REF(stream_state->cancel_error), NULL);
+      grpc_closure_sched(exec_ctx, stream_op->on_complete,
+                         GRPC_ERROR_REF(stream_state->cancel_error));
     } else if (stream_state->state_callback_received[OP_FAILED]) {
-      grpc_exec_ctx_sched(
+      grpc_closure_sched(
           exec_ctx, stream_op->on_complete,
-          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."), NULL);
+          make_error_with_desc(GRPC_STATUS_UNAVAILABLE, "Unavailable."));
     } else {
       /* All actions in this stream_op are complete. Call the on_complete
        * callback
        */
-      grpc_exec_ctx_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE,
-                          NULL);
+      grpc_closure_sched(exec_ctx, stream_op->on_complete, GRPC_ERROR_NONE);
     }
     oas->state.state_op_done[OP_ON_COMPLETE] = true;
     oas->done = true;
diff --git a/src/core/lib/channel/channel_stack.c b/src/core/lib/channel/channel_stack.c
index 1d0b7d4..cddd84f 100644
--- a/src/core/lib/channel/channel_stack.c
+++ b/src/core/lib/channel/channel_stack.c
@@ -297,7 +297,8 @@
   grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
   memset(op, 0, sizeof(*op));
   op->cancel_error = GRPC_ERROR_CANCELLED;
-  op->on_complete = grpc_closure_create(destroy_op, op);
+  op->on_complete =
+      grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
   elem->filter->start_transport_stream_op(exec_ctx, elem, op);
 }
 
@@ -307,7 +308,8 @@
                                                 grpc_slice *optional_message) {
   grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
   memset(op, 0, sizeof(*op));
-  op->on_complete = grpc_closure_create(destroy_op, op);
+  op->on_complete =
+      grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
   grpc_transport_stream_op_add_cancellation_with_message(op, status,
                                                          optional_message);
   elem->filter->start_transport_stream_op(exec_ctx, elem, op);
@@ -319,7 +321,8 @@
                                                grpc_slice *optional_message) {
   grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
   memset(op, 0, sizeof(*op));
-  op->on_complete = grpc_closure_create(destroy_op, op);
+  op->on_complete =
+      grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);
   grpc_transport_stream_op_add_close(op, status, optional_message);
   elem->filter->start_transport_stream_op(exec_ctx, elem, op);
 }
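
The channel_stack.c hunks show the heap-closure idiom for fire-and-forget ops: grpc_closure_create allocates a wrapper bound to grpc_schedule_on_exec_ctx, and the callback frees the op when it runs. Condensed from the hunks above (destroy_op is the file's own helper; its body is paraphrased here):

  static void destroy_op(grpc_exec_ctx *exec_ctx, void *op,
                         grpc_error *error) {
    gpr_free(op); /* the op owns no other resources in this sketch */
  }

  grpc_transport_stream_op *op = gpr_malloc(sizeof(*op));
  memset(op, 0, sizeof(*op));
  op->on_complete =
      grpc_closure_create(destroy_op, op, grpc_schedule_on_exec_ctx);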
diff --git a/src/core/lib/channel/compress_filter.c b/src/core/lib/channel/compress_filter.c
index 0e336dc..35455d4 100644
--- a/src/core/lib/channel/compress_filter.c
+++ b/src/core/lib/channel/compress_filter.c
@@ -269,8 +269,10 @@
   /* initialize members */
   grpc_slice_buffer_init(&calld->slices);
   calld->has_compression_algorithm = 0;
-  grpc_closure_init(&calld->got_slice, got_slice, elem);
-  grpc_closure_init(&calld->send_done, send_done, elem);
+  grpc_closure_init(&calld->got_slice, got_slice, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->send_done, send_done, elem,
+                    grpc_schedule_on_exec_ctx);
 
   return GRPC_ERROR_NONE;
 }
diff --git a/src/core/lib/channel/deadline_filter.c b/src/core/lib/channel/deadline_filter.c
index 470ccfe..902ebf1 100644
--- a/src/core/lib/channel/deadline_filter.c
+++ b/src/core/lib/channel/deadline_filter.c
@@ -123,7 +123,8 @@
 static void inject_on_complete_cb(grpc_deadline_state* deadline_state,
                                   grpc_transport_stream_op* op) {
   deadline_state->next_on_complete = op->on_complete;
-  grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state);
+  grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state,
+                    grpc_schedule_on_exec_ctx);
   op->on_complete = &deadline_state->on_complete;
 }
 
@@ -172,8 +173,9 @@
     struct start_timer_after_init_state* state = gpr_malloc(sizeof(*state));
     state->elem = elem;
     state->deadline = deadline;
-    grpc_closure_init(&state->closure, start_timer_after_init, state);
-    grpc_exec_ctx_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_init(&state->closure, start_timer_after_init, state,
+                      grpc_schedule_on_exec_ctx);
+    grpc_closure_sched(exec_ctx, &state->closure, GRPC_ERROR_NONE);
   }
 }
 
@@ -290,7 +292,8 @@
       calld->next_recv_initial_metadata_ready = op->recv_initial_metadata_ready;
       calld->recv_initial_metadata = op->recv_initial_metadata;
       grpc_closure_init(&calld->recv_initial_metadata_ready,
-                        recv_initial_metadata_ready, elem);
+                        recv_initial_metadata_ready, elem,
+                        grpc_schedule_on_exec_ctx);
       op->recv_initial_metadata_ready = &calld->recv_initial_metadata_ready;
     }
     // Make sure we know when the call is complete, so that we can cancel
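
deadline_filter.c uses the interception idiom common to the filters in this patch: stash the op's on_complete, swap in a closure of your own, and chain to the original from the callback. A sketch of the idiom; the callback body is paraphrased rather than copied from the filter:

  static void on_complete(grpc_exec_ctx *exec_ctx, void *arg,
                          grpc_error *error) {
    grpc_deadline_state *deadline_state = arg;
    /* ... filter-specific work, e.g. cancelling the deadline timer ... */
    grpc_closure_run(exec_ctx, deadline_state->next_on_complete,
                     GRPC_ERROR_REF(error));
  }

  deadline_state->next_on_complete = op->on_complete;
  grpc_closure_init(&deadline_state->on_complete, on_complete, deadline_state,
                    grpc_schedule_on_exec_ctx);
  op->on_complete = &deadline_state->on_complete;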
diff --git a/src/core/lib/channel/handshaker.c b/src/core/lib/channel/handshaker.c
index 23edc82..ff82752 100644
--- a/src/core/lib/channel/handshaker.c
+++ b/src/core/lib/channel/handshaker.c
@@ -165,7 +165,7 @@
     // Cancel deadline timer, since we're invoking the on_handshake_done
     // callback now.
     grpc_timer_cancel(exec_ctx, &mgr->deadline_timer);
-    grpc_exec_ctx_sched(exec_ctx, &mgr->on_handshake_done, error, NULL);
+    grpc_closure_sched(exec_ctx, &mgr->on_handshake_done, error);
     mgr->shutdown = true;
   } else {
     grpc_handshaker_do_handshake(exec_ctx, mgr->handshakers[mgr->index],
@@ -218,8 +218,10 @@
   grpc_slice_buffer_init(mgr->args.read_buffer);
   // Initialize state needed for calling handshakers.
   mgr->acceptor = acceptor;
-  grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr);
-  grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args);
+  grpc_closure_init(&mgr->call_next_handshaker, call_next_handshaker, mgr,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&mgr->on_handshake_done, on_handshake_done, &mgr->args,
+                    grpc_schedule_on_exec_ctx);
   // Start deadline timer, which owns a ref.
   gpr_ref(&mgr->refs);
   grpc_timer_init(exec_ctx, &mgr->deadline_timer,
diff --git a/src/core/lib/channel/http_client_filter.c b/src/core/lib/channel/http_client_filter.c
index 1a2d08d..c37cab3 100644
--- a/src/core/lib/channel/http_client_filter.c
+++ b/src/core/lib/channel/http_client_filter.c
@@ -352,12 +352,17 @@
   calld->send_message_blocked = false;
   grpc_slice_buffer_init(&calld->slices);
   grpc_closure_init(&calld->hc_on_recv_initial_metadata,
-                    hc_on_recv_initial_metadata, elem);
+                    hc_on_recv_initial_metadata, elem,
+                    grpc_schedule_on_exec_ctx);
   grpc_closure_init(&calld->hc_on_recv_trailing_metadata,
-                    hc_on_recv_trailing_metadata, elem);
-  grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem);
-  grpc_closure_init(&calld->got_slice, got_slice, elem);
-  grpc_closure_init(&calld->send_done, send_done, elem);
+                    hc_on_recv_trailing_metadata, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->hc_on_complete, hc_on_complete, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->got_slice, got_slice, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->send_done, send_done, elem,
+                    grpc_schedule_on_exec_ctx);
   return GRPC_ERROR_NONE;
 }
 
diff --git a/src/core/lib/channel/http_server_filter.c b/src/core/lib/channel/http_server_filter.c
index a5134ee..a6d7205 100644
--- a/src/core/lib/channel/http_server_filter.c
+++ b/src/core/lib/channel/http_server_filter.c
@@ -334,9 +334,12 @@
   call_data *calld = elem->call_data;
   /* initialize members */
   memset(calld, 0, sizeof(*calld));
-  grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem);
-  grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem);
-  grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem);
+  grpc_closure_init(&calld->hs_on_recv, hs_on_recv, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->hs_on_complete, hs_on_complete, elem,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&calld->hs_recv_message_ready, hs_recv_message_ready, elem,
+                    grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&calld->read_slice_buffer);
   return GRPC_ERROR_NONE;
 }
diff --git a/src/core/lib/http/httpcli.c b/src/core/lib/http/httpcli.c
index 1035f31..581a74b 100644
--- a/src/core/lib/http/httpcli.c
+++ b/src/core/lib/http/httpcli.c
@@ -103,7 +103,7 @@
                    grpc_error *error) {
   grpc_polling_entity_del_from_pollset_set(exec_ctx, req->pollent,
                                            req->context->pollset_set);
-  grpc_exec_ctx_sched(exec_ctx, req->on_done, error, NULL);
+  grpc_closure_sched(exec_ctx, req->on_done, error);
   grpc_http_parser_destroy(&req->parser);
   if (req->addresses != NULL) {
     grpc_resolved_addresses_destroy(req->addresses);
@@ -224,7 +224,8 @@
     return;
   }
   addr = &req->addresses->addrs[req->next_address++];
-  grpc_closure_init(&req->connected, on_connected, req);
+  grpc_closure_init(&req->connected, on_connected, req,
+                    grpc_schedule_on_exec_ctx);
   grpc_arg arg;
   arg.key = GRPC_ARG_RESOURCE_QUOTA;
   arg.type = GRPC_ARG_POINTER;
@@ -266,8 +267,9 @@
   req->pollent = pollent;
   req->overall_error = GRPC_ERROR_NONE;
   req->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
-  grpc_closure_init(&req->on_read, on_read, req);
-  grpc_closure_init(&req->done_write, done_write, req);
+  grpc_closure_init(&req->on_read, on_read, req, grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&req->done_write, done_write, req,
+                    grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&req->incoming);
   grpc_slice_buffer_init(&req->outgoing);
   grpc_iomgr_register_object(&req->iomgr_obj, name);
@@ -277,9 +279,11 @@
   GPR_ASSERT(pollent);
   grpc_polling_entity_add_to_pollset_set(exec_ctx, req->pollent,
                                          req->context->pollset_set);
-  grpc_resolve_address(exec_ctx, request->host, req->handshaker->default_port,
-                       req->context->pollset_set,
-                       grpc_closure_create(on_resolved, req), &req->addresses);
+  grpc_resolve_address(
+      exec_ctx, request->host, req->handshaker->default_port,
+      req->context->pollset_set,
+      grpc_closure_create(on_resolved, req, grpc_schedule_on_exec_ctx),
+      &req->addresses);
 }
 
 void grpc_httpcli_get(grpc_exec_ctx *exec_ctx, grpc_httpcli_context *context,
diff --git a/src/core/lib/http/httpcli_security_connector.c b/src/core/lib/http/httpcli_security_connector.c
index 14cdb1d..6b197c2 100644
--- a/src/core/lib/http/httpcli_security_connector.c
+++ b/src/core/lib/http/httpcli_security_connector.c
@@ -96,7 +96,7 @@
     error = GRPC_ERROR_CREATE(msg);
     gpr_free(msg);
   }
-  grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
+  grpc_closure_sched(exec_ctx, on_peer_checked, error);
   tsi_peer_destruct(&peer);
 }
 
diff --git a/src/core/lib/iomgr/closure.c b/src/core/lib/iomgr/closure.c
index c6ddc76..da0ec87 100644
--- a/src/core/lib/iomgr/closure.c
+++ b/src/core/lib/iomgr/closure.c
@@ -37,10 +37,13 @@
 
 #include "src/core/lib/profiling/timers.h"
 
-void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
-                       void *cb_arg) {
+grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+                                void *cb_arg,
+                                grpc_closure_scheduler *scheduler) {
   closure->cb = cb;
   closure->cb_arg = cb_arg;
+  closure->scheduler = scheduler;
+  return closure;
 }
 
 void grpc_closure_list_init(grpc_closure_list *closure_list) {
@@ -105,11 +108,12 @@
   cb(exec_ctx, cb_arg, error);
 }
 
-grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg) {
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
+                                  grpc_closure_scheduler *scheduler) {
   wrapped_closure *wc = gpr_malloc(sizeof(*wc));
   wc->cb = cb;
   wc->cb_arg = cb_arg;
-  grpc_closure_init(&wc->wrapper, closure_wrapper, wc);
+  grpc_closure_init(&wc->wrapper, closure_wrapper, wc, scheduler);
   return &wc->wrapper;
 }
 
@@ -117,8 +121,30 @@
                       grpc_error *error) {
   GPR_TIMER_BEGIN("grpc_closure_run", 0);
   if (c != NULL) {
-    c->cb(exec_ctx, c->cb_arg, error);
+    c->scheduler->vtable->run(exec_ctx, c, error);
+  } else {
+    GRPC_ERROR_UNREF(error);
   }
-  GRPC_ERROR_UNREF(error);
   GPR_TIMER_END("grpc_closure_run", 0);
 }
+
+void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *c,
+                        grpc_error *error) {
+  GPR_TIMER_BEGIN("grpc_closure_sched", 0);
+  if (c != NULL) {
+    c->scheduler->vtable->sched(exec_ctx, c, error);
+  } else {
+    GRPC_ERROR_UNREF(error);
+  }
+  GPR_TIMER_END("grpc_closure_sched", 0);
+}
+
+void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx, grpc_closure_list *list) {
+  grpc_closure *c = list->head;
+  while (c != NULL) {
+    grpc_closure *next = c->next_data.next;
+    c->scheduler->vtable->sched(exec_ctx, c, c->error_data.error);
+    c = next;
+  }
+  list->head = list->tail = NULL;
+}
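
grpc_closure_list_sched is the replacement for grpc_exec_ctx_enqueue_list (see the run_after_write change in chttp2_transport.c above): each queued closure is dispatched through its own scheduler, carrying the error recorded when it was appended, and the list is reset. A small usage sketch, assuming grpc_closure_list_append keeps the (list, closure, error) shape seen elsewhere in this patch:

  grpc_closure_list list = GRPC_CLOSURE_LIST_INIT;
  /* c1 and c2 were initialized earlier with their own schedulers */
  grpc_closure_list_append(&list, c1, GRPC_ERROR_NONE);
  grpc_closure_list_append(&list, c2, GRPC_ERROR_CREATE("deferred failure"));
  grpc_closure_list_sched(exec_ctx, &list); /* drains; head/tail become NULL */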
diff --git a/src/core/lib/iomgr/closure.h b/src/core/lib/iomgr/closure.h
index 2b4b271..1b5d9b2 100644
--- a/src/core/lib/iomgr/closure.h
+++ b/src/core/lib/iomgr/closure.h
@@ -59,6 +59,22 @@
 typedef void (*grpc_iomgr_cb_func)(grpc_exec_ctx *exec_ctx, void *arg,
                                    grpc_error *error);
 
+typedef struct grpc_closure_scheduler grpc_closure_scheduler;
+
+typedef struct grpc_closure_scheduler_vtable {
+  /* NOTE: for all these functions, closure->scheduler == the scheduler that was
+           used to find this vtable */
+  void (*run)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+              grpc_error *error);
+  void (*sched)(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                grpc_error *error);
+} grpc_closure_scheduler_vtable;
+
+/** Abstract type that can schedule closures for execution */
+struct grpc_closure_scheduler {
+  const grpc_closure_scheduler_vtable *vtable;
+};
+
 /** A closure over a grpc_iomgr_cb_func. */
 struct grpc_closure {
   /** Once queued, next indicates the next queued closure; before then, scratch
@@ -75,6 +91,10 @@
   /** Arguments to be passed to "cb". */
   void *cb_arg;
 
+  /** Scheduler to schedule against: NULL to schedule against current execution
+      context */
+  grpc_closure_scheduler *scheduler;
+
   /** Once queued, the result of the closure. Before then: scratch space */
   union {
     grpc_error *error;
@@ -82,12 +102,14 @@
   } error_data;
 };
 
-/** Initializes \a closure with \a cb and \a cb_arg. */
-void grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
-                       void *cb_arg);
+/** Initializes \a closure with \a cb and \a cb_arg. Returns \a closure. */
+grpc_closure *grpc_closure_init(grpc_closure *closure, grpc_iomgr_cb_func cb,
+                                void *cb_arg,
+                                grpc_closure_scheduler *scheduler);
 
 /* Create a heap-allocated closure: try to avoid this except for very rare events */
-grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg);
+grpc_closure *grpc_closure_create(grpc_iomgr_cb_func cb, void *cb_arg,
+                                  grpc_closure_scheduler *scheduler);
 
 #define GRPC_CLOSURE_LIST_INIT \
   { NULL, NULL }
@@ -115,4 +137,13 @@
 void grpc_closure_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                       grpc_error *error);
 
+/** Schedule a closure to be run. Does not need to be run from a safe point. */
+void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                        grpc_error *error);
+
+/** Schedule all closures in a list to be run. Does not need to be run from a
+ * safe point. */
+void grpc_closure_list_sched(grpc_exec_ctx *exec_ctx,
+                             grpc_closure_list *closure_list);
+
 #endif /* GRPC_CORE_LIB_IOMGR_CLOSURE_H */
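
With this header, schedulers are pluggable: anything exposing run and sched entries through a grpc_closure_scheduler_vtable can back a closure. As a toy illustration of the contract only (not part of this patch), an inline scheduler that runs closures immediately, following the unref-after-callback convention used by grpc_closure_run:

  static void inline_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                         grpc_error *error) {
    closure->cb(exec_ctx, closure->cb_arg, error);
    GRPC_ERROR_UNREF(error); /* callbacks borrow the error */
  }

  static const grpc_closure_scheduler_vtable inline_vtable = {inline_run,
                                                              inline_run};
  static grpc_closure_scheduler inline_scheduler = {&inline_vtable};

  /* usage: grpc_closure_init(&c, cb, arg, &inline_scheduler); */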
diff --git a/src/core/lib/iomgr/combiner.c b/src/core/lib/iomgr/combiner.c
index cfc6702..c26a73b 100644
--- a/src/core/lib/iomgr/combiner.c
+++ b/src/core/lib/iomgr/combiner.c
@@ -56,6 +56,10 @@
 struct grpc_combiner {
   grpc_combiner *next_combiner_on_this_exec_ctx;
   grpc_workqueue *optional_workqueue;
+  grpc_closure_scheduler uncovered_scheduler;
+  grpc_closure_scheduler covered_scheduler;
+  grpc_closure_scheduler uncovered_finally_scheduler;
+  grpc_closure_scheduler covered_finally_scheduler;
   gpr_mpscq queue;
   // state is:
   // lower bit - zero if orphaned (STATE_UNORPHANED)
@@ -70,6 +74,26 @@
   grpc_closure offload;
 };
 
+static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
+                                    grpc_closure *closure, grpc_error *error);
+static void combiner_exec_covered(grpc_exec_ctx *exec_ctx,
+                                  grpc_closure *closure, grpc_error *error);
+static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
+                                            grpc_closure *closure,
+                                            grpc_error *error);
+static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
+                                          grpc_closure *closure,
+                                          grpc_error *error);
+
+static const grpc_closure_scheduler_vtable scheduler_uncovered = {
+    combiner_exec_uncovered, combiner_exec_uncovered};
+static const grpc_closure_scheduler_vtable scheduler_covered = {
+    combiner_exec_covered, combiner_exec_covered};
+static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
+    combiner_finally_exec_uncovered, combiner_finally_exec_uncovered};
+static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
+    combiner_finally_exec_covered, combiner_finally_exec_covered};
+
 static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);
 
 typedef struct {
@@ -102,11 +126,16 @@
   lock->time_to_execute_final_list = false;
   lock->optional_workqueue = optional_workqueue;
   lock->final_list_covered_by_poller = false;
+  lock->uncovered_scheduler.vtable = &scheduler_uncovered;
+  lock->covered_scheduler.vtable = &scheduler_covered;
+  lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered;
+  lock->covered_finally_scheduler.vtable = &finally_scheduler_covered;
   gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
   gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
   gpr_mpscq_init(&lock->queue);
   grpc_closure_list_init(&lock->final_list);
-  grpc_closure_init(&lock->offload, offload, lock);
+  grpc_closure_init(&lock->offload, offload, lock,
+                    grpc_workqueue_scheduler(lock->optional_workqueue));
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
   return lock;
 }
@@ -148,9 +177,9 @@
   }
 }
 
-void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
-                           grpc_closure *cl, grpc_error *error,
-                           bool covered_by_poller) {
+static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
+                          grpc_closure *cl, grpc_error *error,
+                          bool covered_by_poller) {
   GPR_TIMER_BEGIN("combiner.execute", 0);
   gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
   GRPC_COMBINER_TRACE(gpr_log(
@@ -171,6 +200,24 @@
   GPR_TIMER_END("combiner.execute", 0);
 }
 
+#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
+  ((grpc_combiner *)(((char *)((closure)->scheduler)) -          \
+                     offsetof(grpc_combiner, scheduler_name)))
+
+static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
+                                    grpc_error *error) {
+  combiner_exec(exec_ctx,
+                COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl,
+                error, false);
+}
+
+static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
+                                  grpc_error *error) {
+  combiner_exec(exec_ctx,
+                COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl,
+                error, true);
+}
+
 static void move_next(grpc_exec_ctx *exec_ctx) {
   exec_ctx->active_combiner =
       exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
@@ -188,8 +235,7 @@
   move_next(exec_ctx);
   GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
                               lock->optional_workqueue));
-  grpc_workqueue_enqueue(exec_ctx, lock->optional_workqueue, &lock->offload,
-                         GRPC_ERROR_NONE);
+  grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
 }
 
 bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
@@ -312,23 +358,22 @@
 }
 
 static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
-                            grpc_error *error) {
-  grpc_combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
-                                GRPC_ERROR_REF(error), false);
-}
+                            grpc_error *error);
 
-void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
-                                   grpc_closure *closure, grpc_error *error,
-                                   bool covered_by_poller) {
+static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
+                                     grpc_combiner *lock, grpc_closure *closure,
+                                     grpc_error *error,
+                                     bool covered_by_poller) {
   GRPC_COMBINER_TRACE(gpr_log(
       GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
       closure, exec_ctx->active_combiner, covered_by_poller));
   GPR_TIMER_BEGIN("combiner.execute_finally", 0);
   if (exec_ctx->active_combiner != lock) {
     GPR_TIMER_MARK("slowpath", 0);
-    grpc_combiner_execute(exec_ctx, lock,
-                          grpc_closure_create(enqueue_finally, closure), error,
-                          false);
+    grpc_closure_sched(
+        exec_ctx, grpc_closure_create(enqueue_finally, closure,
+                                      grpc_combiner_scheduler(lock, false)),
+        error);
     GPR_TIMER_END("combiner.execute_finally", 0);
     return;
   }
@@ -342,3 +387,36 @@
   grpc_closure_list_append(&lock->final_list, closure, error);
   GPR_TIMER_END("combiner.execute_finally", 0);
 }
+
+static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
+                            grpc_error *error) {
+  combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
+                           GRPC_ERROR_REF(error), false);
+}
+
+static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
+                                            grpc_closure *cl,
+                                            grpc_error *error) {
+  combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(
+                                         cl, uncovered_finally_scheduler),
+                           cl, error, false);
+}
+
+static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
+                                          grpc_closure *cl, grpc_error *error) {
+  combiner_execute_finally(
+      exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler),
+      cl, error, true);
+}
+
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner,
+                                                bool covered_by_poller) {
+  return covered_by_poller ? &combiner->covered_scheduler
+                           : &combiner->uncovered_scheduler;
+}
+
+grpc_closure_scheduler *grpc_combiner_finally_scheduler(
+    grpc_combiner *combiner, bool covered_by_poller) {
+  return covered_by_poller ? &combiner->covered_finally_scheduler
+                           : &combiner->uncovered_finally_scheduler;
+}
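
COMBINER_FROM_CLOSURE_SCHEDULER is a container_of in disguise: each combiner embeds four scheduler structs, hands out their addresses, and later subtracts the member offset to recover the owning grpc_combiner without storing a back-pointer. The trick in isolation, as a generic sketch (offsetof comes from <stddef.h>):

  struct owner {
    int data;
    grpc_closure_scheduler sched; /* embedded member given to closures */
  };

  /* recover the owner from the embedded member's address */
  struct owner *o = (struct owner *)(((char *)closure->scheduler) -
                                     offsetof(struct owner, sched));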
diff --git a/src/core/lib/iomgr/combiner.h b/src/core/lib/iomgr/combiner.h
index d04eeed..81dff85 100644
--- a/src/core/lib/iomgr/combiner.h
+++ b/src/core/lib/iomgr/combiner.h
@@ -50,14 +50,12 @@
 grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue);
 // Destroy the lock
 void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock);
-// Execute \a action within the lock.
-void grpc_combiner_execute(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
-                           grpc_closure *closure, grpc_error *error,
-                           bool covered_by_poller);
-// Execute \a action within the lock just prior to unlocking.
-void grpc_combiner_execute_finally(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
-                                   grpc_closure *closure, grpc_error *error,
-                                   bool covered_by_poller);
+// Fetch a scheduler to schedule closures against
+grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *lock,
+                                                bool covered_by_poller);
+// Scheduler to execute \a action within the lock just prior to unlocking.
+grpc_closure_scheduler *grpc_combiner_finally_scheduler(grpc_combiner *lock,
+                                                        bool covered_by_poller);
 
 bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx);
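
The finally variant schedules a closure to run just before the combiner unlocks, which is how hpack_parser.c now defers the client RST_STREAM write. Migration of a finally call site, mirroring that hunk:

  /* before */
  grpc_combiner_execute_finally(exec_ctx, t->combiner,
                                grpc_closure_create(force_client_rst_stream, s),
                                GRPC_ERROR_NONE, false);

  /* after */
  grpc_closure_sched(exec_ctx,
                     grpc_closure_create(
                         force_client_rst_stream, s,
                         grpc_combiner_finally_scheduler(t->combiner, false)),
                     GRPC_ERROR_NONE);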
 
diff --git a/src/core/lib/iomgr/ev_epoll_linux.c b/src/core/lib/iomgr/ev_epoll_linux.c
index 1b15e0e..045001f 100644
--- a/src/core/lib/iomgr/ev_epoll_linux.c
+++ b/src/core/lib/iomgr/ev_epoll_linux.c
@@ -202,6 +202,8 @@
 
 /* This is also used as grpc_workqueue (by directly casting it) */
 typedef struct polling_island {
+  grpc_closure_scheduler workqueue_scheduler;
+
   gpr_mu mu;
   /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
      the refcount.
@@ -305,6 +307,8 @@
 
 /* Forward declaration */
 static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                              grpc_error *error);
 
 #ifdef GRPC_TSAN
 /* Currently TSAN may incorrectly flag data races between epoll_ctl and
@@ -317,6 +321,9 @@
 gpr_atm g_epoll_sync;
 #endif /* defined(GRPC_TSAN) */
 
+static const grpc_closure_scheduler_vtable workqueue_scheduler_vtable = {
+    workqueue_enqueue, workqueue_enqueue};
+
 static void pi_add_ref(polling_island *pi);
 static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);
 
@@ -529,6 +536,7 @@
   *error = GRPC_ERROR_NONE;
 
   pi = gpr_malloc(sizeof(*pi));
+  pi->workqueue_scheduler.vtable = &workqueue_scheduler_vtable;
   gpr_mu_init(&pi->mu);
   pi->fd_cnt = 0;
   pi->fd_capacity = 0;
@@ -800,10 +808,10 @@
   return q;
 }
 
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
-                              grpc_workqueue *workqueue, grpc_closure *closure,
+static void workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                               grpc_error *error) {
   GPR_TIMER_BEGIN("workqueue.enqueue", 0);
+  grpc_workqueue *workqueue = (grpc_workqueue *)closure->scheduler;
   /* take a ref to the workqueue: otherwise it can happen that whatever events
   * this kicks off end up destroying the workqueue before this function
    * completes */
@@ -820,6 +828,12 @@
   GPR_TIMER_END("workqueue.enqueue", 0);
 }
 
+static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
+  polling_island *pi = (polling_island *)workqueue;
+  return workqueue == NULL ? grpc_schedule_on_exec_ctx
+                           : &pi->workqueue_scheduler;
+}
+
 static grpc_error *polling_island_global_init() {
   grpc_error *error = GRPC_ERROR_NONE;
 
@@ -1030,8 +1044,7 @@
     fd->po.pi = NULL;
   }
 
-  grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error),
-                      NULL);
+  grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_REF(error));
 
   gpr_mu_unlock(&fd->po.mu);
   UNREF_BY(fd, 2, reason); /* Drop the reference */
@@ -1057,16 +1070,14 @@
 static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure **st, grpc_closure *closure) {
   if (fd->shutdown) {
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
   } else if (*st == CLOSURE_NOT_READY) {
     /* not ready ==> switch to a waiting state by setting the closure */
     *st = closure;
   } else if (*st == CLOSURE_READY) {
     /* already ready ==> queue the closure to run immediately */
     *st = CLOSURE_NOT_READY;
-    grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown));
   } else {
     /* upcallptr was set to a different closure.  This is an error! */
     gpr_log(GPR_ERROR,
@@ -1088,7 +1099,7 @@
     return 0;
   } else {
     /* waiting ==> queue closure */
-    grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
+    grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown));
     *st = CLOSURE_NOT_READY;
     return 1;
   }
@@ -1359,7 +1370,7 @@
 
   /* Release the ref and set pollset->po.pi to NULL */
   pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
-  grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 
 /* pollset->po.mu lock must be held by the caller before calling this */
@@ -1410,7 +1421,9 @@
         workqueue_maybe_wakeup(pi);
       }
       grpc_closure *c = (grpc_closure *)n;
-      grpc_closure_run(exec_ctx, c, c->error_data.error);
+      grpc_error *error = c->error_data.error;
+      c->cb(exec_ctx, c->cb_arg, error);
+      GRPC_ERROR_UNREF(error);
       return true;
     } else if (gpr_atm_no_barrier_load(&pi->workqueue_item_count) > 0) {
       /* n == NULL might mean there's work but it's not available to be popped
@@ -1959,7 +1972,7 @@
 
     .workqueue_ref = workqueue_ref,
     .workqueue_unref = workqueue_unref,
-    .workqueue_enqueue = workqueue_enqueue,
+    .workqueue_scheduler = workqueue_scheduler,
 
     .shutdown_engine = shutdown_engine,
 };
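The epoll engine relies on a container-recovery idiom here: polling_island embeds its grpc_closure_scheduler as the first member, so workqueue_enqueue can cast closure->scheduler straight back to the enclosing workqueue. In isolation the idiom looks like the sketch below (my_workqueue and my_enqueue are hypothetical names; the load-bearing part is that the scheduler sits at offset 0):

/* Because `scheduler` is the first member, a pointer to it is also a
   pointer to the enclosing object; no offsetof arithmetic is needed. */
typedef struct my_workqueue {
  grpc_closure_scheduler scheduler; /* must remain the first member */
  /* ... queue state ... */
} my_workqueue;

static void my_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                       grpc_error *error) {
  my_workqueue *wq = (my_workqueue *)closure->scheduler; /* offset-0 cast */
  /* ... append closure to wq, as workqueue_enqueue does above ... */
}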
diff --git a/src/core/lib/iomgr/ev_poll_posix.c b/src/core/lib/iomgr/ev_poll_posix.c
index 21b28e5..5bc5621 100644
--- a/src/core/lib/iomgr/ev_poll_posix.c
+++ b/src/core/lib/iomgr/ev_poll_posix.c
@@ -397,7 +397,7 @@
   if (!fd->released) {
     close(fd->fd);
   }
-  grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);
 }
 
 static int fd_wrapped_fd(grpc_fd *fd) {
@@ -457,16 +457,14 @@
 static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure **st, grpc_closure *closure) {
   if (fd->shutdown) {
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"));
   } else if (*st == CLOSURE_NOT_READY) {
     /* not ready ==> switch to a waiting state by setting the closure */
     *st = closure;
   } else if (*st == CLOSURE_READY) {
     /* already ready ==> queue the closure to run immediately */
     *st = CLOSURE_NOT_READY;
-    grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown));
     maybe_wake_one_watcher_locked(fd);
   } else {
     /* upcallptr was set to a different closure.  This is an error! */
@@ -489,7 +487,7 @@
     return 0;
   } else {
     /* waiting ==> queue closure */
-    grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
+    grpc_closure_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown));
     *st = CLOSURE_NOT_READY;
     return 1;
   }
@@ -852,7 +850,7 @@
     GRPC_FD_UNREF(pollset->fds[i], "multipoller");
   }
   pollset->fd_count = 0;
-  grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE);
 }
 
 static void work_combine_error(grpc_error **composite, grpc_error *error) {
@@ -901,7 +899,7 @@
   if (!pollset_has_workers(pollset) &&
       !grpc_closure_list_empty(pollset->idle_jobs)) {
     GPR_TIMER_MARK("pollset_work.idle_jobs", 0);
-    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+    grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
     goto done;
   }
   /* If we're shutting down then we don't execute any extended work */
@@ -1081,7 +1079,7 @@
        * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
       gpr_mu_lock(&pollset->mu);
     } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
-      grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+      grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
       gpr_mu_unlock(&pollset->mu);
       grpc_exec_ctx_flush(exec_ctx);
       gpr_mu_lock(&pollset->mu);
@@ -1100,7 +1098,7 @@
   pollset->shutdown_done = closure;
   pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
   if (!pollset_has_workers(pollset)) {
-    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
+    grpc_closure_list_sched(exec_ctx, &pollset->idle_jobs);
   }
   if (!pollset->called_shutdown && !pollset_has_workers(pollset)) {
     pollset->called_shutdown = 1;
@@ -1288,10 +1286,8 @@
                             grpc_workqueue *workqueue) {}
 #endif
 
-static void workqueue_enqueue(grpc_exec_ctx *exec_ctx,
-                              grpc_workqueue *workqueue, grpc_closure *closure,
-                              grpc_error *error) {
-  grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
+  return grpc_schedule_on_exec_ctx;
 }
 
 /*******************************************************************************
@@ -1534,7 +1530,7 @@
 
     .workqueue_ref = workqueue_ref,
     .workqueue_unref = workqueue_unref,
-    .workqueue_enqueue = workqueue_enqueue,
+    .workqueue_scheduler = workqueue_scheduler,
 
     .shutdown_engine = shutdown_engine,
 };
diff --git a/src/core/lib/iomgr/ev_posix.c b/src/core/lib/iomgr/ev_posix.c
index ab13989..2975d61 100644
--- a/src/core/lib/iomgr/ev_posix.c
+++ b/src/core/lib/iomgr/ev_posix.c
@@ -275,9 +275,8 @@
 }
 #endif
 
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
-                            grpc_closure *closure, grpc_error *error) {
-  g_event_engine->workqueue_enqueue(exec_ctx, workqueue, closure, error);
+grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
+  return g_event_engine->workqueue_scheduler(workqueue);
 }
 
 #endif  // GRPC_POSIX_SOCKET
diff --git a/src/core/lib/iomgr/ev_posix.h b/src/core/lib/iomgr/ev_posix.h
index cb58325..1068a4b 100644
--- a/src/core/lib/iomgr/ev_posix.h
+++ b/src/core/lib/iomgr/ev_posix.h
@@ -106,8 +106,7 @@
   grpc_workqueue *(*workqueue_ref)(grpc_workqueue *workqueue);
   void (*workqueue_unref)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
 #endif
-  void (*workqueue_enqueue)(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
-                            grpc_closure *closure, grpc_error *error);
+  grpc_closure_scheduler *(*workqueue_scheduler)(grpc_workqueue *workqueue);
 } grpc_event_engine_vtable;
 
 void grpc_event_engine_init(void);
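With this vtable change an event engine no longer implements enqueueing itself; it only reports which scheduler its workqueues use. An engine without a dedicated workqueue can satisfy the contract with a one-liner, which is exactly what ev_poll_posix.c and the uv/windows stubs below do:

/* Minimal conforming implementation for an engine with no real
   workqueue: every closure just runs on the current exec_ctx. */
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
  return grpc_schedule_on_exec_ctx;
}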
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index 604713e..6aa788f 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -57,7 +57,6 @@
   return true;
 }
 
-#ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
 bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
   bool did_something = 0;
   GPR_TIMER_BEGIN("grpc_exec_ctx_flush", 0);
@@ -67,8 +66,10 @@
       exec_ctx->closure_list.head = exec_ctx->closure_list.tail = NULL;
       while (c != NULL) {
         grpc_closure *next = c->next_data.next;
+        grpc_error *error = c->error_data.error;
         did_something = true;
-        grpc_closure_run(exec_ctx, c, c->error_data.error);
+        c->cb(exec_ctx, c->cb_arg, error);
+        GRPC_ERROR_UNREF(error);
         c = next;
       }
     } else if (!grpc_combiner_continue_exec_ctx(exec_ctx)) {
@@ -76,30 +77,6 @@
     }
   }
   GPR_ASSERT(exec_ctx->active_combiner == NULL);
-  if (exec_ctx->stealing_from_workqueue != NULL) {
-    if (grpc_exec_ctx_ready_to_finish(exec_ctx)) {
-      grpc_workqueue_enqueue(exec_ctx, exec_ctx->stealing_from_workqueue,
-                             exec_ctx->stolen_closure,
-                             exec_ctx->stolen_closure->error_data.error);
-      GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
-                           "exec_ctx_sched");
-      exec_ctx->stealing_from_workqueue = NULL;
-      exec_ctx->stolen_closure = NULL;
-    } else {
-      grpc_closure *c = exec_ctx->stolen_closure;
-      GRPC_WORKQUEUE_UNREF(exec_ctx, exec_ctx->stealing_from_workqueue,
-                           "exec_ctx_sched");
-      exec_ctx->stealing_from_workqueue = NULL;
-      exec_ctx->stolen_closure = NULL;
-      grpc_error *error = c->error_data.error;
-      GPR_TIMER_BEGIN("grpc_exec_ctx_flush.stolen_cb", 0);
-      c->cb(exec_ctx, c->cb_arg, error);
-      GRPC_ERROR_UNREF(error);
-      GPR_TIMER_END("grpc_exec_ctx_flush.stolen_cb", 0);
-      grpc_exec_ctx_flush(exec_ctx);
-      did_something = true;
-    }
-  }
   GPR_TIMER_END("grpc_exec_ctx_flush", 0);
   return did_something;
 }
@@ -109,104 +86,21 @@
   grpc_exec_ctx_flush(exec_ctx);
 }
 
-void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
-                         grpc_error *error,
-                         grpc_workqueue *offload_target_or_null) {
-  GPR_TIMER_BEGIN("grpc_exec_ctx_sched", 0);
-  if (offload_target_or_null == NULL) {
-    grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
-  } else if (exec_ctx->stealing_from_workqueue == NULL) {
-    exec_ctx->stealing_from_workqueue = offload_target_or_null;
-    closure->error_data.error = error;
-    exec_ctx->stolen_closure = closure;
-  } else if (exec_ctx->stealing_from_workqueue != offload_target_or_null) {
-    grpc_workqueue_enqueue(exec_ctx, offload_target_or_null, closure, error);
-    GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
-  } else { /* stealing_from_workqueue == offload_target_or_null */
-    grpc_workqueue_enqueue(exec_ctx, offload_target_or_null,
-                           exec_ctx->stolen_closure,
-                           exec_ctx->stolen_closure->error_data.error);
-    closure->error_data.error = error;
-    exec_ctx->stolen_closure = closure;
-    GRPC_WORKQUEUE_UNREF(exec_ctx, offload_target_or_null, "exec_ctx_sched");
-  }
-  GPR_TIMER_END("grpc_exec_ctx_sched", 0);
+static void exec_ctx_run(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                         grpc_error *error) {
+  closure->cb(exec_ctx, closure->cb_arg, error);
+  GRPC_ERROR_UNREF(error);
 }
 
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
-                                grpc_closure_list *list,
-                                grpc_workqueue *offload_target_or_null) {
-  grpc_closure_list_move(list, &exec_ctx->closure_list);
+static void exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                           grpc_error *error) {
+  grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
 }
 
 void grpc_exec_ctx_global_init(void) {}
 void grpc_exec_ctx_global_shutdown(void) {}
-#else
-static gpr_mu g_mu;
-static gpr_cv g_cv;
-static int g_threads = 0;
 
-static void run_closure(void *arg) {
-  grpc_closure *closure = arg;
-  grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  closure->cb(&exec_ctx, closure->cb_arg, (closure->final_data & 1) != 0);
-  grpc_exec_ctx_finish(&exec_ctx);
-  gpr_mu_lock(&g_mu);
-  if (--g_threads == 0) {
-    gpr_cv_signal(&g_cv);
-  }
-  gpr_mu_unlock(&g_mu);
-}
-
-static void start_closure(grpc_closure *closure) {
-  gpr_thd_id id;
-  gpr_mu_lock(&g_mu);
-  g_threads++;
-  gpr_mu_unlock(&g_mu);
-  gpr_thd_new(&id, run_closure, closure, NULL);
-}
-
-bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) { return false; }
-
-void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {}
-
-void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
-                           bool success,
-                           grpc_workqueue *offload_target_or_null) {
-  GPR_ASSERT(offload_target_or_null == NULL);
-  if (closure == NULL) return;
-  closure->final_data = success;
-  start_closure(closure);
-}
-
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
-                                grpc_closure_list *list,
-                                grpc_workqueue *offload_target_or_null) {
-  GPR_ASSERT(offload_target_or_null == NULL);
-  if (list == NULL) return;
-  grpc_closure *p = list->head;
-  while (p) {
-    grpc_closure *start = p;
-    p = grpc_closure_next(start);
-    start_closure(start);
-  }
-  grpc_closure_list r = GRPC_CLOSURE_LIST_INIT;
-  *list = r;
-}
-
-void grpc_exec_ctx_global_init(void) {
-  gpr_mu_init(&g_mu);
-  gpr_cv_init(&g_cv);
-}
-
-void grpc_exec_ctx_global_shutdown(void) {
-  gpr_mu_lock(&g_mu);
-  while (g_threads != 0) {
-    gpr_cv_wait(&g_cv, &g_mu, gpr_inf_future(GPR_CLOCK_REALTIME));
-  }
-  gpr_mu_unlock(&g_mu);
-
-  gpr_mu_destroy(&g_mu);
-  gpr_cv_destroy(&g_cv);
-}
-#endif
+static const grpc_closure_scheduler_vtable exec_ctx_scheduler_vtable = {
+    exec_ctx_run, exec_ctx_sched};
+static grpc_closure_scheduler exec_ctx_scheduler = {&exec_ctx_scheduler_vtable};
+grpc_closure_scheduler *grpc_schedule_on_exec_ctx = &exec_ctx_scheduler;
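The {run, sched} vtable pair defined here is the template every scheduler in this patch follows: the first slot executes the callback inline and releases the error, the second defers it. grpc_closure_sched itself lives in closure.c, outside this excerpt, so the following is only a sketch of the dispatch these call sites assume, not the committed code:

/* Assumed shape of grpc_closure_sched; the member name (vtable->sched)
   is inferred from the {run, sched} initializer order in this diff. */
void grpc_closure_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                        grpc_error *error) {
  if (closure == NULL) { /* NULL tolerance must live here or in the
                            scheduler itself: ru_shutdown below schedules
                            reclaimers that may be NULL */
    GRPC_ERROR_UNREF(error);
    return;
  }
  closure->scheduler->vtable->sched(exec_ctx, closure, error);
}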
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 7e50cb9..e566f1b 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -66,17 +66,6 @@
 #ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
 struct grpc_exec_ctx {
   grpc_closure_list closure_list;
-  /** The workqueue we're stealing work from.
-      As items are queued to the execution context, we try to steal one
-      workqueue item and execute it inline (assuming the exec_ctx is not
-      finished) - doing so does not invalidate the workqueue's contract, and
-      provides a small latency win in cases where we get a hit */
-  grpc_workqueue *stealing_from_workqueue;
-  /** The workqueue item that was stolen from the workqueue above. When new
-      items are scheduled to be offloaded to that workqueue, we need to update
-      this like a 1-deep fifo to maintain the invariant that workqueue items
-      queued by one thread are started in order */
-  grpc_closure *stolen_closure;
   /** currently active combiner: updated only via combiner.c */
   grpc_combiner *active_combiner;
   /** last active combiner in the active combiner list */
@@ -89,10 +78,7 @@
 /* initializer for grpc_exec_ctx:
    prefer to use GRPC_EXEC_CTX_INIT whenever possible */
 #define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
-  {                                                                          \
-    GRPC_CLOSURE_LIST_INIT, NULL, NULL, NULL, NULL, false, finish_check_arg, \
-        finish_check                                                         \
-  }
+  { GRPC_CLOSURE_LIST_INIT, NULL, NULL, false, finish_check_arg, finish_check }
 #else
 struct grpc_exec_ctx {
   bool cached_ready_to_finish;
@@ -108,6 +94,8 @@
 #define GRPC_EXEC_CTX_INIT \
   GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL)
 
+extern grpc_closure_scheduler *grpc_schedule_on_exec_ctx;
+
 /** Flush any work that has been enqueued onto this grpc_exec_ctx.
  *  Caller must guarantee that no interfering locks are held.
  *  Returns true if work was performed, false otherwise. */
@@ -115,14 +103,6 @@
 /** Finish any pending work for a grpc_exec_ctx. Must be called before
  *  the instance is destroyed, or work may be lost. */
 void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx);
-/** Add a closure to be executed in the future.
-    If \a offload_target_or_null is NULL, the closure will be executed at the
-    next exec_ctx.{finish,flush} point.
-    If \a offload_target_or_null is non-NULL, the closure will be scheduled
-    against the workqueue, and a reference to the workqueue will be consumed. */
-void grpc_exec_ctx_sched(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
-                         grpc_error *error,
-                         grpc_workqueue *offload_target_or_null);
 /** Returns true if we'd like to leave this execution context as soon as
     possible: useful for deciding whether to do something more or not depending
     on outside context */
@@ -131,11 +111,6 @@
 bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
 /** A finish check that is always ready to finish */
 bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
-/** Add a list of closures to be executed at the next flush/finish point.
- *  Leaves \a list empty. */
-void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
-                                grpc_closure_list *list,
-                                grpc_workqueue *offload_target_or_null);
 
 void grpc_exec_ctx_global_init(void);
 
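Taken together with the exec_ctx.c change, the migration rule for ordinary call sites is mechanical: the old API chose the execution target at every call, while the new one records it once at init time. grpc_schedule_on_exec_ctx reproduces the old offload_target_or_null == NULL behavior exactly, an append to the exec_ctx closure list:

/* Before: target picked per call. */
grpc_exec_ctx_sched(exec_ctx, closure, error, NULL /* no offload target */);

/* After: target picked once at init; every sched call looks the same.
   (cb and cb_arg stand for whatever the closure already carried.) */
grpc_closure_init(closure, cb, cb_arg, grpc_schedule_on_exec_ctx);
grpc_closure_sched(exec_ctx, closure, error);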
diff --git a/src/core/lib/iomgr/executor.c b/src/core/lib/iomgr/executor.c
index 8d7535d..1342a28 100644
--- a/src/core/lib/iomgr/executor.c
+++ b/src/core/lib/iomgr/executor.c
@@ -77,10 +77,18 @@
       gpr_mu_unlock(&g_executor.mu);
       break;
     } else {
-      grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
+      grpc_closure *c = g_executor.closures.head;
+      grpc_closure_list_init(&g_executor.closures);
+      gpr_mu_unlock(&g_executor.mu);
+      while (c != NULL) {
+        grpc_closure *next = c->next_data.next;
+        grpc_error *error = c->error_data.error;
+        c->cb(&exec_ctx, c->cb_arg, error);
+        GRPC_ERROR_UNREF(error);
+        c = next;
+      }
+      grpc_exec_ctx_flush(&exec_ctx);
     }
-    gpr_mu_unlock(&g_executor.mu);
-    grpc_exec_ctx_flush(&exec_ctx);
   }
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -112,7 +120,8 @@
   g_executor.pending_join = 1;
 }
 
-void grpc_executor_push(grpc_closure *closure, grpc_error *error) {
+static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
+                          grpc_error *error) {
   gpr_mu_lock(&g_executor.mu);
   if (g_executor.shutting_down == 0) {
     grpc_closure_list_append(&g_executor.closures, closure, error);
@@ -133,7 +142,15 @@
    * list below because we aren't accepting new work */
 
   /* Execute pending callbacks, some may be performing cleanups */
-  grpc_exec_ctx_enqueue_list(&exec_ctx, &g_executor.closures, NULL);
+  grpc_closure *c = g_executor.closures.head;
+  grpc_closure_list_init(&g_executor.closures);
+  while (c != NULL) {
+    grpc_closure *next = c->next_data.next;
+    grpc_error *error = c->error_data.error;
+    c->cb(&exec_ctx, c->cb_arg, error);
+    GRPC_ERROR_UNREF(error);
+    c = next;
+  }
   grpc_exec_ctx_finish(&exec_ctx);
   GPR_ASSERT(grpc_closure_list_empty(g_executor.closures));
   if (pending_join) {
@@ -141,3 +158,8 @@
   }
   gpr_mu_destroy(&g_executor.mu);
 }
+
+static const grpc_closure_scheduler_vtable executor_vtable = {executor_push,
+                                                              executor_push};
+static grpc_closure_scheduler executor_scheduler = {&executor_vtable};
+grpc_closure_scheduler *grpc_executor_scheduler = &executor_scheduler;
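Note that executor_vtable fills both slots with executor_push: "running" a closure on the executor still means handing it to another thread, so the immediate and deferred paths coincide. The epoll engine's workqueue_scheduler_vtable above makes the same choice for the same reason. Either way, callers see a single idiom (cleanup_cb and arg are hypothetical names here):

/* Offload potentially blocking work to the executor thread. */
grpc_closure_sched(exec_ctx,
                   grpc_closure_create(cleanup_cb, arg,
                                       grpc_executor_scheduler),
                   GRPC_ERROR_NONE);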
diff --git a/src/core/lib/iomgr/executor.h b/src/core/lib/iomgr/executor.h
index da9dcd0..53f3b6d 100644
--- a/src/core/lib/iomgr/executor.h
+++ b/src/core/lib/iomgr/executor.h
@@ -43,9 +43,7 @@
  * non-blocking solution available. */
 void grpc_executor_init();
 
-/** Enqueue \a closure for its eventual execution of \a f(arg) on a separate
- * thread */
-void grpc_executor_push(grpc_closure *closure, grpc_error *error);
+extern grpc_closure_scheduler *grpc_executor_scheduler;
 
 /** Shutdown the executor, running all pending work as part of the call */
 void grpc_executor_shutdown();
diff --git a/src/core/lib/iomgr/pollset_uv.c b/src/core/lib/iomgr/pollset_uv.c
index 3a74b84..ed3edee 100644
--- a/src/core/lib/iomgr/pollset_uv.c
+++ b/src/core/lib/iomgr/pollset_uv.c
@@ -83,7 +83,7 @@
     // Drain any pending UV callbacks without blocking
     uv_run(uv_default_loop(), UV_RUN_NOWAIT);
   }
-  grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
 }
 
 void grpc_pollset_destroy(grpc_pollset *pollset) {
diff --git a/src/core/lib/iomgr/pollset_windows.c b/src/core/lib/iomgr/pollset_windows.c
index 5540303..2a45e70 100644
--- a/src/core/lib/iomgr/pollset_windows.c
+++ b/src/core/lib/iomgr/pollset_windows.c
@@ -109,7 +109,7 @@
   pollset->shutting_down = 1;
   grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
   if (!pollset->is_iocp_worker) {
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
   } else {
     pollset->on_shutdown = closure;
   }
@@ -167,8 +167,7 @@
       }
 
       if (pollset->shutting_down && pollset->on_shutdown != NULL) {
-        grpc_exec_ctx_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE,
-                            NULL);
+        grpc_closure_sched(exec_ctx, pollset->on_shutdown, GRPC_ERROR_NONE);
         pollset->on_shutdown = NULL;
       }
       goto done;
diff --git a/src/core/lib/iomgr/resolve_address_posix.c b/src/core/lib/iomgr/resolve_address_posix.c
index 821932e..50e470d 100644
--- a/src/core/lib/iomgr/resolve_address_posix.c
+++ b/src/core/lib/iomgr/resolve_address_posix.c
@@ -163,10 +163,9 @@
 static void do_request_thread(grpc_exec_ctx *exec_ctx, void *rp,
                               grpc_error *error) {
   request *r = rp;
-  grpc_exec_ctx_sched(
+  grpc_closure_sched(
       exec_ctx, r->on_done,
-      grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out),
-      NULL);
+      grpc_blocking_resolve_address(r->name, r->default_port, r->addrs_out));
   gpr_free(r->name);
   gpr_free(r->default_port);
   gpr_free(r);
@@ -185,12 +184,13 @@
                                  grpc_closure *on_done,
                                  grpc_resolved_addresses **addrs) {
   request *r = gpr_malloc(sizeof(request));
-  grpc_closure_init(&r->request_closure, do_request_thread, r);
+  grpc_closure_init(&r->request_closure, do_request_thread, r,
+                    grpc_executor_scheduler);
   r->name = gpr_strdup(name);
   r->default_port = gpr_strdup(default_port);
   r->on_done = on_done;
   r->addrs_out = addrs;
-  grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE);
+  grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
 }
 
 void (*grpc_resolve_address)(
diff --git a/src/core/lib/iomgr/resolve_address_uv.c b/src/core/lib/iomgr/resolve_address_uv.c
index 3269c4f..9b5f320 100644
--- a/src/core/lib/iomgr/resolve_address_uv.c
+++ b/src/core/lib/iomgr/resolve_address_uv.c
@@ -98,7 +98,7 @@
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_error *error;
   error = handle_addrinfo_result(status, res, r->addresses);
-  grpc_exec_ctx_sched(&exec_ctx, r->on_done, error, NULL);
+  grpc_closure_sched(&exec_ctx, r->on_done, error);
   grpc_exec_ctx_finish(&exec_ctx);
 
   gpr_free(r->hints);
@@ -193,7 +193,7 @@
   int s;
   err = try_split_host_port(name, default_port, &host, &port);
   if (err != GRPC_ERROR_NONE) {
-    grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+    grpc_closure_sched(exec_ctx, on_done, err);
     return;
   }
   r = gpr_malloc(sizeof(request));
@@ -217,7 +217,7 @@
     *addrs = NULL;
     err = GRPC_ERROR_CREATE("getaddrinfo failed");
     err = grpc_error_set_str(err, GRPC_ERROR_STR_OS_ERROR, uv_strerror(s));
-    grpc_exec_ctx_sched(exec_ctx, on_done, err, NULL);
+    grpc_closure_sched(exec_ctx, on_done, err);
     gpr_free(r);
     gpr_free(req);
     gpr_free(hints);
diff --git a/src/core/lib/iomgr/resolve_address_windows.c b/src/core/lib/iomgr/resolve_address_windows.c
index fada5ec..2439ce3 100644
--- a/src/core/lib/iomgr/resolve_address_windows.c
+++ b/src/core/lib/iomgr/resolve_address_windows.c
@@ -154,7 +154,7 @@
   } else {
     GRPC_ERROR_REF(error);
   }
-  grpc_exec_ctx_sched(exec_ctx, r->on_done, error, NULL);
+  grpc_closure_sched(exec_ctx, r->on_done, error);
   gpr_free(r->name);
   gpr_free(r->default_port);
   gpr_free(r);
@@ -173,12 +173,13 @@
                                  grpc_closure *on_done,
                                  grpc_resolved_addresses **addresses) {
   request *r = gpr_malloc(sizeof(request));
-  grpc_closure_init(&r->request_closure, do_request_thread, r);
+  grpc_closure_init(&r->request_closure, do_request_thread, r,
+                    grpc_executor_scheduler);
   r->name = gpr_strdup(name);
   r->default_port = gpr_strdup(default_port);
   r->on_done = on_done;
   r->addresses = addresses;
-  grpc_executor_push(&r->request_closure, GRPC_ERROR_NONE);
+  grpc_closure_sched(exec_ctx, &r->request_closure, GRPC_ERROR_NONE);
 }
 
 void (*grpc_resolve_address)(
diff --git a/src/core/lib/iomgr/resource_quota.c b/src/core/lib/iomgr/resource_quota.c
index 213d296..8db539e 100644
--- a/src/core/lib/iomgr/resource_quota.c
+++ b/src/core/lib/iomgr/resource_quota.c
@@ -265,9 +265,8 @@
   if (resource_quota->step_scheduled) return;
   resource_quota->step_scheduled = true;
   grpc_resource_quota_internal_ref(resource_quota);
-  grpc_combiner_execute_finally(exec_ctx, resource_quota->combiner,
-                                &resource_quota->rq_step_closure,
-                                GRPC_ERROR_NONE, false);
+  grpc_closure_sched(exec_ctx, &resource_quota->rq_step_closure,
+                     GRPC_ERROR_NONE);
 }
 
 /* returns true if all allocations are completed */
@@ -294,7 +293,7 @@
     }
     if (resource_user->free_pool >= 0) {
       resource_user->allocating = false;
-      grpc_exec_ctx_enqueue_list(exec_ctx, &resource_user->on_allocated, NULL);
+      grpc_closure_list_sched(exec_ctx, &resource_user->on_allocated);
       gpr_mu_unlock(&resource_user->mu);
     } else {
       rulist_add_head(resource_user, GRPC_RULIST_AWAITING_ALLOCATION);
@@ -439,7 +438,7 @@
   resource_user->new_reclaimers[destructive] = NULL;
   GPR_ASSERT(resource_user->reclaimers[destructive] == NULL);
   if (gpr_atm_acq_load(&resource_user->shutdown) > 0) {
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED, NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_CANCELLED);
     return false;
   }
   resource_user->reclaimers[destructive] = closure;
@@ -480,10 +479,10 @@
 
 static void ru_shutdown(grpc_exec_ctx *exec_ctx, void *ru, grpc_error *error) {
   grpc_resource_user *resource_user = ru;
-  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
-                      GRPC_ERROR_CANCELLED, NULL);
-  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
-                      GRPC_ERROR_CANCELLED, NULL);
+  grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+                     GRPC_ERROR_CANCELLED);
+  grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+                     GRPC_ERROR_CANCELLED);
   resource_user->reclaimers[0] = NULL;
   resource_user->reclaimers[1] = NULL;
   rulist_remove(resource_user, GRPC_RULIST_RECLAIMER_BENIGN);
@@ -496,10 +495,10 @@
   for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
     rulist_remove(resource_user, (grpc_rulist)i);
   }
-  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[0],
-                      GRPC_ERROR_CANCELLED, NULL);
-  grpc_exec_ctx_sched(exec_ctx, resource_user->reclaimers[1],
-                      GRPC_ERROR_CANCELLED, NULL);
+  grpc_closure_sched(exec_ctx, resource_user->reclaimers[0],
+                     GRPC_ERROR_CANCELLED);
+  grpc_closure_sched(exec_ctx, resource_user->reclaimers[1],
+                     GRPC_ERROR_CANCELLED);
   if (resource_user->free_pool != 0) {
     resource_user->resource_quota->free_pool += resource_user->free_pool;
     rq_step_sched(exec_ctx, resource_user->resource_quota);
@@ -571,9 +570,12 @@
     gpr_asprintf(&resource_quota->name, "anonymous_pool_%" PRIxPTR,
                  (intptr_t)resource_quota);
   }
-  grpc_closure_init(&resource_quota->rq_step_closure, rq_step, resource_quota);
+  grpc_closure_init(
+      &resource_quota->rq_step_closure, rq_step, resource_quota,
+      grpc_combiner_finally_scheduler(resource_quota->combiner, true));
   grpc_closure_init(&resource_quota->rq_reclamation_done_closure,
-                    rq_reclamation_done, resource_quota);
+                    rq_reclamation_done, resource_quota,
+                    grpc_combiner_scheduler(resource_quota->combiner, false));
   for (int i = 0; i < GRPC_RULIST_COUNT; i++) {
     resource_quota->roots[i] = NULL;
   }
@@ -614,9 +616,8 @@
   rq_resize_args *a = gpr_malloc(sizeof(*a));
   a->resource_quota = grpc_resource_quota_internal_ref(resource_quota);
   a->size = (int64_t)size;
-  grpc_closure_init(&a->closure, rq_resize, a);
-  grpc_combiner_execute(&exec_ctx, resource_quota->combiner, &a->closure,
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_init(&a->closure, rq_resize, a, grpc_schedule_on_exec_ctx);
+  grpc_closure_sched(&exec_ctx, &a->closure, GRPC_ERROR_NONE);
   grpc_exec_ctx_finish(&exec_ctx);
 }
 
@@ -663,15 +664,19 @@
   resource_user->resource_quota =
       grpc_resource_quota_internal_ref(resource_quota);
   grpc_closure_init(&resource_user->allocate_closure, &ru_allocate,
-                    resource_user);
+                    resource_user,
+                    grpc_combiner_scheduler(resource_quota->combiner, false));
   grpc_closure_init(&resource_user->add_to_free_pool_closure,
-                    &ru_add_to_free_pool, resource_user);
+                    &ru_add_to_free_pool, resource_user,
+                    grpc_combiner_scheduler(resource_quota->combiner, false));
   grpc_closure_init(&resource_user->post_reclaimer_closure[0],
-                    &ru_post_benign_reclaimer, resource_user);
+                    &ru_post_benign_reclaimer, resource_user,
+                    grpc_combiner_scheduler(resource_quota->combiner, false));
   grpc_closure_init(&resource_user->post_reclaimer_closure[1],
-                    &ru_post_destructive_reclaimer, resource_user);
-  grpc_closure_init(&resource_user->destroy_closure, &ru_destroy,
-                    resource_user);
+                    &ru_post_destructive_reclaimer, resource_user,
+                    grpc_combiner_scheduler(resource_quota->combiner, false));
+  grpc_closure_init(&resource_user->destroy_closure, &ru_destroy, resource_user,
+                    grpc_combiner_scheduler(resource_quota->combiner, false));
   gpr_mu_init(&resource_user->mu);
   gpr_atm_rel_store(&resource_user->refs, 1);
   gpr_atm_rel_store(&resource_user->shutdown, 0);
@@ -706,9 +711,8 @@
   gpr_atm old = gpr_atm_full_fetch_add(&resource_user->refs, -amount);
   GPR_ASSERT(old >= amount);
   if (old == amount) {
-    grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-                          &resource_user->destroy_closure, GRPC_ERROR_NONE,
-                          false);
+    grpc_closure_sched(exec_ctx, &resource_user->destroy_closure,
+                       GRPC_ERROR_NONE);
   }
 }
 
@@ -724,9 +728,12 @@
 void grpc_resource_user_shutdown(grpc_exec_ctx *exec_ctx,
                                  grpc_resource_user *resource_user) {
   if (gpr_atm_full_fetch_add(&resource_user->shutdown, 1) == 0) {
-    grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-                          grpc_closure_create(ru_shutdown, resource_user),
-                          GRPC_ERROR_NONE, false);
+    grpc_closure_sched(exec_ctx,
+                       grpc_closure_create(
+                           ru_shutdown, resource_user,
+                           grpc_combiner_scheduler(
+                               resource_user->resource_quota->combiner, false)),
+                       GRPC_ERROR_NONE);
   }
 }
 
@@ -746,12 +753,11 @@
                              GRPC_ERROR_NONE);
     if (!resource_user->allocating) {
       resource_user->allocating = true;
-      grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-                            &resource_user->allocate_closure, GRPC_ERROR_NONE,
-                            false);
+      grpc_closure_sched(exec_ctx, &resource_user->allocate_closure,
+                         GRPC_ERROR_NONE);
     }
   } else {
-    grpc_exec_ctx_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, optional_on_done, GRPC_ERROR_NONE);
   }
   gpr_mu_unlock(&resource_user->mu);
 }
@@ -770,9 +776,8 @@
   if (is_bigger_than_zero && was_zero_or_negative &&
       !resource_user->added_to_free_pool) {
     resource_user->added_to_free_pool = true;
-    grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-                          &resource_user->add_to_free_pool_closure,
-                          GRPC_ERROR_NONE, false);
+    grpc_closure_sched(exec_ctx, &resource_user->add_to_free_pool_closure,
+                       GRPC_ERROR_NONE);
   }
   gpr_mu_unlock(&resource_user->mu);
   ru_unref_by(exec_ctx, resource_user, (gpr_atm)size);
@@ -784,9 +789,9 @@
                                        grpc_closure *closure) {
   GPR_ASSERT(resource_user->new_reclaimers[destructive] == NULL);
   resource_user->new_reclaimers[destructive] = closure;
-  grpc_combiner_execute(exec_ctx, resource_user->resource_quota->combiner,
-                        &resource_user->post_reclaimer_closure[destructive],
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(exec_ctx,
+                     &resource_user->post_reclaimer_closure[destructive],
+                     GRPC_ERROR_NONE);
 }
 
 void grpc_resource_user_finish_reclamation(grpc_exec_ctx *exec_ctx,
@@ -795,18 +800,20 @@
     gpr_log(GPR_DEBUG, "RQ %s %s: reclamation complete",
             resource_user->resource_quota->name, resource_user->name);
   }
-  grpc_combiner_execute(
-      exec_ctx, resource_user->resource_quota->combiner,
-      &resource_user->resource_quota->rq_reclamation_done_closure,
-      GRPC_ERROR_NONE, false);
+  grpc_closure_sched(
+      exec_ctx, &resource_user->resource_quota->rq_reclamation_done_closure,
+      GRPC_ERROR_NONE);
 }
 
 void grpc_resource_user_slice_allocator_init(
     grpc_resource_user_slice_allocator *slice_allocator,
     grpc_resource_user *resource_user, grpc_iomgr_cb_func cb, void *p) {
-  grpc_closure_init(&slice_allocator->on_allocated, ru_allocated_slices,
-                    slice_allocator);
-  grpc_closure_init(&slice_allocator->on_done, cb, p);
+  grpc_closure_init(
+      &slice_allocator->on_allocated, ru_allocated_slices, slice_allocator,
+      grpc_combiner_scheduler(resource_user->resource_quota->combiner, false));
+  grpc_closure_init(
+      &slice_allocator->on_done, cb, p,
+      grpc_combiner_scheduler(resource_user->resource_quota->combiner, false));
   slice_allocator->resource_user = resource_user;
 }
 
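The resource-quota conversions above introduce the third scheduler family: combiner schedulers. Binding a closure with grpc_combiner_scheduler(combiner, covered_by_poller) makes every later grpc_closure_sched run it serialized under that combiner, which is what the removed grpc_combiner_execute calls did per call site; grpc_combiner_finally_scheduler is likewise the analogue of grpc_combiner_execute_finally. A sketch, with state and on_event as hypothetical names:

/* Bind once: on_event will always run under state->combiner. */
grpc_closure_init(&state->on_event, on_event, state,
                  grpc_combiner_scheduler(state->combiner,
                                          false /* covered_by_poller */));

/* Schedule from any thread; the combiner serializes this closure with
   every other closure bound to the same combiner. */
grpc_closure_sched(exec_ctx, &state->on_event, GRPC_ERROR_NONE);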
diff --git a/src/core/lib/iomgr/socket_windows.c b/src/core/lib/iomgr/socket_windows.c
index 54911e0..2f2e02f 100644
--- a/src/core/lib/iomgr/socket_windows.c
+++ b/src/core/lib/iomgr/socket_windows.c
@@ -131,7 +131,7 @@
   gpr_mu_lock(&socket->state_mu);
   if (info->has_pending_iocp) {
     info->has_pending_iocp = 0;
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
   } else {
     info->closure = closure;
   }
@@ -154,7 +154,7 @@
   GPR_ASSERT(!info->has_pending_iocp);
   gpr_mu_lock(&socket->state_mu);
   if (info->closure) {
-    grpc_exec_ctx_sched(exec_ctx, info->closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, info->closure, GRPC_ERROR_NONE);
     info->closure = NULL;
   } else {
     info->has_pending_iocp = 1;
diff --git a/src/core/lib/iomgr/tcp_client_posix.c b/src/core/lib/iomgr/tcp_client_posix.c
index a3a70a8..d089d2b 100644
--- a/src/core/lib/iomgr/tcp_client_posix.c
+++ b/src/core/lib/iomgr/tcp_client_posix.c
@@ -265,7 +265,7 @@
     grpc_channel_args_destroy(ac->channel_args);
     gpr_free(ac);
   }
-  grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+  grpc_closure_sched(exec_ctx, closure, error);
 }
 
 static void tcp_client_connect_impl(grpc_exec_ctx *exec_ctx,
@@ -294,7 +294,7 @@
 
   error = grpc_create_dualstack_socket(addr, SOCK_STREAM, 0, &dsmode, &fd);
   if (error != GRPC_ERROR_NONE) {
-    grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+    grpc_closure_sched(exec_ctx, closure, error);
     return;
   }
   if (dsmode == GRPC_DSMODE_IPV4) {
@@ -303,7 +303,7 @@
     addr = &addr4_copy;
   }
   if ((error = prepare_socket(addr, fd, channel_args)) != GRPC_ERROR_NONE) {
-    grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+    grpc_closure_sched(exec_ctx, closure, error);
     return;
   }
 
@@ -321,14 +321,13 @@
   if (err >= 0) {
     *ep =
         grpc_tcp_client_create_from_fd(exec_ctx, fdobj, channel_args, addr_str);
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_ERROR_NONE);
     goto done;
   }
 
   if (errno != EWOULDBLOCK && errno != EINPROGRESS) {
     grpc_fd_orphan(exec_ctx, fdobj, NULL, NULL, "tcp_client_connect_error");
-    grpc_exec_ctx_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, closure, GRPC_OS_ERROR(errno, "connect"));
     goto done;
   }
 
@@ -343,8 +342,8 @@
   addr_str = NULL;
   gpr_mu_init(&ac->mu);
   ac->refs = 2;
-  ac->write_closure.cb = on_writable;
-  ac->write_closure.cb_arg = ac;
+  grpc_closure_init(&ac->write_closure, on_writable, ac,
+                    grpc_schedule_on_exec_ctx);
   ac->channel_args = grpc_channel_args_copy(channel_args);
 
   if (grpc_tcp_trace) {
diff --git a/src/core/lib/iomgr/tcp_client_uv.c b/src/core/lib/iomgr/tcp_client_uv.c
index b07f9ce..b1664b8 100644
--- a/src/core/lib/iomgr/tcp_client_uv.c
+++ b/src/core/lib/iomgr/tcp_client_uv.c
@@ -110,7 +110,7 @@
   if (done) {
     uv_tcp_connect_cleanup(&exec_ctx, connect);
   }
-  grpc_exec_ctx_sched(&exec_ctx, closure, error, NULL);
+  grpc_closure_sched(&exec_ctx, closure, error);
   grpc_exec_ctx_finish(&exec_ctx);
 }
 
diff --git a/src/core/lib/iomgr/tcp_client_windows.c b/src/core/lib/iomgr/tcp_client_windows.c
index 1127588..692252b 100644
--- a/src/core/lib/iomgr/tcp_client_windows.c
+++ b/src/core/lib/iomgr/tcp_client_windows.c
@@ -129,7 +129,7 @@
   async_connect_unlock_and_cleanup(exec_ctx, ac, socket);
   /* If the connection was aborted, the callback was already called when
      the deadline was met. */
-  grpc_exec_ctx_sched(exec_ctx, on_done, error, NULL);
+  grpc_closure_sched(exec_ctx, on_done, error);
 }
 
 /* Tries to issue one async connection, then schedules both an IOCP
@@ -227,7 +227,7 @@
   ac->addr_name = grpc_sockaddr_to_uri(addr);
   ac->endpoint = endpoint;
   ac->resource_quota = resource_quota;
-  grpc_closure_init(&ac->on_connect, on_connect, ac);
+  grpc_closure_init(&ac->on_connect, on_connect, ac, grpc_schedule_on_exec_ctx);
 
   grpc_timer_init(exec_ctx, &ac->alarm, deadline, on_alarm, ac,
                   gpr_now(GPR_CLOCK_MONOTONIC));
@@ -247,7 +247,7 @@
     closesocket(sock);
   }
   grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
-  grpc_exec_ctx_sched(exec_ctx, on_done, final_error, NULL);
+  grpc_closure_sched(exec_ctx, on_done, final_error);
 }
 
 #endif /* GRPC_WINSOCK_SOCKET */
diff --git a/src/core/lib/iomgr/tcp_posix.c b/src/core/lib/iomgr/tcp_posix.c
index 540305e..21a0371 100644
--- a/src/core/lib/iomgr/tcp_posix.c
+++ b/src/core/lib/iomgr/tcp_posix.c
@@ -316,7 +316,7 @@
     tcp->finished_edge = false;
     grpc_fd_notify_on_read(exec_ctx, tcp->em_fd, &tcp->read_closure);
   } else {
-    grpc_exec_ctx_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &tcp->read_closure, GRPC_ERROR_NONE);
   }
 }
 
@@ -460,11 +460,10 @@
 
   if (buf->length == 0) {
     GPR_TIMER_END("tcp_write", 0);
-    grpc_exec_ctx_sched(exec_ctx, cb,
-                        grpc_fd_is_shutdown(tcp->em_fd)
-                            ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
-                            : GRPC_ERROR_NONE,
-                        NULL);
+    grpc_closure_sched(exec_ctx, cb,
+                       grpc_fd_is_shutdown(tcp->em_fd)
+                           ? tcp_annotate_error(GRPC_ERROR_CREATE("EOF"), tcp)
+                           : GRPC_ERROR_NONE);
     return;
   }
   tcp->outgoing_buffer = buf;
@@ -484,7 +483,7 @@
       gpr_log(GPR_DEBUG, "write: %s", str);
       grpc_error_free_string(str);
     }
-    grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+    grpc_closure_sched(exec_ctx, cb, error);
   }
 
   GPR_TIMER_END("tcp_write", 0);
@@ -552,10 +551,10 @@
   gpr_ref_init(&tcp->refcount, 1);
   gpr_atm_no_barrier_store(&tcp->shutdown_count, 0);
   tcp->em_fd = em_fd;
-  tcp->read_closure.cb = tcp_handle_read;
-  tcp->read_closure.cb_arg = tcp;
-  tcp->write_closure.cb = tcp_handle_write;
-  tcp->write_closure.cb_arg = tcp;
+  grpc_closure_init(&tcp->read_closure, tcp_handle_read, tcp,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&tcp->write_closure, tcp_handle_write, tcp,
+                    grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&tcp->last_read_buffer);
   tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
   grpc_resource_user_slice_allocator_init(
diff --git a/src/core/lib/iomgr/tcp_server_posix.c b/src/core/lib/iomgr/tcp_server_posix.c
index 179f47e..6db624d 100644
--- a/src/core/lib/iomgr/tcp_server_posix.c
+++ b/src/core/lib/iomgr/tcp_server_posix.c
@@ -208,7 +208,7 @@
   GPR_ASSERT(s->shutdown);
   gpr_mu_unlock(&s->mu);
   if (s->shutdown_complete != NULL) {
-    grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
   }
 
   gpr_mu_destroy(&s->mu);
@@ -254,8 +254,8 @@
     grpc_tcp_listener *sp;
     for (sp = s->head; sp; sp = sp->next) {
       grpc_unlink_if_unix_domain_socket(&sp->addr);
-      sp->destroyed_closure.cb = destroyed_port;
-      sp->destroyed_closure.cb_arg = s;
+      grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
+                        grpc_schedule_on_exec_ctx);
       grpc_fd_orphan(exec_ctx, sp->emfd, &sp->destroyed_closure, NULL,
                      "tcp_listener_shutdown");
     }
@@ -723,8 +723,8 @@
           "clone_port", clone_port(sp, (unsigned)(pollset_count - 1))));
       for (i = 0; i < pollset_count; i++) {
         grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
-        sp->read_closure.cb = on_read;
-        sp->read_closure.cb_arg = sp;
+        grpc_closure_init(&sp->read_closure, on_read, sp,
+                          grpc_schedule_on_exec_ctx);
         grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
         s->active_ports++;
         sp = sp->next;
@@ -733,8 +733,8 @@
       for (i = 0; i < pollset_count; i++) {
         grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
       }
-      sp->read_closure.cb = on_read;
-      sp->read_closure.cb_arg = sp;
+      grpc_closure_init(&sp->read_closure, on_read, sp,
+                        grpc_schedule_on_exec_ctx);
       grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
       s->active_ports++;
       sp = sp->next;
@@ -760,7 +760,7 @@
   if (gpr_unref(&s->refs)) {
     grpc_tcp_server_shutdown_listeners(exec_ctx, s);
     gpr_mu_lock(&s->mu);
-    grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
+    grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
     gpr_mu_unlock(&s->mu);
     tcp_server_destroy(exec_ctx, s);
   }
diff --git a/src/core/lib/iomgr/tcp_server_uv.c b/src/core/lib/iomgr/tcp_server_uv.c
index e1a174c..8abc60d 100644
--- a/src/core/lib/iomgr/tcp_server_uv.c
+++ b/src/core/lib/iomgr/tcp_server_uv.c
@@ -126,7 +126,7 @@
 
 static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_tcp_server *s) {
   if (s->shutdown_complete != NULL) {
-    grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
   }
 
   while (s->head) {
@@ -170,7 +170,7 @@
   if (gpr_unref(&s->refs)) {
     /* Complete shutdown_starting work before destroying. */
     grpc_exec_ctx local_exec_ctx = GRPC_EXEC_CTX_INIT;
-    grpc_exec_ctx_enqueue_list(&local_exec_ctx, &s->shutdown_starting, NULL);
+    grpc_closure_list_sched(&local_exec_ctx, &s->shutdown_starting);
     if (exec_ctx == NULL) {
       grpc_exec_ctx_flush(&local_exec_ctx);
       tcp_server_destroy(&local_exec_ctx, s);
diff --git a/src/core/lib/iomgr/tcp_server_windows.c b/src/core/lib/iomgr/tcp_server_windows.c
index b0c8586..6302bff 100644
--- a/src/core/lib/iomgr/tcp_server_windows.c
+++ b/src/core/lib/iomgr/tcp_server_windows.c
@@ -162,11 +162,12 @@
 static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
                                    grpc_tcp_server *s) {
   if (s->shutdown_complete != NULL) {
-    grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
   }
 
-  grpc_exec_ctx_sched(exec_ctx, grpc_closure_create(destroy_server, s),
-                      GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, grpc_closure_create(destroy_server, s,
+                                                   grpc_schedule_on_exec_ctx),
+                     GRPC_ERROR_NONE);
 }
 
 grpc_tcp_server *grpc_tcp_server_ref(grpc_tcp_server *s) {
@@ -204,7 +205,7 @@
   if (gpr_unref(&s->refs)) {
     grpc_tcp_server_shutdown_listeners(exec_ctx, s);
     gpr_mu_lock(&s->mu);
-    grpc_exec_ctx_enqueue_list(exec_ctx, &s->shutdown_starting, NULL);
+    grpc_closure_list_sched(exec_ctx, &s->shutdown_starting);
     gpr_mu_unlock(&s->mu);
     tcp_server_destroy(exec_ctx, s);
   }
@@ -465,7 +466,7 @@
   sp->new_socket = INVALID_SOCKET;
   sp->port = port;
   sp->port_index = port_index;
-  grpc_closure_init(&sp->on_accept, on_accept, sp);
+  grpc_closure_init(&sp->on_accept, on_accept, sp, grpc_schedule_on_exec_ctx);
   GPR_ASSERT(sp->socket);
   gpr_mu_unlock(&s->mu);
   *listener = sp;
diff --git a/src/core/lib/iomgr/tcp_uv.c b/src/core/lib/iomgr/tcp_uv.c
index 6e2ad1d..4be9545 100644
--- a/src/core/lib/iomgr/tcp_uv.c
+++ b/src/core/lib/iomgr/tcp_uv.c
@@ -169,7 +169,7 @@
     // nread < 0: Error
     error = GRPC_ERROR_CREATE("TCP Read failed");
   }
-  grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+  grpc_closure_sched(&exec_ctx, cb, error);
   grpc_exec_ctx_finish(&exec_ctx);
 }
 
@@ -190,7 +190,7 @@
     error = GRPC_ERROR_CREATE("TCP Read failed at start");
     error =
         grpc_error_set_str(error, GRPC_ERROR_STR_OS_ERROR, uv_strerror(status));
-    grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+    grpc_closure_sched(exec_ctx, cb, error);
   }
   if (grpc_tcp_trace) {
     const char *str = grpc_error_string(error);
@@ -217,7 +217,7 @@
   gpr_free(tcp->write_buffers);
   grpc_resource_user_free(&exec_ctx, tcp->resource_user,
                           sizeof(uv_buf_t) * tcp->write_slices->count);
-  grpc_exec_ctx_sched(&exec_ctx, cb, error, NULL);
+  grpc_closure_sched(&exec_ctx, cb, error);
   grpc_exec_ctx_finish(&exec_ctx);
 }
 
@@ -243,8 +243,8 @@
   }
 
   if (tcp->shutting_down) {
-    grpc_exec_ctx_sched(exec_ctx, cb,
-                        GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+    grpc_closure_sched(exec_ctx, cb,
+                       GRPC_ERROR_CREATE("TCP socket is shutting down"));
     return;
   }
 
@@ -254,7 +254,7 @@
   if (tcp->write_slices->count == 0) {
     // No slices means we don't have to do anything,
     // and libuv doesn't like empty writes
-    grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
     return;
   }
 
diff --git a/src/core/lib/iomgr/tcp_windows.c b/src/core/lib/iomgr/tcp_windows.c
index d4613b6..fa13ac1 100644
--- a/src/core/lib/iomgr/tcp_windows.c
+++ b/src/core/lib/iomgr/tcp_windows.c
@@ -188,7 +188,7 @@
 
   tcp->read_cb = NULL;
   TCP_UNREF(exec_ctx, tcp, "read");
-  grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+  grpc_closure_sched(exec_ctx, cb, error);
 }
 
 static void win_read(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -202,8 +202,8 @@
   WSABUF buffer;
 
   if (tcp->shutting_down) {
-    grpc_exec_ctx_sched(exec_ctx, cb,
-                        GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+    grpc_closure_sched(exec_ctx, cb,
+                       GRPC_ERROR_CREATE("TCP socket is shutting down"));
     return;
   }
 
@@ -227,7 +227,7 @@
   /* Did we get data immediately? Yay. */
   if (info->wsa_error != WSAEWOULDBLOCK) {
     info->bytes_transfered = bytes_read;
-    grpc_exec_ctx_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &tcp->on_read, GRPC_ERROR_NONE);
     return;
   }
 
@@ -240,8 +240,8 @@
     int wsa_error = WSAGetLastError();
     if (wsa_error != WSA_IO_PENDING) {
       info->wsa_error = wsa_error;
-      grpc_exec_ctx_sched(exec_ctx, &tcp->on_read,
-                          GRPC_WSA_ERROR(info->wsa_error, "WSARecv"), NULL);
+      grpc_closure_sched(exec_ctx, &tcp->on_read,
+                         GRPC_WSA_ERROR(info->wsa_error, "WSARecv"));
       return;
     }
   }
@@ -272,7 +272,7 @@
   }
 
   TCP_UNREF(exec_ctx, tcp, "write");
-  grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+  grpc_closure_sched(exec_ctx, cb, error);
 }
 
 /* Initiates a write. */
@@ -290,8 +290,8 @@
   size_t len;
 
   if (tcp->shutting_down) {
-    grpc_exec_ctx_sched(exec_ctx, cb,
-                        GRPC_ERROR_CREATE("TCP socket is shutting down"), NULL);
+    grpc_closure_sched(exec_ctx, cb,
+                       GRPC_ERROR_CREATE("TCP socket is shutting down"));
     return;
   }
 
@@ -322,7 +322,7 @@
     grpc_error *error = status == 0
                             ? GRPC_ERROR_NONE
                             : GRPC_WSA_ERROR(info->wsa_error, "WSASend");
-    grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+    grpc_closure_sched(exec_ctx, cb, error);
     if (allocated) gpr_free(allocated);
     return;
   }
@@ -340,8 +340,7 @@
     int wsa_error = WSAGetLastError();
     if (wsa_error != WSA_IO_PENDING) {
       TCP_UNREF(exec_ctx, tcp, "write");
-      grpc_exec_ctx_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"),
-                          NULL);
+      grpc_closure_sched(exec_ctx, cb, GRPC_WSA_ERROR(wsa_error, "WSASend"));
       return;
     }
   }
@@ -424,8 +423,8 @@
   tcp->socket = socket;
   gpr_mu_init(&tcp->mu);
   gpr_ref_init(&tcp->refcount, 1);
-  grpc_closure_init(&tcp->on_read, on_read, tcp);
-  grpc_closure_init(&tcp->on_write, on_write, tcp);
+  grpc_closure_init(&tcp->on_read, on_read, tcp, grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&tcp->on_write, on_write, tcp, grpc_schedule_on_exec_ctx);
   tcp->peer_string = gpr_strdup(peer_string);
   tcp->resource_user = grpc_resource_user_create(resource_quota, peer_string);
   /* Tell network status tracking code about the new endpoint */
diff --git a/src/core/lib/iomgr/timer_generic.c b/src/core/lib/iomgr/timer_generic.c
index 00058f9..ecd3b28 100644
--- a/src/core/lib/iomgr/timer_generic.c
+++ b/src/core/lib/iomgr/timer_generic.c
@@ -184,22 +184,22 @@
   shard_type *shard = &g_shards[shard_idx(timer)];
   GPR_ASSERT(deadline.clock_type == g_clock_type);
   GPR_ASSERT(now.clock_type == g_clock_type);
-  grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
+  grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg,
+                    grpc_schedule_on_exec_ctx);
   timer->deadline = deadline;
   timer->triggered = 0;
 
   if (!g_initialized) {
     timer->triggered = 1;
-    grpc_exec_ctx_sched(
+    grpc_closure_sched(
         exec_ctx, &timer->closure,
-        GRPC_ERROR_CREATE("Attempt to create timer before initialization"),
-        NULL);
+        GRPC_ERROR_CREATE("Attempt to create timer before initialization"));
     return;
   }
 
   if (gpr_time_cmp(deadline, now) <= 0) {
     timer->triggered = 1;
-    grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE);
     return;
   }
 
@@ -251,7 +251,7 @@
   shard_type *shard = &g_shards[shard_idx(timer)];
   gpr_mu_lock(&shard->mu);
   if (!timer->triggered) {
-    grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL);
+    grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED);
     timer->triggered = 1;
     if (timer->heap_index == INVALID_HEAP_INDEX) {
       list_remove(timer);
@@ -317,7 +317,7 @@
   grpc_timer *timer;
   gpr_mu_lock(&shard->mu);
   while ((timer = pop_one(shard, now))) {
-    grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_REF(error), NULL);
+    grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_REF(error));
     n++;
   }
   *new_min_deadline = compute_min_deadline(shard);
diff --git a/src/core/lib/iomgr/timer_uv.c b/src/core/lib/iomgr/timer_uv.c
index cfcb892..00b835f 100644
--- a/src/core/lib/iomgr/timer_uv.c
+++ b/src/core/lib/iomgr/timer_uv.c
@@ -55,7 +55,7 @@
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   GPR_ASSERT(!timer->triggered);
   timer->triggered = 1;
-  grpc_exec_ctx_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(&exec_ctx, &timer->closure, GRPC_ERROR_NONE);
   stop_uv_timer(handle);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -65,10 +65,11 @@
                      void *timer_cb_arg, gpr_timespec now) {
   uint64_t timeout;
   uv_timer_t *uv_timer;
-  grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg);
+  grpc_closure_init(&timer->closure, timer_cb, timer_cb_arg,
+                    grpc_schedule_on_exec_ctx);
   if (gpr_time_cmp(deadline, now) <= 0) {
     timer->triggered = 1;
-    grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_NONE);
     return;
   }
   timer->triggered = 0;
@@ -83,7 +84,7 @@
 void grpc_timer_cancel(grpc_exec_ctx *exec_ctx, grpc_timer *timer) {
   if (!timer->triggered) {
     timer->triggered = 1;
-    grpc_exec_ctx_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED, NULL);
+    grpc_closure_sched(exec_ctx, &timer->closure, GRPC_ERROR_CANCELLED);
     stop_uv_timer((uv_timer_t *)timer->uv_timer);
   }
 }
diff --git a/src/core/lib/iomgr/udp_server.c b/src/core/lib/iomgr/udp_server.c
index 3c24ea9..dfbd295 100644
--- a/src/core/lib/iomgr/udp_server.c
+++ b/src/core/lib/iomgr/udp_server.c
@@ -126,7 +126,7 @@
 
 static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_udp_server *s) {
   if (s->shutdown_complete != NULL) {
-    grpc_exec_ctx_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, s->shutdown_complete, GRPC_ERROR_NONE);
   }
 
   gpr_mu_destroy(&s->mu);
@@ -170,8 +170,8 @@
     for (sp = s->head; sp; sp = sp->next) {
       grpc_unlink_if_unix_domain_socket(&sp->addr);
 
-      sp->destroyed_closure.cb = destroyed_port;
-      sp->destroyed_closure.cb_arg = s;
+      grpc_closure_init(&sp->destroyed_closure, destroyed_port, s,
+                        grpc_schedule_on_exec_ctx);
 
       /* Call the orphan_cb to signal that the FD is about to be closed and
        * should no longer be used. */
@@ -446,8 +446,8 @@
     for (i = 0; i < pollset_count; i++) {
       grpc_pollset_add_fd(exec_ctx, pollsets[i], sp->emfd);
     }
-    sp->read_closure.cb = on_read;
-    sp->read_closure.cb_arg = sp;
+    grpc_closure_init(&sp->read_closure, on_read, sp,
+                      grpc_schedule_on_exec_ctx);
     grpc_fd_notify_on_read(exec_ctx, sp->emfd, &sp->read_closure);
 
     s->active_ports++;
diff --git a/src/core/lib/iomgr/workqueue.h b/src/core/lib/iomgr/workqueue.h
index 73d9849..371b0f5 100644
--- a/src/core/lib/iomgr/workqueue.h
+++ b/src/core/lib/iomgr/workqueue.h
@@ -72,17 +72,16 @@
 void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue);
 #endif
 
-/** Add a work item to a workqueue. Items added to a work queue will be started
-    in approximately the order they were enqueued, on some thread that may or
-    may not be the current thread. Successive closures enqueued onto a workqueue
-    MAY be executed concurrently.
+/** Fetch the workqueue closure scheduler. Items added to a work queue will be
+    started in approximately the order they were enqueued, on some thread that
+    may or may not be the current thread. Successive closures enqueued onto a
+    workqueue MAY be executed concurrently.
 
     It is generally more expensive to add a closure to a workqueue than to the
     execution context, both in terms of CPU work and in execution latency.
 
     Use work queues when it's important that other threads be given a chance to
     tackle some workload. */
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
-                            grpc_closure *closure, grpc_error *error);
+grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue);
 
 #endif /* GRPC_CORE_LIB_IOMGR_WORKQUEUE_H */
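
With `grpc_workqueue_enqueue` removed, callers consume the workqueue as a scheduler: bind it into the closure, then schedule through the ordinary API. A hedged sketch of a migrated call site (the callback, its argument, and the `offload` helper are hypothetical):

```c
#include "src/core/lib/iomgr/workqueue.h"

static void work_cb(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  /* may run on some other thread, per the workqueue contract above */
}

static void offload(grpc_exec_ctx *exec_ctx, grpc_workqueue *wq, void *arg) {
  /* Before: grpc_workqueue_enqueue(exec_ctx, wq, c, GRPC_ERROR_NONE); */
  grpc_closure *c =
      grpc_closure_create(work_cb, arg, grpc_workqueue_scheduler(wq));
  grpc_closure_sched(exec_ctx, c, GRPC_ERROR_NONE);
}
```

The libuv and Windows stubs that follow simply return `grpc_schedule_on_exec_ctx`, preserving the old behavior on those platforms, where enqueueing fell straight through to the exec_ctx.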
diff --git a/src/core/lib/iomgr/workqueue_uv.c b/src/core/lib/iomgr/workqueue_uv.c
index e58ca47..4d61b40 100644
--- a/src/core/lib/iomgr/workqueue_uv.c
+++ b/src/core/lib/iomgr/workqueue_uv.c
@@ -58,9 +58,8 @@
 void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
 #endif
 
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
-                            grpc_closure *closure, grpc_error *error) {
-  grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
+  return grpc_schedule_on_exec_ctx;
 }
 
 #endif /* GPR_UV */
diff --git a/src/core/lib/iomgr/workqueue_windows.c b/src/core/lib/iomgr/workqueue_windows.c
index 5c93d3c..234b47c 100644
--- a/src/core/lib/iomgr/workqueue_windows.c
+++ b/src/core/lib/iomgr/workqueue_windows.c
@@ -56,9 +56,8 @@
 void grpc_workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue) {}
 #endif
 
-void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
-                            grpc_closure *closure, grpc_error *error) {
-  grpc_exec_ctx_sched(exec_ctx, closure, error, NULL);
+grpc_closure_scheduler *grpc_workqueue_scheduler(grpc_workqueue *workqueue) {
+  return grpc_schedule_on_exec_ctx;
 }
 
 #endif /* GPR_WINDOWS */
diff --git a/src/core/lib/security/credentials/fake/fake_credentials.c b/src/core/lib/security/credentials/fake/fake_credentials.c
index ea4cb76..1cf142f 100644
--- a/src/core/lib/security/credentials/fake/fake_credentials.c
+++ b/src/core/lib/security/credentials/fake/fake_credentials.c
@@ -113,9 +113,10 @@
   if (c->is_async) {
     grpc_credentials_metadata_request *cb_arg =
         grpc_credentials_metadata_request_create(creds, cb, user_data);
-    grpc_executor_push(
-        grpc_closure_create(on_simulated_token_fetch_done, cb_arg),
-        GRPC_ERROR_NONE);
+    grpc_closure_sched(exec_ctx,
+                       grpc_closure_create(on_simulated_token_fetch_done,
+                                           cb_arg, grpc_executor_scheduler),
+                       GRPC_ERROR_NONE);
   } else {
     cb(exec_ctx, user_data, c->md_store->entries, 1, GRPC_CREDENTIALS_OK, NULL);
   }
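
This hunk introduces the third scheduler used in the change: `grpc_executor_scheduler`, which replaces the removed `grpc_executor_push` for hopping onto an executor thread. A sketch of the idiom, with hypothetical payload and callback names and assuming the scheduler is exported from the iomgr executor header:

```c
#include <grpc/support/alloc.h>

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/executor.h"

static void fetch_done(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  gpr_free(arg); /* one-shot payload */
}

static void start_async_fetch(grpc_exec_ctx *exec_ctx, void *payload) {
  /* grpc_closure_create heap-allocates a one-shot closure; binding
   * grpc_executor_scheduler makes grpc_closure_sched hand it to an
   * executor thread rather than running it on this exec_ctx. */
  grpc_closure_sched(
      exec_ctx,
      grpc_closure_create(fetch_done, payload, grpc_executor_scheduler),
      GRPC_ERROR_NONE);
}
```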
diff --git a/src/core/lib/security/credentials/google_default/google_default_credentials.c b/src/core/lib/security/credentials/google_default/google_default_credentials.c
index afe0e3d..caf57c8 100644
--- a/src/core/lib/security/credentials/google_default/google_default_credentials.c
+++ b/src/core/lib/security/credentials/google_default/google_default_credentials.c
@@ -130,7 +130,8 @@
   grpc_httpcli_get(
       &exec_ctx, &context, &detector.pollent, resource_quota, &request,
       gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), max_detection_delay),
-      grpc_closure_create(on_compute_engine_detection_http_response, &detector),
+      grpc_closure_create(on_compute_engine_detection_http_response, &detector,
+                          grpc_schedule_on_exec_ctx),
       &detector.response);
   grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
 
@@ -155,7 +156,8 @@
 
   grpc_httpcli_context_destroy(&context);
   grpc_closure_init(&destroy_closure, destroy_pollset,
-                    grpc_polling_entity_pollset(&detector.pollent));
+                    grpc_polling_entity_pollset(&detector.pollent),
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx,
                         grpc_polling_entity_pollset(&detector.pollent),
                         &destroy_closure);
diff --git a/src/core/lib/security/credentials/jwt/jwt_verifier.c b/src/core/lib/security/credentials/jwt/jwt_verifier.c
index 03097a5..8c75098 100644
--- a/src/core/lib/security/credentials/jwt/jwt_verifier.c
+++ b/src/core/lib/security/credentials/jwt/jwt_verifier.c
@@ -677,7 +677,7 @@
   grpc_httpcli_get(
       exec_ctx, &ctx->verifier->http_ctx, &ctx->pollent, resource_quota, &req,
       gpr_time_add(gpr_now(GPR_CLOCK_REALTIME), grpc_jwt_verifier_max_delay),
-      grpc_closure_create(on_keys_retrieved, ctx),
+      grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx),
       &ctx->responses[HTTP_RESPONSE_KEYS]);
   grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
   grpc_json_destroy(json);
@@ -778,7 +778,8 @@
       *(path_prefix++) = '\0';
       gpr_asprintf(&req.http.path, "/%s/%s", path_prefix, iss);
     }
-    http_cb = grpc_closure_create(on_keys_retrieved, ctx);
+    http_cb =
+        grpc_closure_create(on_keys_retrieved, ctx, grpc_schedule_on_exec_ctx);
     rsp_idx = HTTP_RESPONSE_KEYS;
   } else {
     req.host = gpr_strdup(strstr(iss, "https://") == iss ? iss + 8 : iss);
@@ -790,7 +791,8 @@
       gpr_asprintf(&req.http.path, "/%s%s", path_prefix,
                    GRPC_OPENID_CONFIG_URL_SUFFIX);
     }
-    http_cb = grpc_closure_create(on_openid_config_retrieved, ctx);
+    http_cb = grpc_closure_create(on_openid_config_retrieved, ctx,
+                                  grpc_schedule_on_exec_ctx);
     rsp_idx = HTTP_RESPONSE_OPENID;
   }
 
diff --git a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
index b3625b2..9aa7863 100644
--- a/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
+++ b/src/core/lib/security/credentials/oauth2/oauth2_credentials.c
@@ -312,9 +312,10 @@
      extreme memory pressure. */
   grpc_resource_quota *resource_quota =
       grpc_resource_quota_create("oauth2_credentials");
-  grpc_httpcli_get(exec_ctx, httpcli_context, pollent, resource_quota, &request,
-                   deadline, grpc_closure_create(response_cb, metadata_req),
-                   &metadata_req->response);
+  grpc_httpcli_get(
+      exec_ctx, httpcli_context, pollent, resource_quota, &request, deadline,
+      grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
+      &metadata_req->response);
   grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
 }
 
@@ -368,10 +369,11 @@
      extreme memory pressure. */
   grpc_resource_quota *resource_quota =
       grpc_resource_quota_create("oauth2_credentials_refresh");
-  grpc_httpcli_post(exec_ctx, httpcli_context, pollent, resource_quota,
-                    &request, body, strlen(body), deadline,
-                    grpc_closure_create(response_cb, metadata_req),
-                    &metadata_req->response);
+  grpc_httpcli_post(
+      exec_ctx, httpcli_context, pollent, resource_quota, &request, body,
+      strlen(body), deadline,
+      grpc_closure_create(response_cb, metadata_req, grpc_schedule_on_exec_ctx),
+      &metadata_req->response);
   grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
   gpr_free(body);
 }
diff --git a/src/core/lib/security/transport/secure_endpoint.c b/src/core/lib/security/transport/secure_endpoint.c
index 331a8f1..750c367 100644
--- a/src/core/lib/security/transport/secure_endpoint.c
+++ b/src/core/lib/security/transport/secure_endpoint.c
@@ -146,7 +146,7 @@
     }
   }
   ep->read_buffer = NULL;
-  grpc_exec_ctx_sched(exec_ctx, ep->read_cb, error, NULL);
+  grpc_closure_sched(exec_ctx, ep->read_cb, error);
   SECURE_ENDPOINT_UNREF(exec_ctx, ep, "read");
 }
 
@@ -329,10 +329,9 @@
   if (result != TSI_OK) {
     /* TODO(yangg) do different things according to the error type? */
     grpc_slice_buffer_reset_and_unref(&ep->output_buffer);
-    grpc_exec_ctx_sched(
+    grpc_closure_sched(
         exec_ctx, cb,
-        grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result),
-        NULL);
+        grpc_set_tsi_error_result(GRPC_ERROR_CREATE("Wrap failed"), result));
     GPR_TIMER_END("secure_endpoint.endpoint_write", 0);
     return;
   }
@@ -417,7 +416,7 @@
   grpc_slice_buffer_init(&ep->output_buffer);
   grpc_slice_buffer_init(&ep->source_buffer);
   ep->read_buffer = NULL;
-  grpc_closure_init(&ep->on_read, on_read, ep);
+  grpc_closure_init(&ep->on_read, on_read, ep, grpc_schedule_on_exec_ctx);
   gpr_mu_init(&ep->protector_mu);
   gpr_ref_init(&ep->ref, 1);
   return &ep->base;
diff --git a/src/core/lib/security/transport/security_connector.c b/src/core/lib/security/transport/security_connector.c
index 5b088aa..17ad681 100644
--- a/src/core/lib/security/transport/security_connector.c
+++ b/src/core/lib/security/transport/security_connector.c
@@ -134,9 +134,9 @@
                                         grpc_auth_context **auth_context,
                                         grpc_closure *on_peer_checked) {
   if (sc == NULL) {
-    grpc_exec_ctx_sched(
+    grpc_closure_sched(
         exec_ctx, on_peer_checked,
-        GRPC_ERROR_CREATE("cannot check peer -- no security connector"), NULL);
+        GRPC_ERROR_CREATE("cannot check peer -- no security connector"));
     tsi_peer_destruct(&peer);
   } else {
     sc->vtable->check_peer(exec_ctx, sc, peer, auth_context, on_peer_checked);
@@ -273,7 +273,7 @@
       GRPC_FAKE_TRANSPORT_SECURITY_TYPE);
 
 end:
-  grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
+  grpc_closure_sched(exec_ctx, on_peer_checked, error);
   tsi_peer_destruct(&peer);
 }
 
@@ -508,7 +508,7 @@
                                              ? c->overridden_target_name
                                              : c->target_name,
                                      &peer, auth_context);
-  grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
+  grpc_closure_sched(exec_ctx, on_peer_checked, error);
   tsi_peer_destruct(&peer);
 }
 
@@ -518,7 +518,7 @@
                                   grpc_closure *on_peer_checked) {
   grpc_error *error = ssl_check_peer(sc, NULL, &peer, auth_context);
   tsi_peer_destruct(&peer);
-  grpc_exec_ctx_sched(exec_ctx, on_peer_checked, error, NULL);
+  grpc_closure_sched(exec_ctx, on_peer_checked, error);
 }
 
 static void add_shallow_auth_property_to_peer(tsi_peer *peer,
diff --git a/src/core/lib/security/transport/security_handshaker.c b/src/core/lib/security/transport/security_handshaker.c
index 41a775d..748bf4a 100644
--- a/src/core/lib/security/transport/security_handshaker.c
+++ b/src/core/lib/security/transport/security_handshaker.c
@@ -136,7 +136,7 @@
     h->shutdown = true;
   }
   // Invoke callback.
-  grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, error, NULL);
+  grpc_closure_sched(exec_ctx, h->on_handshake_done, error);
 }
 
 static void on_peer_checked(grpc_exec_ctx *exec_ctx, void *arg,
@@ -173,7 +173,7 @@
       grpc_channel_args_copy_and_add(tmp_args, &auth_context_arg, 1);
   grpc_channel_args_destroy(tmp_args);
   // Invoke callback.
-  grpc_exec_ctx_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, h->on_handshake_done, GRPC_ERROR_NONE);
   // Set shutdown to true so that subsequent calls to
   // security_handshaker_shutdown() do nothing.
   h->shutdown = true;
@@ -392,10 +392,13 @@
   h->handshake_buffer_size = GRPC_INITIAL_HANDSHAKE_BUFFER_SIZE;
   h->handshake_buffer = gpr_malloc(h->handshake_buffer_size);
   grpc_closure_init(&h->on_handshake_data_sent_to_peer,
-                    on_handshake_data_sent_to_peer, h);
+                    on_handshake_data_sent_to_peer, h,
+                    grpc_schedule_on_exec_ctx);
   grpc_closure_init(&h->on_handshake_data_received_from_peer,
-                    on_handshake_data_received_from_peer, h);
-  grpc_closure_init(&h->on_peer_checked, on_peer_checked, h);
+                    on_handshake_data_received_from_peer, h,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&h->on_peer_checked, on_peer_checked, h,
+                    grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&h->left_overs);
   grpc_slice_buffer_init(&h->outgoing);
   return &h->base;
@@ -418,9 +421,8 @@
                                          grpc_tcp_server_acceptor *acceptor,
                                          grpc_closure *on_handshake_done,
                                          grpc_handshaker_args *args) {
-  grpc_exec_ctx_sched(exec_ctx, on_handshake_done,
-                      GRPC_ERROR_CREATE("Failed to create security handshaker"),
-                      NULL);
+  grpc_closure_sched(exec_ctx, on_handshake_done,
+                     GRPC_ERROR_CREATE("Failed to create security handshaker"));
 }
 
 static const grpc_handshaker_vtable fail_handshaker_vtable = {
diff --git a/src/core/lib/security/transport/server_auth_filter.c b/src/core/lib/security/transport/server_auth_filter.c
index e6a242e..5b4adc4 100644
--- a/src/core/lib/security/transport/server_auth_filter.c
+++ b/src/core/lib/security/transport/server_auth_filter.c
@@ -132,7 +132,7 @@
     grpc_metadata_batch_filter(calld->recv_initial_metadata, remove_consumed_md,
                                elem);
     grpc_metadata_array_destroy(&calld->md);
-    grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(&exec_ctx, calld->on_done_recv, GRPC_ERROR_NONE);
   } else {
     grpc_slice message;
     grpc_transport_stream_op *close_op = gpr_malloc(sizeof(*close_op));
@@ -148,13 +148,13 @@
       calld->transport_op->send_message = NULL;
     }
     calld->transport_op->send_trailing_metadata = NULL;
-    close_op->on_complete = grpc_closure_create(destroy_op, close_op);
+    close_op->on_complete =
+        grpc_closure_create(destroy_op, close_op, grpc_schedule_on_exec_ctx);
     grpc_transport_stream_op_add_close(close_op, status, &message);
     grpc_call_next_op(&exec_ctx, elem, close_op);
-    grpc_exec_ctx_sched(&exec_ctx, calld->on_done_recv,
-                        grpc_error_set_int(GRPC_ERROR_CREATE(error_details),
-                                           GRPC_ERROR_INT_GRPC_STATUS, status),
-                        NULL);
+    grpc_closure_sched(&exec_ctx, calld->on_done_recv,
+                       grpc_error_set_int(GRPC_ERROR_CREATE(error_details),
+                                          GRPC_ERROR_INT_GRPC_STATUS, status));
   }
 
   grpc_exec_ctx_finish(&exec_ctx);
@@ -174,8 +174,7 @@
       return;
     }
   }
-  grpc_exec_ctx_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error),
-                      NULL);
+  grpc_closure_sched(exec_ctx, calld->on_done_recv, GRPC_ERROR_REF(error));
 }
 
 static void set_recv_ops_md_callbacks(grpc_call_element *elem,
@@ -214,7 +213,8 @@
 
   /* initialize members */
   memset(calld, 0, sizeof(*calld));
-  grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem);
+  grpc_closure_init(&calld->auth_on_recv, auth_on_recv, elem,
+                    grpc_schedule_on_exec_ctx);
 
   if (args->context[GRPC_CONTEXT_SECURITY].value != NULL) {
     args->context[GRPC_CONTEXT_SECURITY].destroy(
diff --git a/src/core/lib/support/string.c b/src/core/lib/support/string.c
index f263f82..426fce2 100644
--- a/src/core/lib/support/string.c
+++ b/src/core/lib/support/string.c
@@ -279,7 +279,8 @@
 void *gpr_memrchr(const void *s, int c, size_t n) {
   if (s == NULL) return NULL;
   char *b = (char *)s;
-  for (size_t i = 0; i < n; i++) {
+  size_t i;
+  for (i = 0; i < n; i++) {
     if (b[n - i - 1] == c) {
       return &b[n - i - 1];
     }
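
The `gpr_memrchr` tweak looks cosmetic but is a portability fix: declaring the index inside `for (...)` requires C99, and the gpr sources are apparently kept buildable as C89. A sketch of the whole function after the change; the tail is not shown in the hunk, so the final `return NULL;` on no match is an assumption:

```c
void *gpr_memrchr(const void *s, int c, size_t n) {
  if (s == NULL) return NULL;
  char *b = (char *)s;
  size_t i; /* declared at block scope: valid in both C89 and C99 */
  for (i = 0; i < n; i++) {
    if (b[n - i - 1] == c) {
      return &b[n - i - 1];
    }
  }
  return NULL; /* assumed: no occurrence of c in the first n bytes */
}
```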
diff --git a/src/core/lib/surface/call.c b/src/core/lib/surface/call.c
index 8ca3cab..b208010 100644
--- a/src/core/lib/surface/call.c
+++ b/src/core/lib/surface/call.c
@@ -794,7 +794,8 @@
   memset(&tc->op, 0, sizeof(tc->op));
   tc->op.cancel_error = tc->error;
   /* reuse closure to catch completion */
-  grpc_closure_init(&tc->closure, done_termination, tc);
+  grpc_closure_init(&tc->closure, done_termination, tc,
+                    grpc_schedule_on_exec_ctx);
   tc->op.on_complete = &tc->closure;
   execute_op(exec_ctx, tc->call, &tc->op);
 }
@@ -804,7 +805,8 @@
   memset(&tc->op, 0, sizeof(tc->op));
   tc->op.close_error = tc->error;
   /* reuse closure to catch completion */
-  grpc_closure_init(&tc->closure, done_termination, tc);
+  grpc_closure_init(&tc->closure, done_termination, tc,
+                    grpc_schedule_on_exec_ctx);
   tc->op.on_complete = &tc->closure;
   execute_op(exec_ctx, tc->call, &tc->op);
 }
@@ -814,13 +816,13 @@
   set_status_from_error(tc->call, STATUS_FROM_API_OVERRIDE, tc->error);
 
   if (tc->type == TC_CANCEL) {
-    grpc_closure_init(&tc->closure, send_cancel, tc);
+    grpc_closure_init(&tc->closure, send_cancel, tc, grpc_schedule_on_exec_ctx);
     GRPC_CALL_INTERNAL_REF(tc->call, "cancel");
   } else if (tc->type == TC_CLOSE) {
-    grpc_closure_init(&tc->closure, send_close, tc);
+    grpc_closure_init(&tc->closure, send_close, tc, grpc_schedule_on_exec_ctx);
     GRPC_CALL_INTERNAL_REF(tc->call, "close");
   }
-  grpc_exec_ctx_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, &tc->closure, GRPC_ERROR_NONE);
   return GRPC_CALL_OK;
 }
 
@@ -1138,8 +1140,8 @@
     } else {
       *call->receiving_buffer = grpc_raw_byte_buffer_create(NULL, 0);
     }
-    grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready,
-                      bctl);
+    grpc_closure_init(&call->receiving_slice_ready, receiving_slice_ready, bctl,
+                      grpc_schedule_on_exec_ctx);
     continue_receiving_slices(exec_ctx, bctl);
   }
 }
@@ -1251,9 +1253,10 @@
   call->has_initial_md_been_received = true;
   if (call->saved_receiving_stream_ready_bctlp != NULL) {
     grpc_closure *saved_rsr_closure = grpc_closure_create(
-        receiving_stream_ready, call->saved_receiving_stream_ready_bctlp);
+        receiving_stream_ready, call->saved_receiving_stream_ready_bctlp,
+        grpc_schedule_on_exec_ctx);
     call->saved_receiving_stream_ready_bctlp = NULL;
-    grpc_exec_ctx_sched(exec_ctx, saved_rsr_closure, error, NULL);
+    grpc_closure_sched(exec_ctx, saved_rsr_closure, error);
   }
 
   gpr_mu_unlock(&call->mu);
@@ -1558,7 +1561,8 @@
         call->received_initial_metadata = 1;
         call->buffered_metadata[0] = op->data.recv_initial_metadata;
         grpc_closure_init(&call->receiving_initial_metadata_ready,
-                          receiving_initial_metadata_ready, bctl);
+                          receiving_initial_metadata_ready, bctl,
+                          grpc_schedule_on_exec_ctx);
         bctl->recv_initial_metadata = 1;
         stream_op->recv_initial_metadata =
             &call->metadata_batch[1 /* is_receiving */][0 /* is_trailing */];
@@ -1581,7 +1585,7 @@
         call->receiving_buffer = op->data.recv_message;
         stream_op->recv_message = &call->receiving_stream;
         grpc_closure_init(&call->receiving_stream_ready, receiving_stream_ready,
-                          bctl);
+                          bctl, grpc_schedule_on_exec_ctx);
         stream_op->recv_message_ready = &call->receiving_stream_ready;
         num_completion_callbacks_needed++;
         break;
@@ -1646,7 +1650,8 @@
   gpr_ref_init(&bctl->steps_to_complete, num_completion_callbacks_needed);
 
   stream_op->context = call->context;
-  grpc_closure_init(&bctl->finish_batch, finish_batch, bctl);
+  grpc_closure_init(&bctl->finish_batch, finish_batch, bctl,
+                    grpc_schedule_on_exec_ctx);
   stream_op->on_complete = &bctl->finish_batch;
   gpr_mu_unlock(&call->mu);
 
diff --git a/src/core/lib/surface/channel_ping.c b/src/core/lib/surface/channel_ping.c
index 0d2f01a..e68febd 100644
--- a/src/core/lib/surface/channel_ping.c
+++ b/src/core/lib/surface/channel_ping.c
@@ -71,7 +71,7 @@
   GPR_ASSERT(reserved == NULL);
   pr->tag = tag;
   pr->cq = cq;
-  grpc_closure_init(&pr->closure, ping_done, pr);
+  grpc_closure_init(&pr->closure, ping_done, pr, grpc_schedule_on_exec_ctx);
   op->send_ping = &pr->closure;
   op->bind_pollset = grpc_cq_pollset(cq);
   grpc_cq_begin_op(cq, tag);
diff --git a/src/core/lib/surface/completion_queue.c b/src/core/lib/surface/completion_queue.c
index 184c1a1..4613c90 100644
--- a/src/core/lib/surface/completion_queue.c
+++ b/src/core/lib/surface/completion_queue.c
@@ -168,7 +168,8 @@
 #ifndef NDEBUG
   cc->outstanding_tag_count = 0;
 #endif
-  grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc);
+  grpc_closure_init(&cc->pollset_shutdown_done, on_pollset_shutdown_done, cc,
+                    grpc_schedule_on_exec_ctx);
 
   GPR_TIMER_END("grpc_completion_queue_create", 0);
 
diff --git a/src/core/lib/surface/lame_client.c b/src/core/lib/surface/lame_client.c
index 57da94a..f1ad137 100644
--- a/src/core/lib/surface/lame_client.c
+++ b/src/core/lib/surface/lame_client.c
@@ -98,16 +98,16 @@
   if (op->on_connectivity_state_change) {
     GPR_ASSERT(*op->connectivity_state != GRPC_CHANNEL_SHUTDOWN);
     *op->connectivity_state = GRPC_CHANNEL_SHUTDOWN;
-    grpc_exec_ctx_sched(exec_ctx, op->on_connectivity_state_change,
-                        GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, op->on_connectivity_state_change,
+                       GRPC_ERROR_NONE);
   }
   if (op->send_ping != NULL) {
-    grpc_exec_ctx_sched(exec_ctx, op->send_ping,
-                        GRPC_ERROR_CREATE("lame client channel"), NULL);
+    grpc_closure_sched(exec_ctx, op->send_ping,
+                       GRPC_ERROR_CREATE("lame client channel"));
   }
   GRPC_ERROR_UNREF(op->disconnect_with_error);
   if (op->on_consumed != NULL) {
-    grpc_exec_ctx_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
   }
 }
 
diff --git a/src/core/lib/surface/server.c b/src/core/lib/surface/server.c
index 62d7afc..78699e9 100644
--- a/src/core/lib/surface/server.c
+++ b/src/core/lib/surface/server.c
@@ -278,7 +278,8 @@
 static void send_shutdown(grpc_exec_ctx *exec_ctx, grpc_channel *channel,
                           int send_goaway, grpc_error *send_disconnect) {
   struct shutdown_cleanup_args *sc = gpr_malloc(sizeof(*sc));
-  grpc_closure_init(&sc->closure, shutdown_cleanup, sc);
+  grpc_closure_init(&sc->closure, shutdown_cleanup, sc,
+                    grpc_schedule_on_exec_ctx);
   grpc_transport_op *op = grpc_make_transport_op(&sc->closure);
   grpc_channel_element *elem;
 
@@ -346,9 +347,9 @@
     gpr_mu_unlock(&calld->mu_state);
     grpc_closure_init(
         &calld->kill_zombie_closure, kill_zombie,
-        grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
-    grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
-                        NULL);
+        grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
+        grpc_schedule_on_exec_ctx);
+    grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
   }
 }
 
@@ -440,8 +441,8 @@
   orphan_channel(chand);
   server_ref(chand->server);
   maybe_finish_shutdown(exec_ctx, chand->server);
-  chand->finish_destroy_channel_closure.cb = finish_destroy_channel;
-  chand->finish_destroy_channel_closure.cb_arg = chand;
+  grpc_closure_init(&chand->finish_destroy_channel_closure,
+                    finish_destroy_channel, chand, grpc_schedule_on_exec_ctx);
 
   if (grpc_server_channel_trace && error != GRPC_ERROR_NONE) {
     const char *msg = grpc_error_string(error);
@@ -545,8 +546,9 @@
     gpr_mu_unlock(&calld->mu_state);
     grpc_closure_init(
         &calld->kill_zombie_closure, kill_zombie,
-        grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
-    grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, error, NULL);
+        grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
+        grpc_schedule_on_exec_ctx);
+    grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, error);
     return;
   }
 
@@ -590,9 +592,9 @@
     gpr_mu_lock(&calld->mu_state);
     calld->state = ZOMBIED;
     gpr_mu_unlock(&calld->mu_state);
-    grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
-    grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE,
-                        NULL);
+    grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem,
+                      grpc_schedule_on_exec_ctx);
+    grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure, GRPC_ERROR_NONE);
     return;
   }
 
@@ -607,7 +609,8 @@
       memset(&op, 0, sizeof(op));
       op.op = GRPC_OP_RECV_MESSAGE;
       op.data.recv_message = &calld->payload;
-      grpc_closure_init(&calld->publish, publish_new_rpc, elem);
+      grpc_closure_init(&calld->publish, publish_new_rpc, elem,
+                        grpc_schedule_on_exec_ctx);
       grpc_call_start_batch_and_execute(exec_ctx, calld->call, &op, 1,
                                         &calld->publish);
       break;
@@ -813,9 +816,10 @@
     if (calld->state == NOT_STARTED) {
       calld->state = ZOMBIED;
       gpr_mu_unlock(&calld->mu_state);
-      grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem);
-      grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure,
-                          GRPC_ERROR_NONE, NULL);
+      grpc_closure_init(&calld->kill_zombie_closure, kill_zombie, elem,
+                        grpc_schedule_on_exec_ctx);
+      grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+                         GRPC_ERROR_NONE);
     } else if (calld->state == PENDING) {
       calld->state = ZOMBIED;
       gpr_mu_unlock(&calld->mu_state);
@@ -851,7 +855,8 @@
   memset(&op, 0, sizeof(op));
   op.op = GRPC_OP_RECV_INITIAL_METADATA;
   op.data.recv_initial_metadata = &calld->initial_metadata;
-  grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem);
+  grpc_closure_init(&calld->got_initial_metadata, got_initial_metadata, elem,
+                    grpc_schedule_on_exec_ctx);
   grpc_call_start_batch_and_execute(exec_ctx, call, &op, 1,
                                     &calld->got_initial_metadata);
 }
@@ -887,7 +892,8 @@
   gpr_mu_init(&calld->mu_state);
 
   grpc_closure_init(&calld->server_on_recv_initial_metadata,
-                    server_on_recv_initial_metadata, elem);
+                    server_on_recv_initial_metadata, elem,
+                    grpc_schedule_on_exec_ctx);
 
   server_ref(chand->server);
   return GRPC_ERROR_NONE;
@@ -926,7 +932,8 @@
   chand->registered_methods = NULL;
   chand->connectivity_state = GRPC_CHANNEL_IDLE;
   grpc_closure_init(&chand->channel_connectivity_changed,
-                    channel_connectivity_changed, chand);
+                    channel_connectivity_changed, chand,
+                    grpc_schedule_on_exec_ctx);
   return GRPC_ERROR_NONE;
 }
 
@@ -1278,7 +1285,8 @@
 
   /* Shutdown listeners */
   for (l = server->listeners; l; l = l->next) {
-    grpc_closure_init(&l->destroy_done, listener_destroy_done, server);
+    grpc_closure_init(&l->destroy_done, listener_destroy_done, server,
+                      grpc_schedule_on_exec_ctx);
     l->destroy(&exec_ctx, server, l->arg, &l->destroy_done);
   }
 
@@ -1384,9 +1392,10 @@
         gpr_mu_unlock(&calld->mu_state);
         grpc_closure_init(
             &calld->kill_zombie_closure, kill_zombie,
-            grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0));
-        grpc_exec_ctx_sched(exec_ctx, &calld->kill_zombie_closure,
-                            GRPC_ERROR_NONE, NULL);
+            grpc_call_stack_element(grpc_call_get_call_stack(calld->call), 0),
+            grpc_schedule_on_exec_ctx);
+        grpc_closure_sched(exec_ctx, &calld->kill_zombie_closure,
+                           GRPC_ERROR_NONE);
       } else {
         GPR_ASSERT(calld->state == PENDING);
         calld->state = ACTIVATED;
diff --git a/src/core/lib/transport/connectivity_state.c b/src/core/lib/transport/connectivity_state.c
index 4f49d7c..c656d93 100644
--- a/src/core/lib/transport/connectivity_state.c
+++ b/src/core/lib/transport/connectivity_state.c
@@ -81,7 +81,7 @@
     } else {
       error = GRPC_ERROR_CREATE("Shutdown connectivity owner");
     }
-    grpc_exec_ctx_sched(exec_ctx, w->notify, error, NULL);
+    grpc_closure_sched(exec_ctx, w->notify, error);
     gpr_free(w);
   }
   GRPC_ERROR_UNREF(tracker->current_error);
@@ -121,7 +121,7 @@
   if (current == NULL) {
     grpc_connectivity_state_watcher *w = tracker->watchers;
     if (w != NULL && w->notify == notify) {
-      grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
+      grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
       tracker->watchers = w->next;
       gpr_free(w);
       return false;
@@ -129,7 +129,7 @@
     while (w != NULL) {
       grpc_connectivity_state_watcher *rm_candidate = w->next;
       if (rm_candidate != NULL && rm_candidate->notify == notify) {
-        grpc_exec_ctx_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED, NULL);
+        grpc_closure_sched(exec_ctx, notify, GRPC_ERROR_CANCELLED);
         w->next = w->next->next;
         gpr_free(rm_candidate);
         return false;
@@ -140,8 +140,8 @@
   } else {
     if (tracker->current_state != *current) {
       *current = tracker->current_state;
-      grpc_exec_ctx_sched(exec_ctx, notify,
-                          GRPC_ERROR_REF(tracker->current_error), NULL);
+      grpc_closure_sched(exec_ctx, notify,
+                         GRPC_ERROR_REF(tracker->current_error));
     } else {
       grpc_connectivity_state_watcher *w = gpr_malloc(sizeof(*w));
       w->current = current;
@@ -191,8 +191,8 @@
       gpr_log(GPR_DEBUG, "NOTIFY: %p %s: %p", tracker, tracker->name,
               w->notify);
     }
-    grpc_exec_ctx_sched(exec_ctx, w->notify,
-                        GRPC_ERROR_REF(tracker->current_error), NULL);
+    grpc_closure_sched(exec_ctx, w->notify,
+                       GRPC_ERROR_REF(tracker->current_error));
     gpr_free(w);
   }
 }
diff --git a/src/core/lib/transport/transport.c b/src/core/lib/transport/transport.c
index b448126..0d24062 100644
--- a/src/core/lib/transport/transport.c
+++ b/src/core/lib/transport/transport.c
@@ -68,7 +68,7 @@
                        grpc_stream_refcount *refcount) {
 #endif
   if (gpr_unref(&refcount->refs)) {
-    grpc_exec_ctx_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, &refcount->destroy, GRPC_ERROR_NONE);
   }
 }
 
@@ -82,7 +82,7 @@
                           grpc_iomgr_cb_func cb, void *cb_arg) {
 #endif
   gpr_ref_init(&refcount->refs, initial_refs);
-  grpc_closure_init(&refcount->destroy, cb, cb_arg);
+  grpc_closure_init(&refcount->destroy, cb, cb_arg, grpc_schedule_on_exec_ctx);
 }
 
 static void move64(uint64_t *from, uint64_t *to) {
@@ -168,11 +168,10 @@
 void grpc_transport_stream_op_finish_with_failure(grpc_exec_ctx *exec_ctx,
                                                   grpc_transport_stream_op *op,
                                                   grpc_error *error) {
-  grpc_exec_ctx_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error),
-                      NULL);
-  grpc_exec_ctx_sched(exec_ctx, op->recv_initial_metadata_ready,
-                      GRPC_ERROR_REF(error), NULL);
-  grpc_exec_ctx_sched(exec_ctx, op->on_complete, error, NULL);
+  grpc_closure_sched(exec_ctx, op->recv_message_ready, GRPC_ERROR_REF(error));
+  grpc_closure_sched(exec_ctx, op->recv_initial_metadata_ready,
+                     GRPC_ERROR_REF(error));
+  grpc_closure_sched(exec_ctx, op->on_complete, error);
 }
 
 typedef struct {
@@ -196,7 +195,8 @@
   cmd = gpr_malloc(sizeof(*cmd));
   cmd->error = error;
   cmd->then_call = op->on_complete;
-  grpc_closure_init(&cmd->closure, free_message, cmd);
+  grpc_closure_init(&cmd->closure, free_message, cmd,
+                    grpc_schedule_on_exec_ctx);
   op->on_complete = &cmd->closure;
   *which = error;
 }
@@ -269,14 +269,14 @@
 static void destroy_made_transport_op(grpc_exec_ctx *exec_ctx, void *arg,
                                       grpc_error *error) {
   made_transport_op *op = arg;
-  grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error),
-                      NULL);
+  grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
   gpr_free(op);
 }
 
 grpc_transport_op *grpc_make_transport_op(grpc_closure *on_complete) {
   made_transport_op *op = gpr_malloc(sizeof(*op));
-  grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op);
+  grpc_closure_init(&op->outer_on_complete, destroy_made_transport_op, op,
+                    grpc_schedule_on_exec_ctx);
   op->inner_on_complete = on_complete;
   memset(&op->op, 0, sizeof(op->op));
   op->op.on_consumed = &op->outer_on_complete;
@@ -292,8 +292,7 @@
 static void destroy_made_transport_stream_op(grpc_exec_ctx *exec_ctx, void *arg,
                                              grpc_error *error) {
   made_transport_stream_op *op = arg;
-  grpc_exec_ctx_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error),
-                      NULL);
+  grpc_closure_sched(exec_ctx, op->inner_on_complete, GRPC_ERROR_REF(error));
   gpr_free(op);
 }
 
@@ -301,7 +300,7 @@
     grpc_closure *on_complete) {
   made_transport_stream_op *op = gpr_malloc(sizeof(*op));
   grpc_closure_init(&op->outer_on_complete, destroy_made_transport_stream_op,
-                    op);
+                    op, grpc_schedule_on_exec_ctx);
   op->inner_on_complete = on_complete;
   memset(&op->op, 0, sizeof(op->op));
   op->op.on_complete = &op->outer_on_complete;
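
`grpc_make_transport_op` and its stream variant keep the wrap-and-forward idiom under the new API: the heap-allocated wrapper embeds an outer closure that forwards the error to the caller's inner closure and then frees itself. A condensed sketch of that pattern, with struct and function names abbreviated from the diff:

```c
#include <grpc/support/alloc.h>

#include "src/core/lib/iomgr/closure.h"
#include "src/core/lib/iomgr/exec_ctx.h"

typedef struct {
  grpc_closure outer;  /* handed to the transport as on_consumed/on_complete */
  grpc_closure *inner; /* the caller's original closure */
} wrapped_op;

static void run_wrapped(grpc_exec_ctx *exec_ctx, void *arg,
                        grpc_error *error) {
  wrapped_op *op = arg;
  grpc_closure_sched(exec_ctx, op->inner, GRPC_ERROR_REF(error));
  gpr_free(op); /* the wrapper owns itself */
}

static wrapped_op *wrap(grpc_closure *on_complete) {
  wrapped_op *op = gpr_malloc(sizeof(*op));
  grpc_closure_init(&op->outer, run_wrapped, op, grpc_schedule_on_exec_ctx);
  op->inner = on_complete;
  return op;
}
```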
diff --git a/src/php/lib/Grpc/AbstractCall.php b/src/php/lib/Grpc/AbstractCall.php
index c4d5679..9f0b02b 100644
--- a/src/php/lib/Grpc/AbstractCall.php
+++ b/src/php/lib/Grpc/AbstractCall.php
@@ -62,7 +62,7 @@
         Channel $channel,
         $method,
         $deserialize,
-        $options = []
+        array $options = []
     ) {
         if (array_key_exists('timeout', $options) &&
             is_numeric($timeout = $options['timeout'])
@@ -89,7 +89,7 @@
     }
 
     /**
-     * @return mixed The metadata sent by the server.
+     * @return mixed The metadata sent by the server
      */
     public function getMetadata()
     {
@@ -97,7 +97,7 @@
     }
 
     /**
-     * @return mixed The trailing metadata sent by the server.
+     * @return mixed The trailing metadata sent by the server
      */
     public function getTrailingMetadata()
     {
@@ -105,7 +105,7 @@
     }
 
     /**
-     * @return string The URI of the endpoint.
+     * @return string The URI of the endpoint
      */
     public function getPeer()
     {
@@ -167,8 +167,7 @@
     /**
      * Set the CallCredentials for the underlying Call.
      *
-     * @param CallCredentials $call_credentials The CallCredentials
-     *                                          object
+     * @param CallCredentials $call_credentials The CallCredentials object
      */
     public function setCallCredentials($call_credentials)
     {
diff --git a/src/php/lib/Grpc/BaseStub.php b/src/php/lib/Grpc/BaseStub.php
index d0baeae..aec60af 100644
--- a/src/php/lib/Grpc/BaseStub.php
+++ b/src/php/lib/Grpc/BaseStub.php
@@ -48,14 +48,14 @@
     private $update_metadata;
 
     /**
-     * @param $hostname string
-     * @param $opts array
+     * @param string  $hostname
+     * @param array   $opts
      *  - 'update_metadata': (optional) a callback function which takes in a
      * metadata array, and returns an updated metadata array
      *  - 'grpc.primary_user_agent': (optional) a user-agent string
-     * @param $channel Channel An already created Channel object
+     * @param Channel $channel An already created Channel object (optional)
      */
-    public function __construct($hostname, $opts, $channel = null)
+    public function __construct($hostname, $opts, Channel $channel = null)
     {
         $ssl_roots = file_get_contents(
             dirname(__FILE__).'/../../../../etc/roots.pem');
@@ -98,7 +98,7 @@
     }
 
     /**
-     * @return string The URI of the endpoint.
+     * @return string The URI of the endpoint
      */
     public function getTarget()
     {
@@ -106,7 +106,7 @@
     }
 
     /**
-     * @param $try_to_connect bool
+     * @param bool $try_to_connect (optional)
      *
      * @return int The grpc connectivity state
      */
@@ -145,6 +145,12 @@
         return $this->_checkConnectivityState($new_state);
     }
 
+    /**
+     * @param int $new_state The new connectivity state
+     *
+     * @return bool true if the state is CHANNEL_READY
+     * @throw Exception if the state is CHANNEL_FATAL_FAILURE
+     */
     private function _checkConnectivityState($new_state)
     {
         if ($new_state == \Grpc\CHANNEL_READY) {
@@ -167,6 +173,10 @@
 
     /**
      * constructs the auth uri for the jwt.
+     *
+     * @param string $method The method string
+     *
+     * @return string The URL string
      */
     private function _get_jwt_aud_uri($method)
     {
@@ -191,7 +201,7 @@
      *
      * @param array $metadata The metadata map
      *
-     * @return $metadata Validated and key-normalized metadata map
+     * @return array $metadata Validated and key-normalized metadata map
      * @throw InvalidArgumentException if key contains invalid characters
      */
     private function _validate_and_normalize_metadata($metadata)
@@ -220,14 +230,16 @@
      * @param mixed    $argument    The argument to the method
      * @param callable $deserialize A function that deserializes the response
      * @param array    $metadata    A metadata map to send to the server
+     *                              (optional)
+     * @param array    $options     An array of options (optional)
      *
      * @return SimpleSurfaceActiveCall The active call object
      */
     public function _simpleRequest($method,
                                    $argument,
                                    $deserialize,
-                                   $metadata = [],
-                                   $options = [])
+                                   array $metadata = [],
+                                   array $options = [])
     {
         $call = new UnaryCall($this->channel,
                               $method,
@@ -251,17 +263,17 @@
      * output.
      *
      * @param string   $method      The name of the method to call
-     * @param array    $arguments   An array or Traversable of arguments to stream to the
-     *                              server
      * @param callable $deserialize A function that deserializes the response
      * @param array    $metadata    A metadata map to send to the server
+     *                              (optional)
+     * @param array    $options     An array of options (optional)
      *
      * @return ClientStreamingSurfaceActiveCall The active call object
      */
     public function _clientStreamRequest($method,
                                          callable $deserialize,
-                                         $metadata = [],
-                                         $options = [])
+                                         array $metadata = [],
+                                         array $options = [])
     {
         $call = new ClientStreamingCall($this->channel,
                                         $method,
@@ -281,21 +293,23 @@
     }
 
     /**
-     * Call a remote method that takes a single argument and returns a stream of
-     * responses.
+     * Call a remote method that takes a single argument and returns a stream
+     * of responses.
      *
      * @param string   $method      The name of the method to call
      * @param mixed    $argument    The argument to the method
      * @param callable $deserialize A function that deserializes the responses
      * @param array    $metadata    A metadata map to send to the server
+     *                              (optional)
+     * @param array    $options     An array of options (optional)
      *
      * @return ServerStreamingSurfaceActiveCall The active call object
      */
     public function _serverStreamRequest($method,
                                          $argument,
                                          callable $deserialize,
-                                         $metadata = [],
-                                         $options = [])
+                                         array $metadata = [],
+                                         array $options = [])
     {
         $call = new ServerStreamingCall($this->channel,
                                         $method,
@@ -320,13 +334,15 @@
      * @param string   $method      The name of the method to call
      * @param callable $deserialize A function that deserializes the responses
      * @param array    $metadata    A metadata map to send to the server
+     *                              (optional)
+     * @param array    $options     An array of options (optional)
      *
      * @return BidiStreamingSurfaceActiveCall The active call object
      */
     public function _bidiRequest($method,
                                  callable $deserialize,
-                                 $metadata = [],
-                                 $options = [])
+                                 array $metadata = [],
+                                 array $options = [])
     {
         $call = new BidiStreamingCall($this->channel,
                                       $method,
diff --git a/src/php/lib/Grpc/BidiStreamingCall.php b/src/php/lib/Grpc/BidiStreamingCall.php
index f0e1e81..b03bbd2 100644
--- a/src/php/lib/Grpc/BidiStreamingCall.php
+++ b/src/php/lib/Grpc/BidiStreamingCall.php
@@ -35,8 +35,8 @@
 namespace Grpc;
 
 /**
- * Represents an active call that allows for sending and recieving messages in
- * streams in any order.
+ * Represents an active call that allows for sending and receiving messages
+ * in streams in any order.
  */
 class BidiStreamingCall extends AbstractCall
 {
@@ -44,6 +44,7 @@
      * Start the call.
      *
      * @param array $metadata Metadata to send with the call, if applicable
+     *                        (optional)
      */
     public function start(array $metadata = [])
     {
@@ -76,10 +77,10 @@
      * writesDone is called.
      *
      * @param ByteBuffer $data    The data to write
-     * @param array      $options an array of options, possible keys:
-     *                            'flags' => a number
+     * @param array      $options An array of options, possible keys:
+     *                            'flags' => a number (optional)
      */
-    public function write($data, $options = [])
+    public function write($data, array $options = [])
     {
         $message_array = ['message' => $this->serializeMessage($data)];
         if (array_key_exists('flags', $options)) {
@@ -103,8 +104,8 @@
     /**
      * Wait for the server to send the status, and return it.
      *
-     * @return \stdClass The status object, with integer $code, string $details,
-     *                   and array $metadata members
+     * @return \stdClass The status object, with integer $code, string
+     *                   $details, and array $metadata members
      */
     public function getStatus()
     {
diff --git a/src/php/lib/Grpc/ClientStreamingCall.php b/src/php/lib/Grpc/ClientStreamingCall.php
index 20db809..c542f08 100644
--- a/src/php/lib/Grpc/ClientStreamingCall.php
+++ b/src/php/lib/Grpc/ClientStreamingCall.php
@@ -35,8 +35,8 @@
 namespace Grpc;
 
 /**
- * Represents an active call that sends a stream of messages and then gets a
- * single response.
+ * Represents an active call that sends a stream of messages and then gets
+ * a single response.
  */
 class ClientStreamingCall extends AbstractCall
 {
@@ -44,8 +44,9 @@
      * Start the call.
      *
      * @param array $metadata Metadata to send with the call, if applicable
+     *                        (optional)
      */
-    public function start($metadata = [])
+    public function start(array $metadata = [])
     {
         $this->call->startBatch([
             OP_SEND_INITIAL_METADATA => $metadata,
@@ -57,8 +58,8 @@
      * wait is called.
      *
      * @param ByteBuffer $data    The data to write
-     * @param array      $options an array of options, possible keys:
-     *                            'flags' => a number
+     * @param array      $options An array of options, possible keys:
+     *                            'flags' => a number (optional)
      */
     public function write($data, array $options = [])
     {
diff --git a/src/php/lib/Grpc/ServerStreamingCall.php b/src/php/lib/Grpc/ServerStreamingCall.php
index 5aeeafa..406512b 100644
--- a/src/php/lib/Grpc/ServerStreamingCall.php
+++ b/src/php/lib/Grpc/ServerStreamingCall.php
@@ -35,8 +35,8 @@
 namespace Grpc;
 
 /**
- * Represents an active call that sends a single message and then gets a stream
- * of responses.
+ * Represents an active call that sends a single message and then gets a
+ * stream of responses.
  */
 class ServerStreamingCall extends AbstractCall
 {
@@ -45,10 +45,11 @@
      *
      * @param mixed $data     The data to send
      * @param array $metadata Metadata to send with the call, if applicable
-     * @param array $options  an array of options, possible keys:
-     *                        'flags' => a number
+     *                        (optional)
+     * @param array $options  An array of options, possible keys:
+     *                        'flags' => a number (optional)
      */
-    public function start($data, $metadata = [], $options = [])
+    public function start($data, array $metadata = [], array $options = [])
     {
         $message_array = ['message' => $this->serializeMessage($data)];
         if (array_key_exists('flags', $options)) {
@@ -82,8 +83,8 @@
     /**
      * Wait for the server to send the status, and return it.
      *
-     * @return \stdClass The status object, with integer $code, string $details,
-     *                   and array $metadata members
+     * @return \stdClass The status object, with integer $code, string
+     *                   $details, and array $metadata members
      */
     public function getStatus()
     {
diff --git a/src/php/lib/Grpc/UnaryCall.php b/src/php/lib/Grpc/UnaryCall.php
index e8eb648..3c1cb15 100644
--- a/src/php/lib/Grpc/UnaryCall.php
+++ b/src/php/lib/Grpc/UnaryCall.php
@@ -35,8 +35,8 @@
 namespace Grpc;
 
 /**
- * Represents an active call that sends a single message and then gets a single
- * response.
+ * Represents an active call that sends a single message and then gets a
+ * single response.
  */
 class UnaryCall extends AbstractCall
 {
@@ -45,10 +45,11 @@
      *
      * @param mixed $data     The data to send
      * @param array $metadata Metadata to send with the call, if applicable
-     * @param array $options  an array of options, possible keys:
-     *                        'flags' => a number
+     *                        (optional)
+     * @param array $options  An array of options, possible keys:
+     *                        'flags' => a number (optional)
      */
-    public function start($data, $metadata = [], $options = [])
+    public function start($data, array $metadata = [], array $options = [])
     {
         $message_array = ['message' => $this->serializeMessage($data)];
         if (isset($options['flags'])) {
diff --git a/src/python/grpcio/commands.py b/src/python/grpcio/commands.py
index ea3b6f3..701c6af 100644
--- a/src/python/grpcio/commands.py
+++ b/src/python/grpcio/commands.py
@@ -62,6 +62,7 @@
 napoleon_include_special_with_doc = True
 
 html_theme = 'sphinx_rtd_theme'
+copyright = "2016, The gRPC Authors"
 """
 
 API_GLOSSARY = """
diff --git a/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
similarity index 94%
rename from src/python/grpcio_tests/tests/interop/_insecure_interop_test.py
rename to src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
index 936c895..4fb22b4 100644
--- a/src/python/grpcio_tests/tests/interop/_insecure_interop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_insecure_intraop_test.py
@@ -35,13 +35,13 @@
 import grpc
 from src.proto.grpc.testing import test_pb2
 
-from tests.interop import _interop_test_case
+from tests.interop import _intraop_test_case
 from tests.interop import methods
 from tests.interop import server
 
 
-class InsecureInteropTest(
-    _interop_test_case.InteropTestCase,
+class InsecureIntraopTest(
+    _intraop_test_case.IntraopTestCase,
     unittest.TestCase):
 
   def setUp(self):
diff --git a/src/python/grpcio_tests/tests/interop/_interop_test_case.py b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py
similarity index 98%
rename from src/python/grpcio_tests/tests/interop/_interop_test_case.py
rename to src/python/grpcio_tests/tests/interop/_intraop_test_case.py
index ccea17a..fe1c173 100644
--- a/src/python/grpcio_tests/tests/interop/_interop_test_case.py
+++ b/src/python/grpcio_tests/tests/interop/_intraop_test_case.py
@@ -32,7 +32,7 @@
 from tests.interop import methods
 
 
-class InteropTestCase(object):
+class IntraopTestCase(object):
   """Unit test methods.
 
   This class must be mixed in with unittest.TestCase and a class that defines
diff --git a/src/python/grpcio_tests/tests/interop/_secure_interop_test.py b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
similarity index 95%
rename from src/python/grpcio_tests/tests/interop/_secure_interop_test.py
rename to src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
index eaca553..3665c69 100644
--- a/src/python/grpcio_tests/tests/interop/_secure_interop_test.py
+++ b/src/python/grpcio_tests/tests/interop/_secure_intraop_test.py
@@ -35,15 +35,15 @@
 import grpc
 from src.proto.grpc.testing import test_pb2
 
-from tests.interop import _interop_test_case
+from tests.interop import _intraop_test_case
 from tests.interop import methods
 from tests.interop import resources
 
 _SERVER_HOST_OVERRIDE = 'foo.test.google.fr'
 
 
-class SecureInteropTest(
-    _interop_test_case.InteropTestCase,
+class SecureIntraopTest(
+    _intraop_test_case.IntraopTestCase,
     unittest.TestCase):
 
   def setUp(self):
diff --git a/src/python/grpcio_tests/tests/tests.json b/src/python/grpcio_tests/tests/tests.json
index c31a5f9..0109ee2 100644
--- a/src/python/grpcio_tests/tests/tests.json
+++ b/src/python/grpcio_tests/tests/tests.json
@@ -1,7 +1,7 @@
 [
   "health_check._health_servicer_test.HealthServicerTest",
-  "interop._insecure_interop_test.InsecureInteropTest",
-  "interop._secure_interop_test.SecureInteropTest",
+  "interop._insecure_intraop_test.InsecureIntraopTest",
+  "interop._secure_intraop_test.SecureIntraopTest",
   "protoc_plugin._python_plugin_test.PythonPluginTest",
   "protoc_plugin._split_definitions_test.SameCommonTest",
   "protoc_plugin._split_definitions_test.SameSeparateTest",
diff --git a/src/ruby/lib/grpc/generic/bidi_call.rb b/src/ruby/lib/grpc/generic/bidi_call.rb
index 8943f3f..adc77bb 100644
--- a/src/ruby/lib/grpc/generic/bidi_call.rb
+++ b/src/ruby/lib/grpc/generic/bidi_call.rb
@@ -200,6 +200,7 @@
             if is_client
               batch_result = @call.run_batch(RECV_STATUS_ON_CLIENT => nil)
               @call.status = batch_result.status
+              @call.trailing_metadata = @call.status.metadata if @call.status
               batch_result.check_status
               GRPC.logger.debug("bidi-read-loop: done status #{@call.status}")
             end
diff --git a/src/ruby/lib/grpc/generic/service.rb b/src/ruby/lib/grpc/generic/service.rb
index 84f1ce7..f5a6b49 100644
--- a/src/ruby/lib/grpc/generic/service.rb
+++ b/src/ruby/lib/grpc/generic/service.rb
@@ -110,7 +110,7 @@
         rpc_descs[name] = RpcDesc.new(name, input, output,
                                       marshal_class_method,
                                       unmarshal_class_method)
-        define_method(GenericService.underscore(name.to_s).to_sym) do
+        define_method(GenericService.underscore(name.to_s).to_sym) do |_, _|
           fail GRPC::BadStatus.new_status_exception(
             GRPC::Core::StatusCodes::UNIMPLEMENTED)
         end
diff --git a/src/ruby/spec/generic/rpc_server_pool_spec.rb b/src/ruby/spec/generic/rpc_server_pool_spec.rb
index 48ccaee..69e8222 100644
--- a/src/ruby/spec/generic/rpc_server_pool_spec.rb
+++ b/src/ruby/spec/generic/rpc_server_pool_spec.rb
@@ -94,18 +94,6 @@
       expect(q.pop).to be(o)
       p.stop
     end
-
-    it 'it throws an error if all of the workers have tasks to do' do
-      p = Pool.new(5)
-      p.start
-      job = proc {}
-      5.times do
-        expect(p.ready_for_work?).to be(true)
-        p.schedule(&job)
-      end
-      expect { p.schedule(&job) }.to raise_error
-      expect { p.schedule(&job) }.to raise_error
-    end
   end
 
   describe '#stop' do
diff --git a/test/core/bad_client/bad_client.c b/test/core/bad_client/bad_client.c
index 07fcd99..d579dcc 100644
--- a/test/core/bad_client/bad_client.c
+++ b/test/core/bad_client/bad_client.c
@@ -148,7 +148,8 @@
 
   grpc_slice_buffer_init(&outgoing);
   grpc_slice_buffer_add(&outgoing, slice);
-  grpc_closure_init(&done_write_closure, done_write, &a);
+  grpc_closure_init(&done_write_closure, done_write, &a,
+                    grpc_schedule_on_exec_ctx);
 
   /* Write data */
   grpc_endpoint_write(&exec_ctx, sfd.client, &outgoing, &done_write_closure);
@@ -175,7 +176,8 @@
       grpc_slice_buffer_init(&args.incoming);
       gpr_event_init(&args.read_done);
       grpc_closure read_done_closure;
-      grpc_closure_init(&read_done_closure, read_done, &args);
+      grpc_closure_init(&read_done_closure, read_done, &args,
+                        grpc_schedule_on_exec_ctx);
       grpc_endpoint_read(&exec_ctx, sfd.client, &args.incoming,
                          &read_done_closure);
       grpc_exec_ctx_finish(&exec_ctx);
diff --git a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c
index b421720..169323e 100644
--- a/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c
+++ b/test/core/client_channel/resolvers/dns_resolver_connectivity_test.c
@@ -108,16 +108,18 @@
   grpc_resolver *resolver = create_resolver(&exec_ctx, "dns:test");
   gpr_event ev1;
   gpr_event_init(&ev1);
-  grpc_resolver_next(&exec_ctx, resolver, &result,
-                     grpc_closure_create(on_done, &ev1));
+  grpc_resolver_next(
+      &exec_ctx, resolver, &result,
+      grpc_closure_create(on_done, &ev1, grpc_schedule_on_exec_ctx));
   grpc_exec_ctx_flush(&exec_ctx);
   GPR_ASSERT(wait_loop(5, &ev1));
   GPR_ASSERT(result == NULL);
 
   gpr_event ev2;
   gpr_event_init(&ev2);
-  grpc_resolver_next(&exec_ctx, resolver, &result,
-                     grpc_closure_create(on_done, &ev2));
+  grpc_resolver_next(
+      &exec_ctx, resolver, &result,
+      grpc_closure_create(on_done, &ev2, grpc_schedule_on_exec_ctx));
   grpc_exec_ctx_flush(&exec_ctx);
   GPR_ASSERT(wait_loop(30, &ev2));
   GPR_ASSERT(result != NULL);
diff --git a/test/core/client_channel/resolvers/sockaddr_resolver_test.c b/test/core/client_channel/resolvers/sockaddr_resolver_test.c
index a9fd85a..d6c8920 100644
--- a/test/core/client_channel/resolvers/sockaddr_resolver_test.c
+++ b/test/core/client_channel/resolvers/sockaddr_resolver_test.c
@@ -68,8 +68,8 @@
   on_resolution_arg on_res_arg;
   memset(&on_res_arg, 0, sizeof(on_res_arg));
   on_res_arg.expected_server_name = uri->path;
-  grpc_closure *on_resolution =
-      grpc_closure_create(on_resolution_cb, &on_res_arg);
+  grpc_closure *on_resolution = grpc_closure_create(
+      on_resolution_cb, &on_res_arg, grpc_schedule_on_exec_ctx);
 
   grpc_resolver_next(&exec_ctx, resolver, &on_res_arg.resolver_result,
                      on_resolution);
diff --git a/test/core/client_channel/set_initial_connect_string_test.c b/test/core/client_channel/set_initial_connect_string_test.c
index 11e5743..2082f65 100644
--- a/test/core/client_channel/set_initial_connect_string_test.c
+++ b/test/core/client_channel/set_initial_connect_string_test.c
@@ -94,7 +94,7 @@
                        grpc_tcp_server_acceptor *acceptor) {
   gpr_free(acceptor);
   test_tcp_server *server = arg;
-  grpc_closure_init(&on_read, handle_read, NULL);
+  grpc_closure_init(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&state.incoming_buffer);
   grpc_slice_buffer_init(&state.temp_incoming_buffer);
   state.tcp = tcp;
diff --git a/test/core/end2end/bad_server_response_test.c b/test/core/end2end/bad_server_response_test.c
index 3046855..f6a9cbe 100644
--- a/test/core/end2end/bad_server_response_test.c
+++ b/test/core/end2end/bad_server_response_test.c
@@ -147,8 +147,8 @@
                        grpc_tcp_server_acceptor *acceptor) {
   gpr_free(acceptor);
   test_tcp_server *server = arg;
-  grpc_closure_init(&on_read, handle_read, NULL);
-  grpc_closure_init(&on_write, done_write, NULL);
+  grpc_closure_init(&on_read, handle_read, NULL, grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&on_write, done_write, NULL, grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&state.temp_incoming_buffer);
   grpc_slice_buffer_init(&state.outgoing_buffer);
   state.tcp = tcp;
diff --git a/test/core/end2end/fake_resolver.c b/test/core/end2end/fake_resolver.c
index ed85030..45d4872 100644
--- a/test/core/end2end/fake_resolver.c
+++ b/test/core/end2end/fake_resolver.c
@@ -87,7 +87,7 @@
   gpr_mu_lock(&r->mu);
   if (r->next_completion != NULL) {
     *r->target_result = NULL;
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = NULL;
   }
   gpr_mu_unlock(&r->mu);
@@ -100,7 +100,7 @@
     grpc_arg arg = grpc_lb_addresses_create_channel_arg(r->addresses);
     *r->target_result =
         grpc_channel_args_copy_and_add(r->channel_args, &arg, 1);
-    grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
     r->next_completion = NULL;
   }
 }
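
The companion rename: grpc_exec_ctx_sched(exec_ctx, c, error, NULL) becomes grpc_closure_sched(exec_ctx, c, error). The trailing workqueue argument disappears because the destination now lives inside the closure itself. Before and after, with the hedge that (as these call sites suggest) the error reference is handed off to the scheduled closure:

    /* before: destination picked per call, with a trailing workqueue arg */
    grpc_exec_ctx_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE, NULL);

    /* after: the closure was created with its scheduler; just pass the
       error, whose reference the scheduled closure consumes */
    grpc_closure_sched(exec_ctx, r->next_completion, GRPC_ERROR_NONE);
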
diff --git a/test/core/end2end/fixtures/http_proxy.c b/test/core/end2end/fixtures/http_proxy.c
index 80865fc..ca7d9e9 100644
--- a/test/core/end2end/fixtures/http_proxy.c
+++ b/test/core/end2end/fixtures/http_proxy.c
@@ -376,15 +376,20 @@
   gpr_ref_init(&conn->refcount, 1);
   conn->pollset_set = grpc_pollset_set_create();
   grpc_pollset_set_add_pollset(exec_ctx, conn->pollset_set, proxy->pollset);
-  grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn);
-  grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done,
-                    conn);
-  grpc_closure_init(&conn->on_write_response_done, on_write_response_done,
-                    conn);
-  grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn);
-  grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn);
-  grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn);
-  grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn);
+  grpc_closure_init(&conn->on_read_request_done, on_read_request_done, conn,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&conn->on_server_connect_done, on_server_connect_done, conn,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&conn->on_write_response_done, on_write_response_done, conn,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&conn->on_client_read_done, on_client_read_done, conn,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&conn->on_client_write_done, on_client_write_done, conn,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&conn->on_server_read_done, on_server_read_done, conn,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&conn->on_server_write_done, on_server_write_done, conn,
+                    grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&conn->client_read_buffer);
   grpc_slice_buffer_init(&conn->client_deferred_write_buffer);
   grpc_slice_buffer_init(&conn->client_write_buffer);
@@ -471,7 +476,8 @@
   gpr_free(proxy->proxy_name);
   grpc_channel_args_destroy(proxy->channel_args);
   grpc_closure destroyed;
-  grpc_closure_init(&destroyed, destroy_pollset, proxy->pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, proxy->pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, proxy->pollset, &destroyed);
   gpr_free(proxy);
   grpc_exec_ctx_finish(&exec_ctx);
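
The proxy teardown uses a pattern repeated throughout these fixtures: grpc_pollset_shutdown is handed a closure that destroys the pollset once shutdown completes, and a final flush runs it. A condensed sketch (destroy_pollset is the conventional test helper; the pollset variable is assumed in scope):

    static void destroy_pollset(grpc_exec_ctx *exec_ctx, void *p,
                                grpc_error *error) {
      grpc_pollset_destroy(p); /* safe here: shutdown has completed */
    }

    grpc_closure destroyed;
    grpc_closure_init(&destroyed, destroy_pollset, pollset,
                      grpc_schedule_on_exec_ctx);
    grpc_pollset_shutdown(&exec_ctx, pollset, &destroyed);
    grpc_exec_ctx_finish(&exec_ctx); /* runs the destroy callback */
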
diff --git a/test/core/end2end/fuzzers/api_fuzzer.c b/test/core/end2end/fuzzers/api_fuzzer.c
index 746134c..8136f93 100644
--- a/test/core/end2end/fuzzers/api_fuzzer.c
+++ b/test/core/end2end/fuzzers/api_fuzzer.c
@@ -349,11 +349,11 @@
     addrs->addrs = gpr_malloc(sizeof(*addrs->addrs));
     addrs->addrs[0].len = 0;
     *r->addrs = addrs;
-    grpc_exec_ctx_sched(exec_ctx, r->on_done, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, r->on_done, GRPC_ERROR_NONE);
   } else {
-    grpc_exec_ctx_sched(
+    grpc_closure_sched(
         exec_ctx, r->on_done,
-        GRPC_ERROR_CREATE_REFERENCING("Resolution failed", &error, 1), NULL);
+        GRPC_ERROR_CREATE_REFERENCING("Resolution failed", &error, 1));
   }
 
   gpr_free(r->addr);
@@ -398,7 +398,7 @@
   future_connect *fc = arg;
   if (error != GRPC_ERROR_NONE) {
     *fc->ep = NULL;
-    grpc_exec_ctx_sched(exec_ctx, fc->closure, GRPC_ERROR_REF(error), NULL);
+    grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_REF(error));
   } else if (g_server != NULL) {
     grpc_endpoint *client;
     grpc_endpoint *server;
@@ -410,7 +410,7 @@
     grpc_server_setup_transport(exec_ctx, g_server, transport, NULL, NULL);
     grpc_chttp2_transport_start_reading(exec_ctx, transport, NULL);
 
-    grpc_exec_ctx_sched(exec_ctx, fc->closure, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, fc->closure, GRPC_ERROR_NONE);
   } else {
     sched_connect(exec_ctx, fc->closure, fc->ep, fc->deadline);
   }
@@ -421,8 +421,8 @@
                           grpc_endpoint **ep, gpr_timespec deadline) {
   if (gpr_time_cmp(deadline, gpr_now(deadline.clock_type)) < 0) {
     *ep = NULL;
-    grpc_exec_ctx_sched(exec_ctx, closure,
-                        GRPC_ERROR_CREATE("Connect deadline exceeded"), NULL);
+    grpc_closure_sched(exec_ctx, closure,
+                       GRPC_ERROR_CREATE("Connect deadline exceeded"));
     return;
   }
 
diff --git a/test/core/end2end/tests/filter_causes_close.c b/test/core/end2end/tests/filter_causes_close.c
index 21905b9..7a7129c 100644
--- a/test/core/end2end/tests/filter_causes_close.c
+++ b/test/core/end2end/tests/filter_causes_close.c
@@ -217,9 +217,9 @@
                                        &message);
     grpc_call_next_op(exec_ctx, elem, op);
   }
-  grpc_exec_ctx_sched(
+  grpc_closure_sched(
       exec_ctx, calld->recv_im_ready,
-      GRPC_ERROR_CREATE_REFERENCING("Forced call to close", &error, 1), NULL);
+      GRPC_ERROR_CREATE_REFERENCING("Forced call to close", &error, 1));
 }
 
 static void start_transport_stream_op(grpc_exec_ctx *exec_ctx,
@@ -228,7 +228,8 @@
   call_data *calld = elem->call_data;
   if (op->recv_initial_metadata != NULL) {
     calld->recv_im_ready = op->recv_initial_metadata_ready;
-    op->recv_initial_metadata_ready = grpc_closure_create(recv_im_ready, elem);
+    op->recv_initial_metadata_ready =
+        grpc_closure_create(recv_im_ready, elem, grpc_schedule_on_exec_ctx);
   }
   grpc_call_next_op(exec_ctx, elem, op);
 }
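
filter_causes_close shows the standard interception idiom for channel filters: stash the transport's recv_initial_metadata_ready closure, substitute your own, and complete the original when your work is done. Distilled from the hunks above (the error handling is illustrative):

    /* in start_transport_stream_op: swap in our closure */
    calld->recv_im_ready = op->recv_initial_metadata_ready;
    op->recv_initial_metadata_ready =
        grpc_closure_create(recv_im_ready, elem, grpc_schedule_on_exec_ctx);

    /* in recv_im_ready: do filter work, then complete the stashed original */
    grpc_closure_sched(
        exec_ctx, calld->recv_im_ready,
        GRPC_ERROR_CREATE_REFERENCING("Forced call to close", &error, 1));
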
diff --git a/test/core/http/httpcli_test.c b/test/core/http/httpcli_test.c
index 3e312c1..4f00cad 100644
--- a/test/core/http/httpcli_test.c
+++ b/test/core/http/httpcli_test.c
@@ -90,9 +90,10 @@
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
   grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
-  grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
-                   n_seconds_time(15),
-                   grpc_closure_create(on_finish, &response), &response);
+  grpc_httpcli_get(
+      &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
+      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      &response);
   grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
@@ -130,9 +131,11 @@
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
   grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
-  grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
-                    "hello", 5, n_seconds_time(15),
-                    grpc_closure_create(on_finish, &response), &response);
+  grpc_httpcli_post(
+      &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
+      n_seconds_time(15),
+      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      &response);
   grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
@@ -207,7 +210,8 @@
   test_post(port);
 
   grpc_httpcli_context_destroy(&g_context);
-  grpc_closure_init(&destroyed, destroy_pops, &g_pops);
+  grpc_closure_init(&destroyed, destroy_pops, &g_pops,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
                         &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
diff --git a/test/core/http/httpscli_test.c b/test/core/http/httpscli_test.c
index d060351..53b26b6 100644
--- a/test/core/http/httpscli_test.c
+++ b/test/core/http/httpscli_test.c
@@ -91,9 +91,10 @@
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
   grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_get");
-  grpc_httpcli_get(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
-                   n_seconds_time(15),
-                   grpc_closure_create(on_finish, &response), &response);
+  grpc_httpcli_get(
+      &exec_ctx, &g_context, &g_pops, resource_quota, &req, n_seconds_time(15),
+      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      &response);
   grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
@@ -132,9 +133,11 @@
   grpc_http_response response;
   memset(&response, 0, sizeof(response));
   grpc_resource_quota *resource_quota = grpc_resource_quota_create("test_post");
-  grpc_httpcli_post(&exec_ctx, &g_context, &g_pops, resource_quota, &req,
-                    "hello", 5, n_seconds_time(15),
-                    grpc_closure_create(on_finish, &response), &response);
+  grpc_httpcli_post(
+      &exec_ctx, &g_context, &g_pops, resource_quota, &req, "hello", 5,
+      n_seconds_time(15),
+      grpc_closure_create(on_finish, &response, grpc_schedule_on_exec_ctx),
+      &response);
   grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(g_mu);
   while (!g_done) {
@@ -210,7 +213,8 @@
   test_post(port);
 
   grpc_httpcli_context_destroy(&g_context);
-  grpc_closure_init(&destroyed, destroy_pops, &g_pops);
+  grpc_closure_init(&destroyed, destroy_pops, &g_pops,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, grpc_polling_entity_pollset(&g_pops),
                         &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
diff --git a/test/core/internal_api_canaries/iomgr.c b/test/core/internal_api_canaries/iomgr.c
index de03c47..773ef60 100644
--- a/test/core/internal_api_canaries/iomgr.c
+++ b/test/core/internal_api_canaries/iomgr.c
@@ -60,9 +60,9 @@
   closure_list.head = NULL;
   closure_list.tail = NULL;
 
-  grpc_closure_init(&closure, NULL, NULL);
+  grpc_closure_init(&closure, NULL, NULL, grpc_schedule_on_exec_ctx);
 
-  grpc_closure_create(NULL, NULL);
+  grpc_closure_create(NULL, NULL, grpc_schedule_on_exec_ctx);
 
   grpc_closure_list_move(NULL, NULL);
   grpc_closure_list_append(NULL, NULL, GRPC_ERROR_CREATE("Foo"));
@@ -72,8 +72,8 @@
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_exec_ctx_flush(&exec_ctx);
   grpc_exec_ctx_finish(&exec_ctx);
-  grpc_exec_ctx_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"), NULL);
-  grpc_exec_ctx_enqueue_list(&exec_ctx, &closure_list, NULL);
+  grpc_closure_sched(&exec_ctx, &closure, GRPC_ERROR_CREATE("Foo"));
+  grpc_closure_list_sched(&exec_ctx, &closure_list);
 
   /* endpoint.h */
   grpc_endpoint endpoint;
@@ -99,7 +99,6 @@
 
   /* executor.h */
   grpc_executor_init();
-  grpc_executor_push(&closure, GRPC_ERROR_CREATE("Phi"));
   grpc_executor_shutdown();
 
   /* pollset.h */
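
The canary also records the batch rename: grpc_exec_ctx_enqueue_list becomes grpc_closure_list_sched, and grpc_executor_push disappears entirely (executor-bound closures presumably pick their scheduler at creation now, like everything else). A sketch of scheduling a list, with c1 and c2 assumed already initialized:

    grpc_closure_list closure_list;
    closure_list.head = NULL;
    closure_list.tail = NULL;
    grpc_closure_list_append(&closure_list, &c1, GRPC_ERROR_NONE);
    grpc_closure_list_append(&closure_list, &c2, GRPC_ERROR_NONE);
    /* on flush, each queued closure runs under its own scheduler */
    grpc_closure_list_sched(&exec_ctx, &closure_list);
    grpc_exec_ctx_flush(&exec_ctx);
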
diff --git a/test/core/iomgr/combiner_test.c b/test/core/iomgr/combiner_test.c
index f7d5809..9b6d6ff 100644
--- a/test/core/iomgr/combiner_test.c
+++ b/test/core/iomgr/combiner_test.c
@@ -59,9 +59,10 @@
   grpc_combiner *lock = grpc_combiner_create(NULL);
   bool done = false;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_combiner_execute(&exec_ctx, lock,
-                        grpc_closure_create(set_bool_to_true, &done),
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(&exec_ctx,
+                     grpc_closure_create(set_bool_to_true, &done,
+                                         grpc_combiner_scheduler(lock, false)),
+                     GRPC_ERROR_NONE);
   grpc_exec_ctx_flush(&exec_ctx);
   GPR_ASSERT(done);
   grpc_combiner_destroy(&exec_ctx, lock);
@@ -94,9 +95,10 @@
       ex_args *c = gpr_malloc(sizeof(*c));
       c->ctr = &args->ctr;
       c->value = n++;
-      grpc_combiner_execute(&exec_ctx, args->lock,
-                            grpc_closure_create(check_one, c), GRPC_ERROR_NONE,
-                            false);
+      grpc_closure_sched(
+          &exec_ctx, grpc_closure_create(check_one, c, grpc_combiner_scheduler(
+                                                           args->lock, false)),
+          GRPC_ERROR_NONE);
       grpc_exec_ctx_flush(&exec_ctx);
     }
     // sleep for a little bit, to test a combiner draining and another thread
@@ -134,9 +136,10 @@
 }
 
 static void add_finally(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
-  grpc_combiner_execute_finally(exec_ctx, arg,
-                                grpc_closure_create(in_finally, NULL),
-                                GRPC_ERROR_NONE, false);
+  grpc_closure_sched(exec_ctx, grpc_closure_create(
+                                   in_finally, NULL,
+                                   grpc_combiner_finally_scheduler(arg, false)),
+                     GRPC_ERROR_NONE);
 }
 
 static void test_execute_finally(void) {
@@ -144,8 +147,10 @@
 
   grpc_combiner *lock = grpc_combiner_create(NULL);
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_combiner_execute(&exec_ctx, lock, grpc_closure_create(add_finally, lock),
-                        GRPC_ERROR_NONE, false);
+  grpc_closure_sched(&exec_ctx,
+                     grpc_closure_create(add_finally, lock,
+                                         grpc_combiner_scheduler(lock, false)),
+                     GRPC_ERROR_NONE);
   grpc_exec_ctx_flush(&exec_ctx);
   GPR_ASSERT(got_in_finally);
   grpc_combiner_destroy(&exec_ctx, lock);
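
combiner_test captures the deeper point of the refactor: running under a combiner is no longer a distinct entry point (grpc_combiner_execute / grpc_combiner_execute_finally) but just another scheduler handed to the closure. A sketch, assuming the trailing bool is the same covered-by-poller hint the old calls took, with hypothetical callbacks do_work and do_last:

    grpc_combiner *lock = grpc_combiner_create(NULL);
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;

    /* serialized under the lock */
    grpc_closure_sched(&exec_ctx,
                       grpc_closure_create(do_work, arg,
                                           grpc_combiner_scheduler(lock, false)),
                       GRPC_ERROR_NONE);

    /* runs once the combiner drains; scheduled from inside a combiner cb,
       as add_finally does above */
    grpc_closure_sched(
        &exec_ctx,
        grpc_closure_create(do_last, arg,
                            grpc_combiner_finally_scheduler(lock, false)),
        GRPC_ERROR_NONE);

    grpc_exec_ctx_flush(&exec_ctx);
    grpc_combiner_destroy(&exec_ctx, lock);
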
diff --git a/test/core/iomgr/endpoint_pair_test.c b/test/core/iomgr/endpoint_pair_test.c
index 2a257a7..f02171f 100644
--- a/test/core/iomgr/endpoint_pair_test.c
+++ b/test/core/iomgr/endpoint_pair_test.c
@@ -81,7 +81,8 @@
   g_pollset = gpr_malloc(grpc_pollset_size());
   grpc_pollset_init(g_pollset, &g_mu);
   grpc_endpoint_tests(configs[0], g_pollset, g_mu);
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
diff --git a/test/core/iomgr/endpoint_tests.c b/test/core/iomgr/endpoint_tests.c
index 8186ea7..87a9d79 100644
--- a/test/core/iomgr/endpoint_tests.c
+++ b/test/core/iomgr/endpoint_tests.c
@@ -211,9 +211,10 @@
   state.write_done = 0;
   state.current_read_data = 0;
   state.current_write_data = 0;
-  grpc_closure_init(&state.done_read, read_and_write_test_read_handler, &state);
+  grpc_closure_init(&state.done_read, read_and_write_test_read_handler, &state,
+                    grpc_schedule_on_exec_ctx);
   grpc_closure_init(&state.done_write, read_and_write_test_write_handler,
-                    &state);
+                    &state, grpc_schedule_on_exec_ctx);
   grpc_slice_buffer_init(&state.outgoing);
   grpc_slice_buffer_init(&state.incoming);
 
@@ -290,16 +291,19 @@
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_endpoint_add_to_pollset(&exec_ctx, f.client_ep, g_pollset);
   grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
-                     grpc_closure_create(inc_on_failure, &fail_count));
+                     grpc_closure_create(inc_on_failure, &fail_count,
+                                         grpc_schedule_on_exec_ctx));
   wait_for_fail_count(&exec_ctx, &fail_count, 0);
   grpc_endpoint_shutdown(&exec_ctx, f.client_ep);
   wait_for_fail_count(&exec_ctx, &fail_count, 1);
   grpc_endpoint_read(&exec_ctx, f.client_ep, &slice_buffer,
-                     grpc_closure_create(inc_on_failure, &fail_count));
+                     grpc_closure_create(inc_on_failure, &fail_count,
+                                         grpc_schedule_on_exec_ctx));
   wait_for_fail_count(&exec_ctx, &fail_count, 2);
   grpc_slice_buffer_add(&slice_buffer, grpc_slice_from_copied_string("a"));
   grpc_endpoint_write(&exec_ctx, f.client_ep, &slice_buffer,
-                      grpc_closure_create(inc_on_failure, &fail_count));
+                      grpc_closure_create(inc_on_failure, &fail_count,
+                                          grpc_schedule_on_exec_ctx));
   wait_for_fail_count(&exec_ctx, &fail_count, 3);
   grpc_endpoint_shutdown(&exec_ctx, f.client_ep);
   wait_for_fail_count(&exec_ctx, &fail_count, 3);
diff --git a/test/core/iomgr/ev_epoll_linux_test.c b/test/core/iomgr/ev_epoll_linux_test.c
index 564b05d..5bce980 100644
--- a/test/core/iomgr/ev_epoll_linux_test.c
+++ b/test/core/iomgr/ev_epoll_linux_test.c
@@ -102,7 +102,8 @@
   int i;
 
   for (i = 0; i < num_pollsets; i++) {
-    grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset);
+    grpc_closure_init(&destroyed, destroy_pollset, pollsets[i].pollset,
+                      grpc_schedule_on_exec_ctx);
     grpc_pollset_shutdown(exec_ctx, pollsets[i].pollset, &destroyed);
 
     grpc_exec_ctx_flush(exec_ctx);
diff --git a/test/core/iomgr/fd_posix_test.c b/test/core/iomgr/fd_posix_test.c
index 6166699..4dd4765 100644
--- a/test/core/iomgr/fd_posix_test.c
+++ b/test/core/iomgr/fd_posix_test.c
@@ -219,8 +219,8 @@
   se->sv = sv;
   se->em_fd = grpc_fd_create(fd, "listener");
   grpc_pollset_add_fd(exec_ctx, g_pollset, se->em_fd);
-  se->session_read_closure.cb = session_read_cb;
-  se->session_read_closure.cb_arg = se;
+  grpc_closure_init(&se->session_read_closure, session_read_cb, se,
+                    grpc_schedule_on_exec_ctx);
   grpc_fd_notify_on_read(exec_ctx, se->em_fd, &se->session_read_closure);
 
   grpc_fd_notify_on_read(exec_ctx, listen_em_fd, &sv->listen_closure);
@@ -249,8 +249,8 @@
   sv->em_fd = grpc_fd_create(fd, "server");
   grpc_pollset_add_fd(exec_ctx, g_pollset, sv->em_fd);
   /* Register to be interested in reading from listen_fd. */
-  sv->listen_closure.cb = listen_cb;
-  sv->listen_closure.cb_arg = sv;
+  grpc_closure_init(&sv->listen_closure, listen_cb, sv,
+                    grpc_schedule_on_exec_ctx);
   grpc_fd_notify_on_read(exec_ctx, sv->em_fd, &sv->listen_closure);
 
   return port;
@@ -333,8 +333,8 @@
   if (errno == EAGAIN) {
     gpr_mu_lock(g_mu);
     if (cl->client_write_cnt < CLIENT_TOTAL_WRITE_CNT) {
-      cl->write_closure.cb = client_session_write;
-      cl->write_closure.cb_arg = cl;
+      grpc_closure_init(&cl->write_closure, client_session_write, cl,
+                        grpc_schedule_on_exec_ctx);
       grpc_fd_notify_on_write(exec_ctx, cl->em_fd, &cl->write_closure);
       cl->client_write_cnt++;
     } else {
@@ -459,10 +459,10 @@
   grpc_closure second_closure;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
 
-  first_closure.cb = first_read_callback;
-  first_closure.cb_arg = &a;
-  second_closure.cb = second_read_callback;
-  second_closure.cb_arg = &b;
+  grpc_closure_init(&first_closure, first_read_callback, &a,
+                    grpc_schedule_on_exec_ctx);
+  grpc_closure_init(&second_closure, second_read_callback, &b,
+                    grpc_schedule_on_exec_ctx);
 
   init_change_data(&a);
   init_change_data(&b);
@@ -546,7 +546,8 @@
   grpc_pollset_init(g_pollset, &g_mu);
   test_grpc_fd();
   test_grpc_fd_change();
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   gpr_free(g_pollset);
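
fd_posix_test shows why call sites that poked closure fields directly had to move to grpc_closure_init: a closure now carries scheduler state too, and assigning only cb and cb_arg would leave it half-initialized.

    /* before: only two of the closure's fields were written
         cl->write_closure.cb = client_session_write;
         cl->write_closure.cb_arg = cl;                        */

    /* after: cb, cb_arg and the scheduler are set together */
    grpc_closure_init(&cl->write_closure, client_session_write, cl,
                      grpc_schedule_on_exec_ctx);
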
diff --git a/test/core/iomgr/resolve_address_test.c b/test/core/iomgr/resolve_address_test.c
index e4136a7..d844e6e 100644
--- a/test/core/iomgr/resolve_address_test.c
+++ b/test/core/iomgr/resolve_address_test.c
@@ -71,7 +71,8 @@
   grpc_pollset_set_del_pollset(exec_ctx, args->pollset_set, args->pollset);
   grpc_pollset_set_destroy(args->pollset_set);
   grpc_closure do_nothing_cb;
-  grpc_closure_init(&do_nothing_cb, do_nothing, NULL);
+  grpc_closure_init(&do_nothing_cb, do_nothing, NULL,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(exec_ctx, args->pollset, &do_nothing_cb);
   // exec_ctx needs to be flushed before calling grpc_pollset_destroy()
   grpc_exec_ctx_flush(exec_ctx);
@@ -136,8 +137,10 @@
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, "localhost:1", NULL, args.pollset_set,
-                       grpc_closure_create(must_succeed, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, "localhost:1", NULL, args.pollset_set,
+      grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -147,8 +150,10 @@
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, "localhost", "1", args.pollset_set,
-                       grpc_closure_create(must_succeed, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, "localhost", "1", args.pollset_set,
+      grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -158,8 +163,10 @@
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, "localhost", NULL, args.pollset_set,
-                       grpc_closure_create(must_fail, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, "localhost", NULL, args.pollset_set,
+      grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -169,8 +176,10 @@
   args_struct args;
   args_init(&exec_ctx, &args);
   poll_pollset_until_request_done(&args);
-  grpc_resolve_address(&exec_ctx, "[2001:db8::1]:1", NULL, args.pollset_set,
-                       grpc_closure_create(must_succeed, &args), &args.addrs);
+  grpc_resolve_address(
+      &exec_ctx, "[2001:db8::1]:1", NULL, args.pollset_set,
+      grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+      &args.addrs);
   args_finish(&exec_ctx, &args);
   grpc_exec_ctx_finish(&exec_ctx);
 }
@@ -185,8 +194,10 @@
     args_struct args;
     args_init(&exec_ctx, &args);
     poll_pollset_until_request_done(&args);
-    grpc_resolve_address(&exec_ctx, kCases[i], "80", args.pollset_set,
-                         grpc_closure_create(must_succeed, &args), &args.addrs);
+    grpc_resolve_address(
+        &exec_ctx, kCases[i], "80", args.pollset_set,
+        grpc_closure_create(must_succeed, &args, grpc_schedule_on_exec_ctx),
+        &args.addrs);
     args_finish(&exec_ctx, &args);
     grpc_exec_ctx_finish(&exec_ctx);
   }
@@ -202,8 +213,10 @@
     args_struct args;
     args_init(&exec_ctx, &args);
     poll_pollset_until_request_done(&args);
-    grpc_resolve_address(&exec_ctx, kCases[i], NULL, args.pollset_set,
-                         grpc_closure_create(must_fail, &args), &args.addrs);
+    grpc_resolve_address(
+        &exec_ctx, kCases[i], NULL, args.pollset_set,
+        grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx),
+        &args.addrs);
     args_finish(&exec_ctx, &args);
     grpc_exec_ctx_finish(&exec_ctx);
   }
@@ -219,8 +232,10 @@
     args_struct args;
     args_init(&exec_ctx, &args);
     poll_pollset_until_request_done(&args);
-    grpc_resolve_address(&exec_ctx, kCases[i], "1", args.pollset_set,
-                         grpc_closure_create(must_fail, &args), &args.addrs);
+    grpc_resolve_address(
+        &exec_ctx, kCases[i], "1", args.pollset_set,
+        grpc_closure_create(must_fail, &args, grpc_schedule_on_exec_ctx),
+        &args.addrs);
     args_finish(&exec_ctx, &args);
     grpc_exec_ctx_finish(&exec_ctx);
   }
diff --git a/test/core/iomgr/resource_quota_test.c b/test/core/iomgr/resource_quota_test.c
index a82d44f..1817763 100644
--- a/test/core/iomgr/resource_quota_test.c
+++ b/test/core/iomgr/resource_quota_test.c
@@ -45,7 +45,9 @@
 static void set_bool_cb(grpc_exec_ctx *exec_ctx, void *a, grpc_error *error) {
   *(bool *)a = true;
 }
-grpc_closure *set_bool(bool *p) { return grpc_closure_create(set_bool_cb, p); }
+grpc_closure *set_bool(bool *p) {
+  return grpc_closure_create(set_bool_cb, p, grpc_schedule_on_exec_ctx);
+}
 
 typedef struct {
   size_t size;
@@ -67,7 +69,7 @@
   a->size = size;
   a->resource_user = resource_user;
   a->then = then;
-  return grpc_closure_create(reclaimer_cb, a);
+  return grpc_closure_create(reclaimer_cb, a, grpc_schedule_on_exec_ctx);
 }
 
 static void unused_reclaimer_cb(grpc_exec_ctx *exec_ctx, void *arg,
@@ -76,7 +78,8 @@
   grpc_closure_run(exec_ctx, arg, GRPC_ERROR_NONE);
 }
 grpc_closure *make_unused_reclaimer(grpc_closure *then) {
-  return grpc_closure_create(unused_reclaimer_cb, then);
+  return grpc_closure_create(unused_reclaimer_cb, then,
+                             grpc_schedule_on_exec_ctx);
 }
 
 static void destroy_user(grpc_resource_user *usr) {
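
resource_quota_test wraps closure creation in small factories (set_bool, make_reclaimer, make_unused_reclaimer) so the tests read declaratively; the factories simply forward grpc_schedule_on_exec_ctx. A usage sketch for set_bool, in a hypothetical test body:

    bool done = false;
    grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
    grpc_closure_sched(&exec_ctx, set_bool(&done), GRPC_ERROR_NONE);
    grpc_exec_ctx_flush(&exec_ctx);
    GPR_ASSERT(done);
    grpc_exec_ctx_finish(&exec_ctx);
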
diff --git a/test/core/iomgr/tcp_client_posix_test.c b/test/core/iomgr/tcp_client_posix_test.c
index 5fab826..0ea7a00 100644
--- a/test/core/iomgr/tcp_client_posix_test.c
+++ b/test/core/iomgr/tcp_client_posix_test.c
@@ -113,7 +113,7 @@
   /* connect to it */
   GPR_ASSERT(getsockname(svr_fd, (struct sockaddr *)addr,
                          (socklen_t *)&resolved_addr.len) == 0);
-  grpc_closure_init(&done, must_succeed, NULL);
+  grpc_closure_init(&done, must_succeed, NULL, grpc_schedule_on_exec_ctx);
   grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
                           &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));
 
@@ -163,7 +163,7 @@
   gpr_mu_unlock(g_mu);
 
   /* connect to a broken address */
-  grpc_closure_init(&done, must_fail, NULL);
+  grpc_closure_init(&done, must_fail, NULL, grpc_schedule_on_exec_ctx);
   grpc_tcp_client_connect(&exec_ctx, &done, &g_connecting, g_pollset_set, NULL,
                           &resolved_addr, gpr_inf_future(GPR_CLOCK_REALTIME));
 
@@ -207,7 +207,8 @@
   gpr_log(GPR_ERROR, "End of first test");
   test_fails();
   grpc_pollset_set_destroy(g_pollset_set);
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
diff --git a/test/core/iomgr/tcp_posix_test.c b/test/core/iomgr/tcp_posix_test.c
index 5eafa57..c646e61 100644
--- a/test/core/iomgr/tcp_posix_test.c
+++ b/test/core/iomgr/tcp_posix_test.c
@@ -194,7 +194,7 @@
   state.read_bytes = 0;
   state.target_read_bytes = written_bytes;
   grpc_slice_buffer_init(&state.incoming);
-  grpc_closure_init(&state.read_cb, read_cb, &state);
+  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
 
   grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
 
@@ -245,7 +245,7 @@
   state.read_bytes = 0;
   state.target_read_bytes = (size_t)written_bytes;
   grpc_slice_buffer_init(&state.incoming);
-  grpc_closure_init(&state.read_cb, read_cb, &state);
+  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
 
   grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
 
@@ -384,7 +384,8 @@
 
   grpc_slice_buffer_init(&outgoing);
   grpc_slice_buffer_addn(&outgoing, slices, num_blocks);
-  grpc_closure_init(&write_done_closure, write_done, &state);
+  grpc_closure_init(&write_done_closure, write_done, &state,
+                    grpc_schedule_on_exec_ctx);
 
   grpc_endpoint_write(&exec_ctx, ep, &outgoing, &write_done_closure);
   drain_socket_blocking(sv[0], num_bytes, num_bytes);
@@ -429,7 +430,8 @@
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   grpc_closure fd_released_cb;
   int fd_released_done = 0;
-  grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done);
+  grpc_closure_init(&fd_released_cb, &on_fd_released, &fd_released_done,
+                    grpc_schedule_on_exec_ctx);
 
   gpr_log(GPR_INFO,
           "Release fd read_test of size %" PRIuPTR ", slice size %" PRIuPTR,
@@ -452,7 +454,7 @@
   state.read_bytes = 0;
   state.target_read_bytes = written_bytes;
   grpc_slice_buffer_init(&state.incoming);
-  grpc_closure_init(&state.read_cb, read_cb, &state);
+  grpc_closure_init(&state.read_cb, read_cb, &state, grpc_schedule_on_exec_ctx);
 
   grpc_endpoint_read(&exec_ctx, ep, &state.incoming, &state.read_cb);
 
@@ -561,7 +563,8 @@
   grpc_pollset_init(g_pollset, &g_mu);
   grpc_endpoint_tests(configs[0], g_pollset, g_mu);
   run_tests();
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
diff --git a/test/core/iomgr/tcp_server_posix_test.c b/test/core/iomgr/tcp_server_posix_test.c
index 9a7810e..020f005 100644
--- a/test/core/iomgr/tcp_server_posix_test.c
+++ b/test/core/iomgr/tcp_server_posix_test.c
@@ -104,7 +104,7 @@
 static void server_weak_ref_init(server_weak_ref *weak_ref) {
   weak_ref->server = NULL;
   grpc_closure_init(&weak_ref->server_shutdown, server_weak_ref_shutdown,
-                    weak_ref);
+                    weak_ref, grpc_schedule_on_exec_ctx);
 }
 
 /* Make weak_ref->server_shutdown a shutdown_starting cb on server.
@@ -366,7 +366,8 @@
   test_connect(1);
   test_connect(10);
 
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
diff --git a/test/core/iomgr/udp_server_test.c b/test/core/iomgr/udp_server_test.c
index 9bea229..0a247ca 100644
--- a/test/core/iomgr/udp_server_test.c
+++ b/test/core/iomgr/udp_server_test.c
@@ -234,7 +234,8 @@
   test_receive(1);
   test_receive(10);
 
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   gpr_free(g_pollset);
diff --git a/test/core/security/credentials_test.c b/test/core/security/credentials_test.c
index d4c7550..d624a38 100644
--- a/test/core/security/credentials_test.c
+++ b/test/core/security/credentials_test.c
@@ -565,7 +565,7 @@
     grpc_httpcli_response *response) {
   validate_compute_engine_http_request(request);
   *response = http_response(200, valid_oauth2_json_response);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -575,7 +575,7 @@
     grpc_httpcli_response *response) {
   validate_compute_engine_http_request(request);
   *response = http_response(403, "Not Authorized.");
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -668,7 +668,7 @@
     grpc_closure *on_done, grpc_httpcli_response *response) {
   validate_refresh_token_http_request(request, body, body_size);
   *response = http_response(200, valid_oauth2_json_response);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -678,7 +678,7 @@
     grpc_closure *on_done, grpc_httpcli_response *response) {
   validate_refresh_token_http_request(request, body, body_size);
   *response = http_response(403, "Not Authorized.");
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -917,7 +917,7 @@
   response->hdrs = headers;
   GPR_ASSERT(strcmp(request->http.path, "/") == 0);
   GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -975,7 +975,7 @@
   GPR_ASSERT(strcmp(request->http.path, "/") == 0);
   GPR_ASSERT(strcmp(request->host, "metadata.google.internal") == 0);
   *response = http_response(200, "");
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
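
credentials_test stubs HTTP out entirely through grpc_httpcli_set_override; each fake handler fills in the response and completes the caller's on_done closure with grpc_closure_sched. A sketch of a minimal GET override; the leading parameters are assumed from the handler tails visible above, http_response is this test's local helper, and the post-rejecting helper name follows the jwt_verifier hunks below:

    static int fake_get(grpc_exec_ctx *exec_ctx,
                        const grpc_httpcli_request *request,
                        gpr_timespec deadline, grpc_closure *on_done,
                        grpc_httpcli_response *response) {
      *response = http_response(200, "{}"); /* canned body */
      grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
      return 1; /* nonzero: the override handled the request */
    }

    /* install: GETs hit fake_get, any POST is a test failure */
    grpc_httpcli_set_override(fake_get, httpcli_post_should_not_be_called);
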
 
diff --git a/test/core/security/jwt_verifier_test.c b/test/core/security/jwt_verifier_test.c
index 9a21814..a4d65dc 100644
--- a/test/core/security/jwt_verifier_test.c
+++ b/test/core/security/jwt_verifier_test.c
@@ -346,7 +346,7 @@
                     "/robot/v1/metadata/x509/"
                     "777-abaslkan11hlb6nmim3bpspl31ud@developer."
                     "gserviceaccount.com") == 0);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -390,7 +390,7 @@
   GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
   GPR_ASSERT(strcmp(request->host, "keys.bar.com") == 0);
   GPR_ASSERT(strcmp(request->http.path, "/jwk/foo@bar.com") == 0);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -424,7 +424,7 @@
   GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
   GPR_ASSERT(strcmp(request->host, "www.googleapis.com") == 0);
   GPR_ASSERT(strcmp(request->http.path, "/oauth2/v3/certs") == 0);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -439,7 +439,7 @@
   GPR_ASSERT(strcmp(request->http.path, GRPC_OPENID_CONFIG_URL_SUFFIX) == 0);
   grpc_httpcli_set_override(httpcli_get_jwk_set,
                             httpcli_post_should_not_be_called);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
@@ -479,7 +479,7 @@
                                 grpc_httpcli_response *response) {
   *response = http_response(200, gpr_strdup("{\"bad\": \"stuff\"}"));
   GPR_ASSERT(request->handshaker == &grpc_httpcli_ssl);
-  grpc_exec_ctx_sched(exec_ctx, on_done, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_NONE);
   return 1;
 }
 
diff --git a/test/core/security/oauth2_utils.c b/test/core/security/oauth2_utils.c
index 44a2092..ff77af9 100644
--- a/test/core/security/oauth2_utils.c
+++ b/test/core/security/oauth2_utils.c
@@ -92,7 +92,8 @@
   request.pops = grpc_polling_entity_create_from_pollset(pollset);
   request.is_done = 0;
 
-  grpc_closure_init(&do_nothing_closure, do_nothing, NULL);
+  grpc_closure_init(&do_nothing_closure, do_nothing, NULL,
+                    grpc_schedule_on_exec_ctx);
 
   grpc_call_credentials_get_request_metadata(
       &exec_ctx, creds, &request.pops, null_ctx, on_oauth2_response, &request);
diff --git a/test/core/security/secure_endpoint_test.c b/test/core/security/secure_endpoint_test.c
index b5d9500..cbf8a17 100644
--- a/test/core/security/secure_endpoint_test.c
+++ b/test/core/security/secure_endpoint_test.c
@@ -158,7 +158,7 @@
   gpr_log(GPR_INFO, "Start test left over");
 
   grpc_slice_buffer_init(&incoming);
-  grpc_closure_init(&done_closure, inc_call_ctr, &n);
+  grpc_closure_init(&done_closure, inc_call_ctr, &n, grpc_schedule_on_exec_ctx);
   grpc_endpoint_read(&exec_ctx, f.client_ep, &incoming, &done_closure);
   grpc_exec_ctx_finish(&exec_ctx);
   GPR_ASSERT(n == 1);
@@ -191,7 +191,8 @@
   grpc_pollset_init(g_pollset, &g_mu);
   grpc_endpoint_tests(configs[0], g_pollset, g_mu);
   test_leftover(configs[1], 1);
-  grpc_closure_init(&destroyed, destroy_pollset, g_pollset);
+  grpc_closure_init(&destroyed, destroy_pollset, g_pollset,
+                    grpc_schedule_on_exec_ctx);
   grpc_pollset_shutdown(&exec_ctx, g_pollset, &destroyed);
   grpc_exec_ctx_finish(&exec_ctx);
   grpc_shutdown();
diff --git a/test/core/surface/concurrent_connectivity_test.c b/test/core/surface/concurrent_connectivity_test.c
index 93a4794..8ebe8d0 100644
--- a/test/core/surface/concurrent_connectivity_test.c
+++ b/test/core/surface/concurrent_connectivity_test.c
@@ -229,9 +229,9 @@
   gpr_atm_rel_store(&args.stop, 1);
   gpr_thd_join(server);
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
-  grpc_pollset_shutdown(
-      &exec_ctx, args.pollset,
-      grpc_closure_create(done_pollset_shutdown, args.pollset));
+  grpc_pollset_shutdown(&exec_ctx, args.pollset,
+                        grpc_closure_create(done_pollset_shutdown, args.pollset,
+                                            grpc_schedule_on_exec_ctx));
   grpc_exec_ctx_finish(&exec_ctx);
 
   grpc_shutdown();
diff --git a/test/core/surface/lame_client_test.c b/test/core/surface/lame_client_test.c
index 6afcefc..b6db6a6 100644
--- a/test/core/surface/lame_client_test.c
+++ b/test/core/surface/lame_client_test.c
@@ -62,7 +62,8 @@
   grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
 
-  grpc_closure_init(&transport_op_cb, verify_connectivity, &state);
+  grpc_closure_init(&transport_op_cb, verify_connectivity, &state,
+                    grpc_schedule_on_exec_ctx);
 
   op = grpc_make_transport_op(NULL);
   op->on_connectivity_state_change = &transport_op_cb;
@@ -71,7 +72,8 @@
   elem->filter->start_transport_op(&exec_ctx, elem, op);
   grpc_exec_ctx_finish(&exec_ctx);
 
-  grpc_closure_init(&transport_op_cb, do_nothing, NULL);
+  grpc_closure_init(&transport_op_cb, do_nothing, NULL,
+                    grpc_schedule_on_exec_ctx);
   op = grpc_make_transport_op(&transport_op_cb);
   elem->filter->start_transport_op(&exec_ctx, elem, op);
   grpc_exec_ctx_finish(&exec_ctx);
diff --git a/test/core/transport/connectivity_state_test.c b/test/core/transport/connectivity_state_test.c
index 1050059..3520ef0 100644
--- a/test/core/transport/connectivity_state_test.c
+++ b/test/core/transport/connectivity_state_test.c
@@ -86,7 +86,8 @@
 
 static void test_subscribe_then_unsubscribe(void) {
   grpc_connectivity_state_tracker tracker;
-  grpc_closure *closure = grpc_closure_create(must_fail, THE_ARG);
+  grpc_closure *closure =
+      grpc_closure_create(must_fail, THE_ARG, grpc_schedule_on_exec_ctx);
   grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   gpr_log(GPR_DEBUG, "test_subscribe_then_unsubscribe");
@@ -109,7 +110,8 @@
 
 static void test_subscribe_then_destroy(void) {
   grpc_connectivity_state_tracker tracker;
-  grpc_closure *closure = grpc_closure_create(must_succeed, THE_ARG);
+  grpc_closure *closure =
+      grpc_closure_create(must_succeed, THE_ARG, grpc_schedule_on_exec_ctx);
   grpc_connectivity_state state = GRPC_CHANNEL_IDLE;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   gpr_log(GPR_DEBUG, "test_subscribe_then_destroy");
@@ -128,7 +130,8 @@
 
 static void test_subscribe_with_failure_then_destroy(void) {
   grpc_connectivity_state_tracker tracker;
-  grpc_closure *closure = grpc_closure_create(must_fail, THE_ARG);
+  grpc_closure *closure =
+      grpc_closure_create(must_fail, THE_ARG, grpc_schedule_on_exec_ctx);
   grpc_connectivity_state state = GRPC_CHANNEL_SHUTDOWN;
   grpc_exec_ctx exec_ctx = GRPC_EXEC_CTX_INIT;
   gpr_log(GPR_DEBUG, "test_subscribe_with_failure_then_destroy");
diff --git a/test/core/util/mock_endpoint.c b/test/core/util/mock_endpoint.c
index bf6d852..04793bc 100644
--- a/test/core/util/mock_endpoint.c
+++ b/test/core/util/mock_endpoint.c
@@ -55,7 +55,7 @@
   gpr_mu_lock(&m->mu);
   if (m->read_buffer.count > 0) {
     grpc_slice_buffer_swap(&m->read_buffer, slices);
-    grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
   } else {
     m->on_read = cb;
     m->on_read_out = slices;
@@ -69,7 +69,7 @@
   for (size_t i = 0; i < slices->count; i++) {
     m->on_write(slices->slices[i]);
   }
-  grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+  grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
 }
 
 static void me_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -82,8 +82,8 @@
   grpc_mock_endpoint *m = (grpc_mock_endpoint *)ep;
   gpr_mu_lock(&m->mu);
   if (m->on_read) {
-    grpc_exec_ctx_sched(exec_ctx, m->on_read,
-                        GRPC_ERROR_CREATE("Endpoint Shutdown"), NULL);
+    grpc_closure_sched(exec_ctx, m->on_read,
+                       GRPC_ERROR_CREATE("Endpoint Shutdown"));
     m->on_read = NULL;
   }
   gpr_mu_unlock(&m->mu);
@@ -144,7 +144,7 @@
   gpr_mu_lock(&m->mu);
   if (m->on_read != NULL) {
     grpc_slice_buffer_add(m->on_read_out, slice);
-    grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE);
     m->on_read = NULL;
   } else {
     grpc_slice_buffer_add(&m->read_buffer, slice);
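
mock_endpoint distills the canonical complete-or-park shape for asynchronous reads: if data is already buffered, complete the closure immediately; otherwise stash it until data is injected (or shutdown fails it). From the hunks above:

    if (m->read_buffer.count > 0) {
      /* data ready: hand it over and complete now */
      grpc_slice_buffer_swap(&m->read_buffer, slices);
      grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
    } else {
      /* park until the writer side supplies data or shutdown errors it */
      m->on_read = cb;
      m->on_read_out = slices;
    }
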
diff --git a/test/core/util/passthru_endpoint.c b/test/core/util/passthru_endpoint.c
index b3405f02..15ba092 100644
--- a/test/core/util/passthru_endpoint.c
+++ b/test/core/util/passthru_endpoint.c
@@ -63,11 +63,10 @@
   half *m = (half *)ep;
   gpr_mu_lock(&m->parent->mu);
   if (m->parent->shutdown) {
-    grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_CREATE("Already shutdown"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_CREATE("Already shutdown"));
   } else if (m->read_buffer.count > 0) {
     grpc_slice_buffer_swap(&m->read_buffer, slices);
-    grpc_exec_ctx_sched(exec_ctx, cb, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, cb, GRPC_ERROR_NONE);
   } else {
     m->on_read = cb;
     m->on_read_out = slices;
@@ -91,7 +90,7 @@
     for (size_t i = 0; i < slices->count; i++) {
       grpc_slice_buffer_add(m->on_read_out, grpc_slice_ref(slices->slices[i]));
     }
-    grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE, NULL);
+    grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_NONE);
     m->on_read = NULL;
   } else {
     for (size_t i = 0; i < slices->count; i++) {
@@ -99,7 +98,7 @@
     }
   }
   gpr_mu_unlock(&m->parent->mu);
-  grpc_exec_ctx_sched(exec_ctx, cb, error, NULL);
+  grpc_closure_sched(exec_ctx, cb, error);
 }
 
 static void me_add_to_pollset(grpc_exec_ctx *exec_ctx, grpc_endpoint *ep,
@@ -113,14 +112,12 @@
   gpr_mu_lock(&m->parent->mu);
   m->parent->shutdown = true;
   if (m->on_read) {
-    grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"));
     m->on_read = NULL;
   }
   m = other_half(m);
   if (m->on_read) {
-    grpc_exec_ctx_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"),
-                        NULL);
+    grpc_closure_sched(exec_ctx, m->on_read, GRPC_ERROR_CREATE("Shutdown"));
     m->on_read = NULL;
   }
   gpr_mu_unlock(&m->parent->mu);
diff --git a/test/core/util/port_server_client.c b/test/core/util/port_server_client.c
index b2342fe..0bde726 100644
--- a/test/core/util/port_server_client.c
+++ b/test/core/util/port_server_client.c
@@ -92,7 +92,8 @@
   grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
   grpc_pollset_init(pollset, &pr.mu);
   pr.pops = grpc_polling_entity_create_from_pollset(pollset);
-  shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops);
+  shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops,
+                                         grpc_schedule_on_exec_ctx);
 
   req.host = server;
   gpr_asprintf(&path, "/drop/%d", port);
@@ -103,7 +104,9 @@
       grpc_resource_quota_create("port_server_client/free");
   grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
                    GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
-                   grpc_closure_create(freed_port_from_server, &pr), &rsp);
+                   grpc_closure_create(freed_port_from_server, &pr,
+                                       grpc_schedule_on_exec_ctx),
+                   &rsp);
   grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   gpr_mu_lock(pr.mu);
   while (!pr.done) {
@@ -174,7 +177,8 @@
         grpc_resource_quota_create("port_server_client/pick_retry");
     grpc_httpcli_get(exec_ctx, pr->ctx, &pr->pops, resource_quota, &req,
                      GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
-                     grpc_closure_create(got_port_from_server, pr),
+                     grpc_closure_create(got_port_from_server, pr,
+                                         grpc_schedule_on_exec_ctx),
                      &pr->response);
     grpc_resource_quota_internal_unref(exec_ctx, resource_quota);
     return;
@@ -208,7 +212,8 @@
   grpc_pollset *pollset = gpr_malloc(grpc_pollset_size());
   grpc_pollset_init(pollset, &pr.mu);
   pr.pops = grpc_polling_entity_create_from_pollset(pollset);
-  shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops);
+  shutdown_closure = grpc_closure_create(destroy_pops_and_shutdown, &pr.pops,
+                                         grpc_schedule_on_exec_ctx);
   pr.port = -1;
   pr.server = server;
   pr.ctx = &context;
@@ -219,10 +224,11 @@
   grpc_httpcli_context_init(&context);
   grpc_resource_quota *resource_quota =
       grpc_resource_quota_create("port_server_client/pick");
-  grpc_httpcli_get(&exec_ctx, &context, &pr.pops, resource_quota, &req,
-                   GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
-                   grpc_closure_create(got_port_from_server, &pr),
-                   &pr.response);
+  grpc_httpcli_get(
+      &exec_ctx, &context, &pr.pops, resource_quota, &req,
+      GRPC_TIMEOUT_SECONDS_TO_DEADLINE(10),
+      grpc_closure_create(got_port_from_server, &pr, grpc_schedule_on_exec_ctx),
+      &pr.response);
   grpc_resource_quota_internal_unref(&exec_ctx, resource_quota);
   grpc_exec_ctx_finish(&exec_ctx);
   gpr_mu_lock(pr.mu);
diff --git a/test/core/util/test_tcp_server.c b/test/core/util/test_tcp_server.c
index 16df91d..2338b81 100644
--- a/test/core/util/test_tcp_server.c
+++ b/test/core/util/test_tcp_server.c
@@ -57,7 +57,8 @@
                           grpc_tcp_server_cb on_connect, void *user_data) {
   grpc_init();
   server->tcp_server = NULL;
-  grpc_closure_init(&server->shutdown_complete, on_server_destroyed, server);
+  grpc_closure_init(&server->shutdown_complete, on_server_destroyed, server,
+                    grpc_schedule_on_exec_ctx);
   server->shutdown = 0;
   server->pollset = gpr_malloc(grpc_pollset_size());
   grpc_pollset_init(server->pollset, &server->mu);
@@ -111,7 +112,8 @@
   gpr_timespec shutdown_deadline;
   grpc_closure do_nothing_cb;
   grpc_tcp_server_unref(&exec_ctx, server->tcp_server);
-  grpc_closure_init(&do_nothing_cb, do_nothing, NULL);
+  grpc_closure_init(&do_nothing_cb, do_nothing, NULL,
+                    grpc_schedule_on_exec_ctx);
   shutdown_deadline = gpr_time_add(gpr_now(GPR_CLOCK_MONOTONIC),
                                    gpr_time_from_seconds(5, GPR_TIMESPAN));
   while (!server->shutdown &&
diff --git a/tools/codegen/core/gen_nano_proto.sh b/tools/codegen/core/gen_nano_proto.sh
index df107c2..99e4981 100755
--- a/tools/codegen/core/gen_nano_proto.sh
+++ b/tools/codegen/core/gen_nano_proto.sh
@@ -42,46 +42,6 @@
 # 4: Output dir not an absolute path.
 # 5: Couldn't create output directory (2nd argument).
 
-read -r -d '' COPYRIGHT <<'EOF'
-/*
- *
- * Copyright <YEAR>, Google Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met:
- *
- *     * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following disclaimer
- * in the documentation and/or other materials provided with the
- * distribution.
- *     * Neither the name of Google Inc. nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-EOF
-
-CURRENT_YEAR=$(date +%Y)
-COPYRIGHT_FILE=$(mktemp)
-echo "${COPYRIGHT/<YEAR>/$CURRENT_YEAR}" > $COPYRIGHT_FILE
-
 set -ex
 if [ $# -lt 2 ] || [ $# -gt 3 ]; then
   echo "Usage: $0 <input.proto> <absolute path to output dir> [grpc path]"
@@ -143,13 +103,6 @@
 sed -i "s:PB_${UC_PROTO_BASENAME}_PB_H_INCLUDED:GRPC_${INCLUDE_GUARD_BASE}_${UC_PROTO_BASENAME}_PB_H:g" \
   "$OUTPUT_DIR/$PROTO_BASENAME.pb.h"
 
-# prepend copyright
-TMPFILE=$(mktemp)
-cat $COPYRIGHT_FILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.c" > $TMPFILE
-mv -v $TMPFILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.c"
-cat $COPYRIGHT_FILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.h" > $TMPFILE
-mv -v $TMPFILE "$OUTPUT_DIR/$PROTO_BASENAME.pb.h"
-
 deactivate
 rm -rf $VENV_DIR
 
diff --git a/tools/distrib/check_copyright.py b/tools/distrib/check_copyright.py
index f06e5f1..718bb56 100755
--- a/tools/distrib/check_copyright.py
+++ b/tools/distrib/check_copyright.py
@@ -92,9 +92,23 @@
   'LICENSE':    '',
 }
 
-KNOWN_BAD = set([
+_EXEMPT = frozenset((
+  # Generated protocol compiler output.
+  'examples/python/helloworld/helloworld_pb2.py',
+  'examples/python/helloworld/helloworld_pb2_grpc.py',
+  'examples/python/multiplex/helloworld_pb2.py',
+  'examples/python/multiplex/helloworld_pb2_grpc.py',
+  'examples/python/multiplex/route_guide_pb2.py',
+  'examples/python/multiplex/route_guide_pb2_grpc.py',
+  'examples/python/route_guide/route_guide_pb2.py',
+  'examples/python/route_guide/route_guide_pb2_grpc.py',
+
+  'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.h',
+  'src/core/ext/lb_policy/grpclb/proto/grpc/lb/v1/load_balancer.pb.c',
+
+  # An older file originally from outside gRPC.
   'src/php/tests/bootstrap.php',
-])
+))
 
 
 RE_YEAR = r'Copyright (?P<first_year>[0-9]+\-)?(?P<last_year>[0-9]+), Google Inc\.'
@@ -140,7 +154,8 @@
   sys.exit(0)
 
 for filename in filename_list:
-  if filename in KNOWN_BAD: continue
+  if filename in _EXEMPT:
+    continue
   ext = os.path.splitext(filename)[1]
   base = os.path.basename(filename)
   if ext in RE_LICENSE:
diff --git a/tools/distrib/python/docgen.py b/tools/distrib/python/docgen.py
index 6223179..38ffcd6 100755
--- a/tools/distrib/python/docgen.py
+++ b/tools/distrib/python/docgen.py
@@ -94,6 +94,7 @@
   # specified repository, edit it, and push it. It's up to the user to then go
   # onto GitHub and make a PR against grpc/grpc:gh-pages.
   repo_parent_dir = tempfile.mkdtemp()
+  print('Documentation parent directory: {}'.format(repo_parent_dir))
   repo_dir = os.path.join(repo_parent_dir, 'grpc')
   python_doc_dir = os.path.join(repo_dir, 'python')
   doc_branch = args.doc_branch