Remove instances of exec_ctx being passed around through functions in
src/core. exec_ctx is now a thread_local pointer of type ExecCtx (rather
than grpc_exec_ctx) that is set whenever an ExecCtx object is
instantiated. Each ExecCtx also records the previously active exec_ctx,
so instantiations may nest; at any given moment only the innermost
exec_ctx is in use. grpc_exec_ctx_finish is called in the object's
destructor, and the previous exec_ctx is then restored, preserving
current behavior. Explicit calls to grpc_exec_ctx_finish remain in the
code for now, since removing all of them still causes breakage.
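A minimal sketch of the RAII pattern described above, for reviewers. The
member and function names here (`last_exec_ctx_`, `Flush`, `Get`,
`some_core_entry_point`) are illustrative assumptions, not necessarily
the exact shape of the class in this change:

```cpp
class ExecCtx {
 public:
  ExecCtx() : last_exec_ctx_(exec_ctx_) {
    // Make this instance the thread's current exec_ctx; a nested
    // instantiation stacks on top of the previous one.
    exec_ctx_ = this;
  }

  ~ExecCtx() {
    // Do the work grpc_exec_ctx_finish used to do, then restore the
    // previously active exec_ctx so nesting unwinds correctly.
    Flush();
    exec_ctx_ = last_exec_ctx_;
  }

  static ExecCtx* Get() { return exec_ctx_; }

 private:
  void Flush() {
    // Drains pending closures in the real implementation; elided here.
  }

  ExecCtx* last_exec_ctx_;
  static thread_local ExecCtx* exec_ctx_;
};

thread_local ExecCtx* ExecCtx::exec_ctx_ = nullptr;

void some_core_entry_point() {
  ExecCtx exec_ctx;  // becomes the thread's current context
  // Core code now reaches the context via ExecCtx::Get() instead of a
  // grpc_exec_ctx* parameter threaded through every call, which is why
  // the diff below simply drops the first argument at each call site.
}  // destructor flushes and restores any enclosing context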
diff --git a/src/core/ext/filters/client_channel/client_channel.cc b/src/core/ext/filters/client_channel/client_channel.cc
index 22c2bc8..973cc5f 100644
--- a/src/core/ext/filters/client_channel/client_channel.cc
+++ b/src/core/ext/filters/client_channel/client_channel.cc
@@ -87,7 +87,7 @@
   }
 }
 
-static void method_parameters_free(grpc_exec_ctx *exec_ctx, void *value) {
+static void method_parameters_free(void *value) {
   method_parameters_unref((method_parameters *)value);
 }
 
@@ -223,12 +223,11 @@
   grpc_lb_policy *lb_policy;
 } lb_policy_connectivity_watcher;
 
-static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
+static void watch_lb_policy_locked(channel_data *chand,
                                    grpc_lb_policy *lb_policy,
                                    grpc_connectivity_state current_state);
 
-static void set_channel_connectivity_state_locked(grpc_exec_ctx *exec_ctx,
-                                                  channel_data *chand,
+static void set_channel_connectivity_state_locked(channel_data *chand,
                                                   grpc_connectivity_state state,
                                                   grpc_error *error,
                                                   const char *reason) {
@@ -240,12 +239,12 @@
     if (state == GRPC_CHANNEL_TRANSIENT_FAILURE) {
       /* cancel picks with wait_for_ready=false */
       grpc_lb_policy_cancel_picks_locked(
-          exec_ctx, chand->lb_policy,
+          chand->lb_policy,
           /* mask= */ GRPC_INITIAL_METADATA_WAIT_FOR_READY,
           /* check= */ 0, GRPC_ERROR_REF(error));
     } else if (state == GRPC_CHANNEL_SHUTDOWN) {
       /* cancel all picks */
-      grpc_lb_policy_cancel_picks_locked(exec_ctx, chand->lb_policy,
+      grpc_lb_policy_cancel_picks_locked(chand->lb_policy,
                                          /* mask= */ 0, /* check= */ 0,
                                          GRPC_ERROR_REF(error));
     }
@@ -254,12 +253,10 @@
     gpr_log(GPR_DEBUG, "chand=%p: setting connectivity state to %s", chand,
             grpc_connectivity_state_name(state));
   }
-  grpc_connectivity_state_set(exec_ctx, &chand->state_tracker, state, error,
-                              reason);
+  grpc_connectivity_state_set(&chand->state_tracker, state, error, reason);
 }
 
-static void on_lb_policy_state_changed_locked(grpc_exec_ctx *exec_ctx,
-                                              void *arg, grpc_error *error) {
+static void on_lb_policy_state_changed_locked(void *arg, grpc_error *error) {
   lb_policy_connectivity_watcher *w = (lb_policy_connectivity_watcher *)arg;
   grpc_connectivity_state publish_state = w->state;
   /* check if the notification is for the latest policy */
@@ -270,21 +267,21 @@
     }
     if (publish_state == GRPC_CHANNEL_SHUTDOWN && w->chand->resolver != NULL) {
       publish_state = GRPC_CHANNEL_TRANSIENT_FAILURE;
-      grpc_resolver_channel_saw_error_locked(exec_ctx, w->chand->resolver);
-      GRPC_LB_POLICY_UNREF(exec_ctx, w->chand->lb_policy, "channel");
+      grpc_resolver_channel_saw_error_locked(w->chand->resolver);
+      GRPC_LB_POLICY_UNREF(w->chand->lb_policy, "channel");
       w->chand->lb_policy = NULL;
     }
-    set_channel_connectivity_state_locked(exec_ctx, w->chand, publish_state,
+    set_channel_connectivity_state_locked(w->chand, publish_state,
                                           GRPC_ERROR_REF(error), "lb_changed");
     if (w->state != GRPC_CHANNEL_SHUTDOWN) {
-      watch_lb_policy_locked(exec_ctx, w->chand, w->lb_policy, w->state);
+      watch_lb_policy_locked(w->chand, w->lb_policy, w->state);
     }
   }
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack, "watch_lb_policy");
+  GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack, "watch_lb_policy");
   gpr_free(w);
 }
 
-static void watch_lb_policy_locked(grpc_exec_ctx *exec_ctx, channel_data *chand,
+static void watch_lb_policy_locked(channel_data *chand,
                                    grpc_lb_policy *lb_policy,
                                    grpc_connectivity_state current_state) {
   lb_policy_connectivity_watcher *w =
@@ -295,19 +292,18 @@
                     grpc_combiner_scheduler(chand->combiner));
   w->state = current_state;
   w->lb_policy = lb_policy;
-  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, lb_policy, &w->state,
+  grpc_lb_policy_notify_on_state_change_locked(lb_policy, &w->state,
                                                &w->on_changed);
 }
 
-static void start_resolving_locked(grpc_exec_ctx *exec_ctx,
-                                   channel_data *chand) {
+static void start_resolving_locked(channel_data *chand) {
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p: starting name resolution", chand);
   }
   GPR_ASSERT(!chand->started_resolving);
   chand->started_resolving = true;
   GRPC_CHANNEL_STACK_REF(chand->owning_stack, "resolver");
-  grpc_resolver_next_locked(exec_ctx, chand->resolver, &chand->resolver_result,
+  grpc_resolver_next_locked(chand->resolver, &chand->resolver_result,
                             &chand->on_resolver_result_changed);
 }
 
@@ -371,8 +367,7 @@
   }
 }
 
-static void on_resolver_result_changed_locked(grpc_exec_ctx *exec_ctx,
-                                              void *arg, grpc_error *error) {
+static void on_resolver_result_changed_locked(void *arg, grpc_error *error) {
   channel_data *chand = (channel_data *)arg;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p: got resolver result: error=%s", chand,
@@ -437,11 +432,10 @@
     if (chand->lb_policy != NULL && !lb_policy_name_changed) {
       // Continue using the same LB policy.  Update with new addresses.
       lb_policy_updated = true;
-      grpc_lb_policy_update_locked(exec_ctx, chand->lb_policy, &lb_policy_args);
+      grpc_lb_policy_update_locked(chand->lb_policy, &lb_policy_args);
     } else {
       // Instantiate new LB policy.
-      new_lb_policy =
-          grpc_lb_policy_create(exec_ctx, lb_policy_name, &lb_policy_args);
+      new_lb_policy = grpc_lb_policy_create(lb_policy_name, &lb_policy_args);
       if (new_lb_policy == NULL) {
         gpr_log(GPR_ERROR, "could not create LB policy \"%s\"", lb_policy_name);
       }
@@ -459,8 +453,7 @@
             grpc_channel_args_find(chand->resolver_result, GRPC_ARG_SERVER_URI);
         GPR_ASSERT(channel_arg != NULL);
         GPR_ASSERT(channel_arg->type == GRPC_ARG_STRING);
-        grpc_uri *uri =
-            grpc_uri_parse(exec_ctx, channel_arg->value.string, true);
+        grpc_uri *uri = grpc_uri_parse(channel_arg->value.string, true);
         GPR_ASSERT(uri->path[0] != '\0');
         service_config_parsing_state parsing_state;
         memset(&parsing_state, 0, sizeof(parsing_state));
@@ -471,7 +464,7 @@
         grpc_uri_destroy(uri);
         retry_throttle_data = parsing_state.retry_throttle_data;
         method_params_table = grpc_service_config_create_method_config_table(
-            exec_ctx, service_config, method_parameters_create_from_json,
+            service_config, method_parameters_create_from_json,
             method_parameters_free);
         grpc_service_config_destroy(service_config);
       }
@@ -480,7 +473,7 @@
     // be pointing to data inside chand->resolver_result.
     // The copy will be saved in chand->lb_policy_name below.
     lb_policy_name_dup = gpr_strdup(lb_policy_name);
-    grpc_channel_args_destroy(exec_ctx, chand->resolver_result);
+    grpc_channel_args_destroy(chand->resolver_result);
     chand->resolver_result = NULL;
   }
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -512,7 +505,7 @@
   chand->retry_throttle_data = retry_throttle_data;
   // Swap out the method params table.
   if (chand->method_params_table != NULL) {
-    grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
+    grpc_slice_hash_table_unref(chand->method_params_table);
   }
   chand->method_params_table = method_params_table;
   // If we have a new LB policy or are shutting down (in which case
@@ -528,10 +521,9 @@
         gpr_log(GPR_DEBUG, "chand=%p: unreffing lb_policy=%p", chand,
                 chand->lb_policy);
       }
-      grpc_pollset_set_del_pollset_set(exec_ctx,
-                                       chand->lb_policy->interested_parties,
+      grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
                                        chand->interested_parties);
-      GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+      GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
     }
     chand->lb_policy = new_lb_policy;
   }
@@ -545,21 +537,20 @@
       if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
         gpr_log(GPR_DEBUG, "chand=%p: shutting down resolver", chand);
       }
-      grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
-      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
+      grpc_resolver_shutdown_locked(chand->resolver);
+      GRPC_RESOLVER_UNREF(chand->resolver, "channel");
       chand->resolver = NULL;
     }
     set_channel_connectivity_state_locked(
-        exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
+        chand, GRPC_CHANNEL_SHUTDOWN,
         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
             "Got resolver result after disconnection", &error, 1),
         "resolver_gone");
-    GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "resolver");
+    GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "resolver");
     grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                                GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                                    "Channel disconnected", &error, 1));
-    GRPC_CLOSURE_LIST_SCHED(exec_ctx,
-                            &chand->waiting_for_resolver_result_closures);
+    GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
   } else {  // Not shutting down.
     grpc_connectivity_state state = GRPC_CHANNEL_TRANSIENT_FAILURE;
     grpc_error *state_error =
@@ -569,33 +560,28 @@
         gpr_log(GPR_DEBUG, "chand=%p: initializing new LB policy", chand);
       }
       GRPC_ERROR_UNREF(state_error);
-      state = grpc_lb_policy_check_connectivity_locked(exec_ctx, new_lb_policy,
-                                                       &state_error);
-      grpc_pollset_set_add_pollset_set(exec_ctx,
-                                       new_lb_policy->interested_parties,
+      state =
+          grpc_lb_policy_check_connectivity_locked(new_lb_policy, &state_error);
+      grpc_pollset_set_add_pollset_set(new_lb_policy->interested_parties,
                                        chand->interested_parties);
-      GRPC_CLOSURE_LIST_SCHED(exec_ctx,
-                              &chand->waiting_for_resolver_result_closures);
+      GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
       if (chand->exit_idle_when_lb_policy_arrives) {
-        grpc_lb_policy_exit_idle_locked(exec_ctx, new_lb_policy);
+        grpc_lb_policy_exit_idle_locked(new_lb_policy);
         chand->exit_idle_when_lb_policy_arrives = false;
       }
-      watch_lb_policy_locked(exec_ctx, chand, new_lb_policy, state);
+      watch_lb_policy_locked(chand, new_lb_policy, state);
     }
     if (!lb_policy_updated) {
-      set_channel_connectivity_state_locked(exec_ctx, chand, state,
-                                            GRPC_ERROR_REF(state_error),
-                                            "new_lb+resolver");
+      set_channel_connectivity_state_locked(
+          chand, state, GRPC_ERROR_REF(state_error), "new_lb+resolver");
     }
-    grpc_resolver_next_locked(exec_ctx, chand->resolver,
-                              &chand->resolver_result,
+    grpc_resolver_next_locked(chand->resolver, &chand->resolver_result,
                               &chand->on_resolver_result_changed);
     GRPC_ERROR_UNREF(state_error);
   }
 }
 
-static void start_transport_op_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                      grpc_error *error_ignored) {
+static void start_transport_op_locked(void *arg, grpc_error *error_ignored) {
   grpc_transport_op *op = (grpc_transport_op *)arg;
   grpc_channel_element *elem =
       (grpc_channel_element *)op->handler_private.extra_arg;
@@ -603,7 +589,7 @@
 
   if (op->on_connectivity_state_change != NULL) {
     grpc_connectivity_state_notify_on_state_change(
-        exec_ctx, &chand->state_tracker, op->connectivity_state,
+        &chand->state_tracker, op->connectivity_state,
         op->on_connectivity_state_change);
     op->on_connectivity_state_change = NULL;
     op->connectivity_state = NULL;
@@ -611,11 +597,10 @@
 
   if (op->send_ping != NULL) {
     if (chand->lb_policy == NULL) {
-      GRPC_CLOSURE_SCHED(
-          exec_ctx, op->send_ping,
-          GRPC_ERROR_CREATE_FROM_STATIC_STRING("Ping with no load balancing"));
+      GRPC_CLOSURE_SCHED(op->send_ping, GRPC_ERROR_CREATE_FROM_STATIC_STRING(
+                                            "Ping with no load balancing"));
     } else {
-      grpc_lb_policy_ping_one_locked(exec_ctx, chand->lb_policy, op->send_ping);
+      grpc_lb_policy_ping_one_locked(chand->lb_policy, op->send_ping);
       op->bind_pollset = NULL;
     }
     op->send_ping = NULL;
@@ -624,54 +609,48 @@
   if (op->disconnect_with_error != GRPC_ERROR_NONE) {
     if (chand->resolver != NULL) {
       set_channel_connectivity_state_locked(
-          exec_ctx, chand, GRPC_CHANNEL_SHUTDOWN,
+          chand, GRPC_CHANNEL_SHUTDOWN,
           GRPC_ERROR_REF(op->disconnect_with_error), "disconnect");
-      grpc_resolver_shutdown_locked(exec_ctx, chand->resolver);
-      GRPC_RESOLVER_UNREF(exec_ctx, chand->resolver, "channel");
+      grpc_resolver_shutdown_locked(chand->resolver);
+      GRPC_RESOLVER_UNREF(chand->resolver, "channel");
       chand->resolver = NULL;
       if (!chand->started_resolving) {
         grpc_closure_list_fail_all(&chand->waiting_for_resolver_result_closures,
                                    GRPC_ERROR_REF(op->disconnect_with_error));
-        GRPC_CLOSURE_LIST_SCHED(exec_ctx,
-                                &chand->waiting_for_resolver_result_closures);
+        GRPC_CLOSURE_LIST_SCHED(&chand->waiting_for_resolver_result_closures);
       }
       if (chand->lb_policy != NULL) {
-        grpc_pollset_set_del_pollset_set(exec_ctx,
-                                         chand->lb_policy->interested_parties,
+        grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
                                          chand->interested_parties);
-        GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+        GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
         chand->lb_policy = NULL;
       }
     }
     GRPC_ERROR_UNREF(op->disconnect_with_error);
   }
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "start_transport_op");
+  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "start_transport_op");
 
-  GRPC_CLOSURE_SCHED(exec_ctx, op->on_consumed, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(op->on_consumed, GRPC_ERROR_NONE);
 }
 
-static void cc_start_transport_op(grpc_exec_ctx *exec_ctx,
-                                  grpc_channel_element *elem,
+static void cc_start_transport_op(grpc_channel_element *elem,
                                   grpc_transport_op *op) {
   channel_data *chand = (channel_data *)elem->channel_data;
 
   GPR_ASSERT(op->set_accept_stream == false);
   if (op->bind_pollset != NULL) {
-    grpc_pollset_set_add_pollset(exec_ctx, chand->interested_parties,
-                                 op->bind_pollset);
+    grpc_pollset_set_add_pollset(chand->interested_parties, op->bind_pollset);
   }
 
   op->handler_private.extra_arg = elem;
   GRPC_CHANNEL_STACK_REF(chand->owning_stack, "start_transport_op");
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       GRPC_CLOSURE_INIT(&op->handler_private.closure, start_transport_op_locked,
                         op, grpc_combiner_scheduler(chand->combiner)),
       GRPC_ERROR_NONE);
 }
 
-static void cc_get_channel_info(grpc_exec_ctx *exec_ctx,
-                                grpc_channel_element *elem,
+static void cc_get_channel_info(grpc_channel_element *elem,
                                 const grpc_channel_info *info) {
   channel_data *chand = (channel_data *)elem->channel_data;
   gpr_mu_lock(&chand->info_mu);
@@ -690,8 +669,7 @@
 }
 
 /* Constructor for channel_data */
-static grpc_error *cc_init_channel_elem(grpc_exec_ctx *exec_ctx,
-                                        grpc_channel_element *elem,
+static grpc_error *cc_init_channel_elem(grpc_channel_element *elem,
                                         grpc_channel_element_args *args) {
   channel_data *chand = (channel_data *)elem->channel_data;
   GPR_ASSERT(args->is_last);
@@ -739,15 +717,15 @@
   }
   char *proxy_name = NULL;
   grpc_channel_args *new_args = NULL;
-  grpc_proxy_mappers_map_name(exec_ctx, arg->value.string, args->channel_args,
+  grpc_proxy_mappers_map_name(arg->value.string, args->channel_args,
                               &proxy_name, &new_args);
   // Instantiate resolver.
-  chand->resolver = grpc_resolver_create(
-      exec_ctx, proxy_name != NULL ? proxy_name : arg->value.string,
-      new_args != NULL ? new_args : args->channel_args,
-      chand->interested_parties, chand->combiner);
+  chand->resolver =
+      grpc_resolver_create(proxy_name != NULL ? proxy_name : arg->value.string,
+                           new_args != NULL ? new_args : args->channel_args,
+                           chand->interested_parties, chand->combiner);
   if (proxy_name != NULL) gpr_free(proxy_name);
-  if (new_args != NULL) grpc_channel_args_destroy(exec_ctx, new_args);
+  if (new_args != NULL) grpc_channel_args_destroy(new_args);
   if (chand->resolver == NULL) {
     return GRPC_ERROR_CREATE_FROM_STATIC_STRING("resolver creation failed");
   }
@@ -756,31 +734,28 @@
   return GRPC_ERROR_NONE;
 }
 
-static void shutdown_resolver_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                     grpc_error *error) {
+static void shutdown_resolver_locked(void *arg, grpc_error *error) {
   grpc_resolver *resolver = (grpc_resolver *)arg;
-  grpc_resolver_shutdown_locked(exec_ctx, resolver);
-  GRPC_RESOLVER_UNREF(exec_ctx, resolver, "channel");
+  grpc_resolver_shutdown_locked(resolver);
+  GRPC_RESOLVER_UNREF(resolver, "channel");
 }
 
 /* Destructor for channel_data */
-static void cc_destroy_channel_elem(grpc_exec_ctx *exec_ctx,
-                                    grpc_channel_element *elem) {
+static void cc_destroy_channel_elem(grpc_channel_element *elem) {
   channel_data *chand = (channel_data *)elem->channel_data;
   if (chand->resolver != NULL) {
     GRPC_CLOSURE_SCHED(
-        exec_ctx, GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
-                                      grpc_combiner_scheduler(chand->combiner)),
+        GRPC_CLOSURE_CREATE(shutdown_resolver_locked, chand->resolver,
+                            grpc_combiner_scheduler(chand->combiner)),
         GRPC_ERROR_NONE);
   }
   if (chand->client_channel_factory != NULL) {
-    grpc_client_channel_factory_unref(exec_ctx, chand->client_channel_factory);
+    grpc_client_channel_factory_unref(chand->client_channel_factory);
   }
   if (chand->lb_policy != NULL) {
-    grpc_pollset_set_del_pollset_set(exec_ctx,
-                                     chand->lb_policy->interested_parties,
+    grpc_pollset_set_del_pollset_set(chand->lb_policy->interested_parties,
                                      chand->interested_parties);
-    GRPC_LB_POLICY_UNREF(exec_ctx, chand->lb_policy, "channel");
+    GRPC_LB_POLICY_UNREF(chand->lb_policy, "channel");
   }
   gpr_free(chand->info_lb_policy_name);
   gpr_free(chand->info_service_config_json);
@@ -788,11 +763,11 @@
     grpc_server_retry_throttle_data_unref(chand->retry_throttle_data);
   }
   if (chand->method_params_table != NULL) {
-    grpc_slice_hash_table_unref(exec_ctx, chand->method_params_table);
+    grpc_slice_hash_table_unref(chand->method_params_table);
   }
-  grpc_connectivity_state_destroy(exec_ctx, &chand->state_tracker);
-  grpc_pollset_set_destroy(exec_ctx, chand->interested_parties);
-  GRPC_COMBINER_UNREF(exec_ctx, chand->combiner, "client_channel");
+  grpc_connectivity_state_destroy(&chand->state_tracker);
+  grpc_pollset_set_destroy(chand->interested_parties);
+  GRPC_COMBINER_UNREF(chand->combiner, "client_channel");
   gpr_mu_destroy(&chand->info_mu);
   gpr_mu_destroy(&chand->external_connectivity_watcher_list_mu);
 }
@@ -879,21 +854,18 @@
 }
 
 // This is called via the call combiner, so access to calld is synchronized.
-static void fail_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
-                                                void *arg, grpc_error *error) {
+static void fail_pending_batch_in_call_combiner(void *arg, grpc_error *error) {
   call_data *calld = (call_data *)arg;
   if (calld->waiting_for_pick_batches_count > 0) {
     --calld->waiting_for_pick_batches_count;
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx,
         calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count],
         GRPC_ERROR_REF(error), calld->call_combiner);
   }
 }
 
 // This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_fail(grpc_exec_ctx *exec_ctx,
-                                          grpc_call_element *elem,
+static void waiting_for_pick_batches_fail(grpc_call_element *elem,
                                           grpc_error *error) {
   call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -906,37 +878,34 @@
     GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                       fail_pending_batch_in_call_combiner, calld,
                       grpc_schedule_on_exec_ctx);
-    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
-                             &calld->handle_pending_batch_in_call_combiner[i],
-                             GRPC_ERROR_REF(error),
-                             "waiting_for_pick_batches_fail");
+    GRPC_CALL_COMBINER_START(
+        calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
+        GRPC_ERROR_REF(error), "waiting_for_pick_batches_fail");
   }
   if (calld->initial_metadata_batch != NULL) {
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, calld->initial_metadata_batch, GRPC_ERROR_REF(error),
+        calld->initial_metadata_batch, GRPC_ERROR_REF(error),
         calld->call_combiner);
   } else {
-    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                             "waiting_for_pick_batches_fail");
   }
   GRPC_ERROR_UNREF(error);
 }
 
 // This is called via the call combiner, so access to calld is synchronized.
-static void run_pending_batch_in_call_combiner(grpc_exec_ctx *exec_ctx,
-                                               void *arg, grpc_error *ignored) {
+static void run_pending_batch_in_call_combiner(void *arg, grpc_error *ignored) {
   call_data *calld = (call_data *)arg;
   if (calld->waiting_for_pick_batches_count > 0) {
     --calld->waiting_for_pick_batches_count;
     grpc_subchannel_call_process_op(
-        exec_ctx, calld->subchannel_call,
+        calld->subchannel_call,
         calld->waiting_for_pick_batches[calld->waiting_for_pick_batches_count]);
   }
 }
 
 // This is called via the call combiner, so access to calld is synchronized.
-static void waiting_for_pick_batches_resume(grpc_exec_ctx *exec_ctx,
-                                            grpc_call_element *elem) {
+static void waiting_for_pick_batches_resume(grpc_call_element *elem) {
   channel_data *chand = (channel_data *)elem->channel_data;
   call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -949,20 +918,18 @@
     GRPC_CLOSURE_INIT(&calld->handle_pending_batch_in_call_combiner[i],
                       run_pending_batch_in_call_combiner, calld,
                       grpc_schedule_on_exec_ctx);
-    GRPC_CALL_COMBINER_START(exec_ctx, calld->call_combiner,
-                             &calld->handle_pending_batch_in_call_combiner[i],
-                             GRPC_ERROR_NONE,
-                             "waiting_for_pick_batches_resume");
+    GRPC_CALL_COMBINER_START(
+        calld->call_combiner, &calld->handle_pending_batch_in_call_combiner[i],
+        GRPC_ERROR_NONE, "waiting_for_pick_batches_resume");
   }
   GPR_ASSERT(calld->initial_metadata_batch != NULL);
-  grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call,
+  grpc_subchannel_call_process_op(calld->subchannel_call,
                                   calld->initial_metadata_batch);
 }
 
 // Applies service config to the call.  Must be invoked once we know
 // that the resolver has returned results to the channel.
-static void apply_service_config_to_call_locked(grpc_exec_ctx *exec_ctx,
-                                                grpc_call_element *elem) {
+static void apply_service_config_to_call_locked(grpc_call_element *elem) {
   channel_data *chand = (channel_data *)elem->channel_data;
   call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -975,7 +942,7 @@
   }
   if (chand->method_params_table != NULL) {
     calld->method_params = (method_parameters *)grpc_method_config_table_get(
-        exec_ctx, chand->method_params_table, calld->path);
+        chand->method_params_table, calld->path);
     if (calld->method_params != NULL) {
       method_parameters_ref(calld->method_params);
       // If the deadline from the service config is shorter than the one
@@ -987,15 +954,14 @@
             calld->method_params->timeout;
         if (per_method_deadline < calld->deadline) {
           calld->deadline = per_method_deadline;
-          grpc_deadline_state_reset(exec_ctx, elem, calld->deadline);
+          grpc_deadline_state_reset(elem, calld->deadline);
         }
       }
     }
   }
 }
 
-static void create_subchannel_call_locked(grpc_exec_ctx *exec_ctx,
-                                          grpc_call_element *elem,
+static void create_subchannel_call_locked(grpc_call_element *elem,
                                           grpc_error *error) {
   channel_data *chand = (channel_data *)elem->channel_data;
   call_data *calld = (call_data *)elem->call_data;
@@ -1009,24 +975,22 @@
       calld->call_combiner             // call_combiner
   };
   grpc_error *new_error = grpc_connected_subchannel_create_call(
-      exec_ctx, calld->connected_subchannel, &call_args,
-      &calld->subchannel_call);
+      calld->connected_subchannel, &call_args, &calld->subchannel_call);
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: create subchannel_call=%p: error=%s",
             chand, calld, calld->subchannel_call, grpc_error_string(new_error));
   }
   if (new_error != GRPC_ERROR_NONE) {
     new_error = grpc_error_add_child(new_error, error);
-    waiting_for_pick_batches_fail(exec_ctx, elem, new_error);
+    waiting_for_pick_batches_fail(elem, new_error);
   } else {
-    waiting_for_pick_batches_resume(exec_ctx, elem);
+    waiting_for_pick_batches_resume(elem);
   }
   GRPC_ERROR_UNREF(error);
 }
 
 // Invoked when a pick is completed, on both success or failure.
-static void pick_done_locked(grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
-                             grpc_error *error) {
+static void pick_done_locked(grpc_call_element *elem, grpc_error *error) {
   call_data *calld = (call_data *)elem->call_data;
   channel_data *chand = (channel_data *)elem->channel_data;
   if (calld->connected_subchannel == NULL) {
@@ -1042,10 +1006,10 @@
               "chand=%p calld=%p: failed to create subchannel: error=%s", chand,
               calld, grpc_error_string(calld->error));
     }
-    waiting_for_pick_batches_fail(exec_ctx, elem, GRPC_ERROR_REF(calld->error));
+    waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
   } else {
     /* Create call on subchannel. */
-    create_subchannel_call_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+    create_subchannel_call_locked(elem, GRPC_ERROR_REF(error));
   }
   GRPC_ERROR_UNREF(error);
 }
@@ -1054,19 +1018,17 @@
 // either (a) the pick was deferred pending a resolver result or (b) the
 // pick was done asynchronously.  Removes the call's polling entity from
 // chand->interested_parties before invoking pick_done_locked().
-static void async_pick_done_locked(grpc_exec_ctx *exec_ctx,
-                                   grpc_call_element *elem, grpc_error *error) {
+static void async_pick_done_locked(grpc_call_element *elem, grpc_error *error) {
   channel_data *chand = (channel_data *)elem->channel_data;
   call_data *calld = (call_data *)elem->call_data;
-  grpc_polling_entity_del_from_pollset_set(exec_ctx, calld->pollent,
+  grpc_polling_entity_del_from_pollset_set(calld->pollent,
                                            chand->interested_parties);
-  pick_done_locked(exec_ctx, elem, error);
+  pick_done_locked(elem, error);
 }
 
 // Note: This runs under the client_channel combiner, but will NOT be
 // holding the call combiner.
-static void pick_callback_cancel_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                        grpc_error *error) {
+static void pick_callback_cancel_locked(void *arg, grpc_error *error) {
   grpc_call_element *elem = (grpc_call_element *)arg;
   channel_data *chand = (channel_data *)elem->channel_data;
   call_data *calld = (call_data *)elem->call_data;
@@ -1075,17 +1037,15 @@
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: cancelling pick from LB policy %p",
               chand, calld, calld->lb_policy);
     }
-    grpc_lb_policy_cancel_pick_locked(exec_ctx, calld->lb_policy,
-                                      &calld->connected_subchannel,
-                                      GRPC_ERROR_REF(error));
+    grpc_lb_policy_cancel_pick_locked(
+        calld->lb_policy, &calld->connected_subchannel, GRPC_ERROR_REF(error));
   }
-  GRPC_CALL_STACK_UNREF(exec_ctx, calld->owning_call, "pick_callback_cancel");
+  GRPC_CALL_STACK_UNREF(calld->owning_call, "pick_callback_cancel");
 }
 
 // Callback invoked by grpc_lb_policy_pick_locked() for async picks.
 // Unrefs the LB policy and invokes async_pick_done_locked().
-static void pick_callback_done_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                      grpc_error *error) {
+static void pick_callback_done_locked(void *arg, grpc_error *error) {
   grpc_call_element *elem = (grpc_call_element *)arg;
   channel_data *chand = (channel_data *)elem->channel_data;
   call_data *calld = (call_data *)elem->call_data;
@@ -1094,23 +1054,22 @@
             chand, calld);
   }
   GPR_ASSERT(calld->lb_policy != NULL);
-  GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
+  GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
   calld->lb_policy = NULL;
-  async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+  async_pick_done_locked(elem, GRPC_ERROR_REF(error));
 }
 
 // Takes a ref to chand->lb_policy and calls grpc_lb_policy_pick_locked().
 // If the pick was completed synchronously, unrefs the LB policy and
 // returns true.
-static bool pick_callback_start_locked(grpc_exec_ctx *exec_ctx,
-                                       grpc_call_element *elem) {
+static bool pick_callback_start_locked(grpc_call_element *elem) {
   channel_data *chand = (channel_data *)elem->channel_data;
   call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
     gpr_log(GPR_DEBUG, "chand=%p calld=%p: starting pick on lb_policy=%p",
             chand, calld, chand->lb_policy);
   }
-  apply_service_config_to_call_locked(exec_ctx, elem);
+  apply_service_config_to_call_locked(elem);
   // If the application explicitly set wait_for_ready, use that.
   // Otherwise, if the service config specified a value for this
   // method, use that.
@@ -1140,7 +1099,7 @@
   GRPC_CLOSURE_INIT(&calld->lb_pick_closure, pick_callback_done_locked, elem,
                     grpc_combiner_scheduler(chand->combiner));
   const bool pick_done = grpc_lb_policy_pick_locked(
-      exec_ctx, chand->lb_policy, &inputs, &calld->connected_subchannel,
+      chand->lb_policy, &inputs, &calld->connected_subchannel,
       calld->subchannel_call_context, NULL, &calld->lb_pick_closure);
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the LB policy. */
@@ -1148,12 +1107,12 @@
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: pick completed synchronously",
               chand, calld);
     }
-    GRPC_LB_POLICY_UNREF(exec_ctx, calld->lb_policy, "pick_subchannel");
+    GRPC_LB_POLICY_UNREF(calld->lb_policy, "pick_subchannel");
     calld->lb_policy = NULL;
   } else {
     GRPC_CALL_STACK_REF(calld->owning_call, "pick_callback_cancel");
     grpc_call_combiner_set_notify_on_cancel(
-        exec_ctx, calld->call_combiner,
+        calld->call_combiner,
         GRPC_CLOSURE_INIT(&calld->lb_pick_cancel_closure,
                           pick_callback_cancel_locked, elem,
                           grpc_combiner_scheduler(chand->combiner)));
@@ -1170,8 +1129,7 @@
 
 // Note: This runs under the client_channel combiner, but will NOT be
 // holding the call combiner.
-static void pick_after_resolver_result_cancel_locked(grpc_exec_ctx *exec_ctx,
-                                                     void *arg,
+static void pick_after_resolver_result_cancel_locked(void *arg,
                                                      grpc_error *error) {
   pick_after_resolver_result_args *args =
       (pick_after_resolver_result_args *)arg;
@@ -1200,13 +1158,11 @@
   // it's safe to call async_pick_done_locked() here -- we are
   // essentially calling it here instead of calling it in
   // pick_after_resolver_result_done_locked().
-  async_pick_done_locked(exec_ctx, elem,
-                         GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
-                             "Pick cancelled", &error, 1));
+  async_pick_done_locked(elem, GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
+                                   "Pick cancelled", &error, 1));
 }
 
-static void pick_after_resolver_result_done_locked(grpc_exec_ctx *exec_ctx,
-                                                   void *arg,
+static void pick_after_resolver_result_done_locked(void *arg,
                                                    grpc_error *error) {
   pick_after_resolver_result_args *args =
       (pick_after_resolver_result_args *)arg;
@@ -1227,25 +1183,24 @@
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver failed to return data",
               chand, calld);
     }
-    async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_REF(error));
+    async_pick_done_locked(elem, GRPC_ERROR_REF(error));
   } else {
     if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
       gpr_log(GPR_DEBUG, "chand=%p calld=%p: resolver returned, doing pick",
               chand, calld);
     }
-    if (pick_callback_start_locked(exec_ctx, elem)) {
+    if (pick_callback_start_locked(elem)) {
       // Even if the LB policy returns a result synchronously, we have
       // already added our polling entity to chand->interested_parties
       // in order to wait for the resolver result, so we need to
       // remove it here.  Therefore, we call async_pick_done_locked()
       // instead of pick_done_locked().
-      async_pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
+      async_pick_done_locked(elem, GRPC_ERROR_NONE);
     }
   }
 }
 
-static void pick_after_resolver_result_start_locked(grpc_exec_ctx *exec_ctx,
-                                                    grpc_call_element *elem) {
+static void pick_after_resolver_result_start_locked(grpc_call_element *elem) {
   channel_data *chand = (channel_data *)elem->channel_data;
   call_data *calld = (call_data *)elem->call_data;
   if (GRPC_TRACER_ON(grpc_client_channel_trace)) {
@@ -1261,47 +1216,46 @@
   grpc_closure_list_append(&chand->waiting_for_resolver_result_closures,
                            &args->closure, GRPC_ERROR_NONE);
   grpc_call_combiner_set_notify_on_cancel(
-      exec_ctx, calld->call_combiner,
+      calld->call_combiner,
       GRPC_CLOSURE_INIT(&args->cancel_closure,
                         pick_after_resolver_result_cancel_locked, args,
                         grpc_combiner_scheduler(chand->combiner)));
 }
 
-static void start_pick_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                              grpc_error *ignored) {
+static void start_pick_locked(void *arg, grpc_error *ignored) {
   grpc_call_element *elem = (grpc_call_element *)arg;
   call_data *calld = (call_data *)elem->call_data;
   channel_data *chand = (channel_data *)elem->channel_data;
   GPR_ASSERT(calld->connected_subchannel == NULL);
   if (chand->lb_policy != NULL) {
     // We already have an LB policy, so ask it for a pick.
-    if (pick_callback_start_locked(exec_ctx, elem)) {
+    if (pick_callback_start_locked(elem)) {
       // Pick completed synchronously.
-      pick_done_locked(exec_ctx, elem, GRPC_ERROR_NONE);
+      pick_done_locked(elem, GRPC_ERROR_NONE);
       return;
     }
   } else {
     // We do not yet have an LB policy, so wait for a resolver result.
     if (chand->resolver == NULL) {
-      pick_done_locked(exec_ctx, elem,
+      pick_done_locked(elem,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING("Disconnected"));
       return;
     }
     if (!chand->started_resolving) {
-      start_resolving_locked(exec_ctx, chand);
+      start_resolving_locked(chand);
     }
-    pick_after_resolver_result_start_locked(exec_ctx, elem);
+    pick_after_resolver_result_start_locked(elem);
   }
   // We need to wait for either a resolver result or for an async result
   // from the LB policy.  Add the polling entity from call_data to the
   // channel_data's interested_parties, so that the I/O of the LB policy
   // and resolver can be done under it.  The polling entity will be
   // removed in async_pick_done_locked().
-  grpc_polling_entity_add_to_pollset_set(exec_ctx, calld->pollent,
+  grpc_polling_entity_add_to_pollset_set(calld->pollent,
                                          chand->interested_parties);
 }
 
-static void on_complete(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
+static void on_complete(void *arg, grpc_error *error) {
   grpc_call_element *elem = (grpc_call_element *)arg;
   call_data *calld = (call_data *)elem->call_data;
   if (calld->retry_throttle_data != NULL) {
@@ -1317,18 +1271,15 @@
           calld->retry_throttle_data);
     }
   }
-  GRPC_CLOSURE_RUN(exec_ctx, calld->original_on_complete,
-                   GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(calld->original_on_complete, GRPC_ERROR_REF(error));
 }
 
 static void cc_start_transport_stream_op_batch(
-    grpc_exec_ctx *exec_ctx, grpc_call_element *elem,
-    grpc_transport_stream_op_batch *batch) {
+    grpc_call_element *elem, grpc_transport_stream_op_batch *batch) {
   call_data *calld = (call_data *)elem->call_data;
   channel_data *chand = (channel_data *)elem->channel_data;
   if (chand->deadline_checking_enabled) {
-    grpc_deadline_state_client_start_transport_stream_op_batch(exec_ctx, elem,
-                                                               batch);
+    grpc_deadline_state_client_start_transport_stream_op_batch(elem, batch);
   }
   GPR_TIMER_BEGIN("cc_start_transport_stream_op_batch", 0);
   // If we've previously been cancelled, immediately fail any new batches.
@@ -1338,7 +1289,7 @@
               chand, calld, grpc_error_string(calld->error));
     }
     grpc_transport_stream_op_batch_finish_with_failure(
-        exec_ctx, batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
+        batch, GRPC_ERROR_REF(calld->error), calld->call_combiner);
     goto done;
   }
   if (batch->cancel_stream) {
@@ -1356,11 +1307,10 @@
     // If we have a subchannel call, send the cancellation batch down.
     // Otherwise, fail all pending batches.
     if (calld->subchannel_call != NULL) {
-      grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+      grpc_subchannel_call_process_op(calld->subchannel_call, batch);
     } else {
       waiting_for_pick_batches_add(calld, batch);
-      waiting_for_pick_batches_fail(exec_ctx, elem,
-                                    GRPC_ERROR_REF(calld->error));
+      waiting_for_pick_batches_fail(elem, GRPC_ERROR_REF(calld->error));
     }
     goto done;
   }
@@ -1383,7 +1333,7 @@
               "chand=%p calld=%p: sending batch to subchannel_call=%p", chand,
               calld, calld->subchannel_call);
     }
-    grpc_subchannel_call_process_op(exec_ctx, calld->subchannel_call, batch);
+    grpc_subchannel_call_process_op(calld->subchannel_call, batch);
     goto done;
   }
   // We do not yet have a subchannel call.
@@ -1397,7 +1347,6 @@
               chand, calld);
     }
     GRPC_CLOSURE_SCHED(
-        exec_ctx,
         GRPC_CLOSURE_INIT(&batch->handler_private.closure, start_pick_locked,
                           elem, grpc_combiner_scheduler(chand->combiner)),
         GRPC_ERROR_NONE);
@@ -1408,7 +1357,7 @@
               "chand=%p calld=%p: saved batch, yeilding call combiner", chand,
               calld);
     }
-    GRPC_CALL_COMBINER_STOP(exec_ctx, calld->call_combiner,
+    GRPC_CALL_COMBINER_STOP(calld->call_combiner,
                             "batch does not include send_initial_metadata");
   }
 done:
@@ -1416,8 +1365,7 @@
 }
 
 /* Constructor for call_data */
-static grpc_error *cc_init_call_elem(grpc_exec_ctx *exec_ctx,
-                                     grpc_call_element *elem,
+static grpc_error *cc_init_call_elem(grpc_call_element *elem,
                                      const grpc_call_element_args *args) {
   call_data *calld = (call_data *)elem->call_data;
   channel_data *chand = (channel_data *)elem->channel_data;
@@ -1429,23 +1377,22 @@
   calld->owning_call = args->call_stack;
   calld->call_combiner = args->call_combiner;
   if (chand->deadline_checking_enabled) {
-    grpc_deadline_state_init(exec_ctx, elem, args->call_stack,
-                             args->call_combiner, calld->deadline);
+    grpc_deadline_state_init(elem, args->call_stack, args->call_combiner,
+                             calld->deadline);
   }
   return GRPC_ERROR_NONE;
 }
 
 /* Destructor for call_data */
-static void cc_destroy_call_elem(grpc_exec_ctx *exec_ctx,
-                                 grpc_call_element *elem,
+static void cc_destroy_call_elem(grpc_call_element *elem,
                                  const grpc_call_final_info *final_info,
                                  grpc_closure *then_schedule_closure) {
   call_data *calld = (call_data *)elem->call_data;
   channel_data *chand = (channel_data *)elem->channel_data;
   if (chand->deadline_checking_enabled) {
-    grpc_deadline_state_destroy(exec_ctx, elem);
+    grpc_deadline_state_destroy(elem);
   }
-  grpc_slice_unref_internal(exec_ctx, calld->path);
+  grpc_slice_unref_internal(calld->path);
   if (calld->method_params != NULL) {
     method_parameters_unref(calld->method_params);
   }
@@ -1454,14 +1401,13 @@
     grpc_subchannel_call_set_cleanup_closure(calld->subchannel_call,
                                              then_schedule_closure);
     then_schedule_closure = NULL;
-    GRPC_SUBCHANNEL_CALL_UNREF(exec_ctx, calld->subchannel_call,
+    GRPC_SUBCHANNEL_CALL_UNREF(calld->subchannel_call,
                                "client_channel_destroy_call");
   }
   GPR_ASSERT(calld->lb_policy == NULL);
   GPR_ASSERT(calld->waiting_for_pick_batches_count == 0);
   if (calld->connected_subchannel != NULL) {
-    GRPC_CONNECTED_SUBCHANNEL_UNREF(exec_ctx, calld->connected_subchannel,
-                                    "picked");
+    GRPC_CONNECTED_SUBCHANNEL_UNREF(calld->connected_subchannel, "picked");
   }
   for (size_t i = 0; i < GRPC_CONTEXT_COUNT; ++i) {
     if (calld->subchannel_call_context[i].value != NULL) {
@@ -1469,11 +1415,10 @@
           calld->subchannel_call_context[i].value);
     }
   }
-  GRPC_CLOSURE_SCHED(exec_ctx, then_schedule_closure, GRPC_ERROR_NONE);
+  GRPC_CLOSURE_SCHED(then_schedule_closure, GRPC_ERROR_NONE);
 }
 
-static void cc_set_pollset_or_pollset_set(grpc_exec_ctx *exec_ctx,
-                                          grpc_call_element *elem,
+static void cc_set_pollset_or_pollset_set(grpc_call_element *elem,
                                           grpc_polling_entity *pollent) {
   call_data *calld = (call_data *)elem->call_data;
   calld->pollent = pollent;
@@ -1497,30 +1442,29 @@
     "client-channel",
 };
 
-static void try_to_connect_locked(grpc_exec_ctx *exec_ctx, void *arg,
-                                  grpc_error *error_ignored) {
+static void try_to_connect_locked(void *arg, grpc_error *error_ignored) {
   channel_data *chand = (channel_data *)arg;
   if (chand->lb_policy != NULL) {
-    grpc_lb_policy_exit_idle_locked(exec_ctx, chand->lb_policy);
+    grpc_lb_policy_exit_idle_locked(chand->lb_policy);
   } else {
     chand->exit_idle_when_lb_policy_arrives = true;
     if (!chand->started_resolving && chand->resolver != NULL) {
-      start_resolving_locked(exec_ctx, chand);
+      start_resolving_locked(chand);
     }
   }
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, chand->owning_stack, "try_to_connect");
+  GRPC_CHANNEL_STACK_UNREF(chand->owning_stack, "try_to_connect");
 }
 
 grpc_connectivity_state grpc_client_channel_check_connectivity_state(
-    grpc_exec_ctx *exec_ctx, grpc_channel_element *elem, int try_to_connect) {
+    grpc_channel_element *elem, int try_to_connect) {
   channel_data *chand = (channel_data *)elem->channel_data;
   grpc_connectivity_state out =
       grpc_connectivity_state_check(&chand->state_tracker);
   if (out == GRPC_CHANNEL_IDLE && try_to_connect) {
     GRPC_CHANNEL_STACK_REF(chand->owning_stack, "try_to_connect");
     GRPC_CLOSURE_SCHED(
-        exec_ctx, GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
-                                      grpc_combiner_scheduler(chand->combiner)),
+        GRPC_CLOSURE_CREATE(try_to_connect_locked, chand,
+                            grpc_combiner_scheduler(chand->combiner)),
         GRPC_ERROR_NONE);
   }
   return out;
@@ -1599,50 +1543,49 @@
   return count;
 }
 
-static void on_external_watch_complete(grpc_exec_ctx *exec_ctx, void *arg,
-                                       grpc_error *error) {
+static void on_external_watch_complete(void *arg, grpc_error *error) {
   external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
   grpc_closure *follow_up = w->on_complete;
-  grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
+  grpc_polling_entity_del_from_pollset_set(&w->pollent,
                                            w->chand->interested_parties);
-  GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
+  GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack,
                            "external_connectivity_watcher");
   external_connectivity_watcher_list_remove(w->chand, w);
   gpr_free(w);
-  GRPC_CLOSURE_RUN(exec_ctx, follow_up, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_RUN(follow_up, GRPC_ERROR_REF(error));
 }
 
-static void watch_connectivity_state_locked(grpc_exec_ctx *exec_ctx, void *arg,
+static void watch_connectivity_state_locked(void *arg,
                                             grpc_error *error_ignored) {
   external_connectivity_watcher *w = (external_connectivity_watcher *)arg;
   external_connectivity_watcher *found = NULL;
   if (w->state != NULL) {
     external_connectivity_watcher_list_append(w->chand, w);
-    GRPC_CLOSURE_RUN(exec_ctx, w->watcher_timer_init, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_RUN(w->watcher_timer_init, GRPC_ERROR_NONE);
     GRPC_CLOSURE_INIT(&w->my_closure, on_external_watch_complete, w,
                       grpc_schedule_on_exec_ctx);
-    grpc_connectivity_state_notify_on_state_change(
-        exec_ctx, &w->chand->state_tracker, w->state, &w->my_closure);
+    grpc_connectivity_state_notify_on_state_change(&w->chand->state_tracker,
+                                                   w->state, &w->my_closure);
   } else {
     GPR_ASSERT(w->watcher_timer_init == NULL);
     found = lookup_external_connectivity_watcher(w->chand, w->on_complete);
     if (found) {
       GPR_ASSERT(found->on_complete == w->on_complete);
       grpc_connectivity_state_notify_on_state_change(
-          exec_ctx, &found->chand->state_tracker, NULL, &found->my_closure);
+          &found->chand->state_tracker, NULL, &found->my_closure);
     }
-    grpc_polling_entity_del_from_pollset_set(exec_ctx, &w->pollent,
+    grpc_polling_entity_del_from_pollset_set(&w->pollent,
                                              w->chand->interested_parties);
-    GRPC_CHANNEL_STACK_UNREF(exec_ctx, w->chand->owning_stack,
+    GRPC_CHANNEL_STACK_UNREF(w->chand->owning_stack,
                              "external_connectivity_watcher");
     gpr_free(w);
   }
 }
 
 void grpc_client_channel_watch_connectivity_state(
-    grpc_exec_ctx *exec_ctx, grpc_channel_element *elem,
-    grpc_polling_entity pollent, grpc_connectivity_state *state,
-    grpc_closure *closure, grpc_closure *watcher_timer_init) {
+    grpc_channel_element *elem, grpc_polling_entity pollent,
+    grpc_connectivity_state *state, grpc_closure *closure,
+    grpc_closure *watcher_timer_init) {
   channel_data *chand = (channel_data *)elem->channel_data;
   external_connectivity_watcher *w =
       (external_connectivity_watcher *)gpr_zalloc(sizeof(*w));
@@ -1651,12 +1594,11 @@
   w->on_complete = closure;
   w->state = state;
   w->watcher_timer_init = watcher_timer_init;
-  grpc_polling_entity_add_to_pollset_set(exec_ctx, &w->pollent,
+  grpc_polling_entity_add_to_pollset_set(&w->pollent,
                                          chand->interested_parties);
   GRPC_CHANNEL_STACK_REF(w->chand->owning_stack,
                          "external_connectivity_watcher");
   GRPC_CLOSURE_SCHED(
-      exec_ctx,
       GRPC_CLOSURE_INIT(&w->my_closure, watch_connectivity_state_locked, w,
                         grpc_combiner_scheduler(chand->combiner)),
       GRPC_ERROR_NONE);