Revert "Revert "All instances of exec_ctx being passed around in src/core removed""
diff --git a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
index db06fc2..eadeea0 100644
--- a/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
+++ b/src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc
@@ -131,12 +131,12 @@
 /* add lb_token of selected subchannel (address) to the call's initial
  * metadata */
 static grpc_error* initial_metadata_add_lb_token(
-    grpc_exec_ctx* exec_ctx, grpc_metadata_batch* initial_metadata,
+    grpc_metadata_batch* initial_metadata,
     grpc_linked_mdelem* lb_token_mdelem_storage, grpc_mdelem lb_token) {
   GPR_ASSERT(lb_token_mdelem_storage != nullptr);
   GPR_ASSERT(!GRPC_MDISNULL(lb_token));
-  return grpc_metadata_batch_add_tail(exec_ctx, initial_metadata,
-                                      lb_token_mdelem_storage, lb_token);
+  return grpc_metadata_batch_add_tail(initial_metadata, lb_token_mdelem_storage,
+                                      lb_token);
 }
 
 static void destroy_client_stats(void* arg) {
@@ -186,20 +186,19 @@
 /* The \a on_complete closure passed as part of the pick requires keeping a
  * reference to its associated round robin instance. We wrap this closure in
  * order to unref the round robin instance upon its invocation */
-static void wrapped_rr_closure(grpc_exec_ctx* exec_ctx, void* arg,
-                               grpc_error* error) {
+static void wrapped_rr_closure(void* arg, grpc_error* error) {
   wrapped_rr_closure_arg* wc_arg = (wrapped_rr_closure_arg*)arg;
 
   GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
-  GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_REF(error));
 
   if (wc_arg->rr_policy != nullptr) {
-    /* if *target is NULL, no pick has been made by the RR policy (eg, all
+    /* if *target is nullptr, no pick has been made by the RR policy (e.g., all
      * addresses failed to connect). There won't be any user_data/token
      * available */
     if (*wc_arg->target != nullptr) {
       if (!GRPC_MDISNULL(wc_arg->lb_token)) {
-        initial_metadata_add_lb_token(exec_ctx, wc_arg->initial_metadata,
+        initial_metadata_add_lb_token(wc_arg->initial_metadata,
                                       wc_arg->lb_token_mdelem_storage,
                                       GRPC_MDELEM_REF(wc_arg->lb_token));
       } else {
@@ -221,7 +220,7 @@
       gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", wc_arg->glb_policy,
               wc_arg->rr_policy);
     }
-    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "wrapped_rr_closure");
+    GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "wrapped_rr_closure");
   }
   GPR_ASSERT(wc_arg->free_when_done != nullptr);
   gpr_free(wc_arg->free_when_done);
@@ -241,8 +240,8 @@
   /* original pick()'s arguments */
   grpc_lb_policy_pick_args pick_args;
 
-  /* output argument where to store the pick()ed connected subchannel, or NULL
-   * upon error. */
+  /* output argument where to store the pick()ed connected subchannel, or
+   * nullptr upon error. */
   grpc_connected_subchannel** target;
 
   /* args for wrapped_on_complete */
@@ -328,8 +327,8 @@
   /** connectivity state of the LB channel */
   grpc_connectivity_state lb_channel_connectivity;
 
-  /** stores the deserialized response from the LB. May be NULL until one such
-   * response has arrived. */
+  /** stores the deserialized response from the LB. May be nullptr until one
+   * such response has arrived. */
   grpc_grpclb_serverlist* serverlist;
 
   /** Index into serverlist for next pick.
@@ -459,9 +458,9 @@
              ? nullptr
              : (void*)GRPC_MDELEM_REF(grpc_mdelem{(uintptr_t)token}).payload;
 }
-static void lb_token_destroy(grpc_exec_ctx* exec_ctx, void* token) {
+static void lb_token_destroy(void* token) {
   if (token != nullptr) {
-    GRPC_MDELEM_UNREF(exec_ctx, grpc_mdelem{(uintptr_t)token});
+    GRPC_MDELEM_UNREF(grpc_mdelem{(uintptr_t)token});
   }
 }
 static int lb_token_cmp(void* token1, void* token2) {
@@ -497,7 +496,7 @@
 
 /* Returns addresses extracted from \a serverlist. */
 static grpc_lb_addresses* process_serverlist_locked(
-    grpc_exec_ctx* exec_ctx, const grpc_grpclb_serverlist* serverlist) {
+    const grpc_grpclb_serverlist* serverlist) {
   size_t num_valid = 0;
   /* first pass: count how many are valid in order to allocate the necessary
    * memory in a single block */
@@ -528,9 +527,9 @@
           strnlen(server->load_balance_token, lb_token_max_length);
       grpc_slice lb_token_mdstr = grpc_slice_from_copied_buffer(
           server->load_balance_token, lb_token_length);
-      user_data = (void*)grpc_mdelem_from_slices(exec_ctx, GRPC_MDSTR_LB_TOKEN,
-                                                 lb_token_mdstr)
-                      .payload;
+      user_data =
+          (void*)grpc_mdelem_from_slices(GRPC_MDSTR_LB_TOKEN, lb_token_mdstr)
+              .payload;
     } else {
       char* uri = grpc_sockaddr_to_uri(&addr);
       gpr_log(GPR_INFO,
@@ -552,7 +551,7 @@
 
 /* Returns the backend addresses extracted from the given addresses */
 static grpc_lb_addresses* extract_backend_addresses_locked(
-    grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses) {
+    const grpc_lb_addresses* addresses) {
   /* first pass: count the number of backend addresses */
   size_t num_backends = 0;
   for (size_t i = 0; i < addresses->num_addresses; ++i) {
@@ -577,8 +576,8 @@
 }
 
 static void update_lb_connectivity_status_locked(
-    grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
-    grpc_connectivity_state rr_state, grpc_error* rr_state_error) {
+    glb_lb_policy* glb_policy, grpc_connectivity_state rr_state,
+    grpc_error* rr_state_error) {
   const grpc_connectivity_state curr_glb_state =
       grpc_connectivity_state_check(&glb_policy->state_tracker);
 
@@ -630,7 +629,7 @@
         glb_policy, grpc_connectivity_state_name(rr_state),
         glb_policy->rr_policy);
   }
-  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker, rr_state,
+  grpc_connectivity_state_set(&glb_policy->state_tracker, rr_state,
                               rr_state_error,
                               "update_lb_connectivity_status_locked");
 }
@@ -641,9 +640,9 @@
  * If \a force_async is true, then we will manually schedule the
  * completion callback even if the pick is available immediately. */
 static bool pick_from_internal_rr_locked(
-    grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
-    const grpc_lb_policy_pick_args* pick_args, bool force_async,
-    grpc_connected_subchannel** target, wrapped_rr_closure_arg* wc_arg) {
+    glb_lb_policy* glb_policy, const grpc_lb_policy_pick_args* pick_args,
+    bool force_async, grpc_connected_subchannel** target,
+    wrapped_rr_closure_arg* wc_arg) {
   // Check for drops if we are not using fallback backend addresses.
   if (glb_policy->serverlist != nullptr) {
     // Look at the index into the serverlist to see if we should drop this call.
@@ -658,7 +657,7 @@
         gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p for drop", glb_policy,
                 wc_arg->rr_policy);
       }
-      GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+      GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
       // Update client load reporting stats to indicate the number of
       // dropped calls.  Note that we have to do this here instead of in
       // the client_load_reporting filter, because we do not create a
@@ -670,7 +669,7 @@
       grpc_grpclb_client_stats_unref(wc_arg->client_stats);
       if (force_async) {
         GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
-        GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+        GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
         gpr_free(wc_arg->free_when_done);
         return false;
       }
@@ -680,7 +679,7 @@
   }
   // Pick via the RR policy.
   const bool pick_done = grpc_lb_policy_pick_locked(
-      exec_ctx, wc_arg->rr_policy, pick_args, target, wc_arg->context,
+      wc_arg->rr_policy, pick_args, target, wc_arg->context,
       (void**)&wc_arg->lb_token, &wc_arg->wrapper_closure);
   if (pick_done) {
     /* synchronous grpc_lb_policy_pick call. Unref the RR policy. */
@@ -688,9 +687,9 @@
       gpr_log(GPR_INFO, "[grpclb %p] Unreffing RR %p", glb_policy,
               wc_arg->rr_policy);
     }
-    GRPC_LB_POLICY_UNREF(exec_ctx, wc_arg->rr_policy, "glb_pick_sync");
+    GRPC_LB_POLICY_UNREF(wc_arg->rr_policy, "glb_pick_sync");
     /* add the load reporting initial metadata */
-    initial_metadata_add_lb_token(exec_ctx, pick_args->initial_metadata,
+    initial_metadata_add_lb_token(pick_args->initial_metadata,
                                   pick_args->lb_token_mdelem_storage,
                                   GRPC_MDELEM_REF(wc_arg->lb_token));
     // Pass on client stats via context. Passes ownership of the reference.
@@ -699,7 +698,7 @@
     wc_arg->context[GRPC_GRPCLB_CLIENT_STATS].destroy = destroy_client_stats;
     if (force_async) {
       GPR_ASSERT(wc_arg->wrapped_closure != nullptr);
-      GRPC_CLOSURE_SCHED(exec_ctx, wc_arg->wrapped_closure, GRPC_ERROR_NONE);
+      GRPC_CLOSURE_SCHED(wc_arg->wrapped_closure, GRPC_ERROR_NONE);
       gpr_free(wc_arg->free_when_done);
       return false;
     }
@@ -712,12 +711,11 @@
   return pick_done;
 }
 
-static grpc_lb_policy_args* lb_policy_args_create(grpc_exec_ctx* exec_ctx,
-                                                  glb_lb_policy* glb_policy) {
+static grpc_lb_policy_args* lb_policy_args_create(glb_lb_policy* glb_policy) {
   grpc_lb_addresses* addresses;
   if (glb_policy->serverlist != nullptr) {
     GPR_ASSERT(glb_policy->serverlist->num_servers > 0);
-    addresses = process_serverlist_locked(exec_ctx, glb_policy->serverlist);
+    addresses = process_serverlist_locked(glb_policy->serverlist);
   } else {
     // If rr_handover_locked() is invoked when we haven't received any
     // serverlist from the balancer, we use the fallback backends returned by
@@ -737,24 +735,21 @@
   args->args = grpc_channel_args_copy_and_add_and_remove(
       glb_policy->args, keys_to_remove, GPR_ARRAY_SIZE(keys_to_remove), &arg,
       1);
-  grpc_lb_addresses_destroy(exec_ctx, addresses);
+  grpc_lb_addresses_destroy(addresses);
   return args;
 }
 
-static void lb_policy_args_destroy(grpc_exec_ctx* exec_ctx,
-                                   grpc_lb_policy_args* args) {
-  grpc_channel_args_destroy(exec_ctx, args->args);
+static void lb_policy_args_destroy(grpc_lb_policy_args* args) {
+  grpc_channel_args_destroy(args->args);
   gpr_free(args);
 }
 
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
-                                               void* arg, grpc_error* error);
-static void create_rr_locked(grpc_exec_ctx* exec_ctx, glb_lb_policy* glb_policy,
+static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error);
+static void create_rr_locked(glb_lb_policy* glb_policy,
                              grpc_lb_policy_args* args) {
   GPR_ASSERT(glb_policy->rr_policy == nullptr);
 
-  grpc_lb_policy* new_rr_policy =
-      grpc_lb_policy_create(exec_ctx, "round_robin", args);
+  grpc_lb_policy* new_rr_policy = grpc_lb_policy_create("round_robin", args);
   if (new_rr_policy == nullptr) {
     gpr_log(GPR_ERROR,
             "[grpclb %p] Failure creating a RoundRobin policy for serverlist "
@@ -767,21 +762,19 @@
     return;
   }
   grpc_lb_policy_set_reresolve_closure_locked(
-      exec_ctx, new_rr_policy, glb_policy->base.request_reresolution);
+      new_rr_policy, glb_policy->base.request_reresolution);
   glb_policy->base.request_reresolution = nullptr;
   glb_policy->rr_policy = new_rr_policy;
   grpc_error* rr_state_error = nullptr;
   const grpc_connectivity_state rr_state =
-      grpc_lb_policy_check_connectivity_locked(exec_ctx, glb_policy->rr_policy,
+      grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
                                                &rr_state_error);
   /* Connectivity state is a function of the RR policy updated/created */
-  update_lb_connectivity_status_locked(exec_ctx, glb_policy, rr_state,
-                                       rr_state_error);
+  update_lb_connectivity_status_locked(glb_policy, rr_state, rr_state_error);
   /* Add the gRPC LB's interested_parties pollset_set to that of the newly
    * created RR policy. This will make the RR policy progress upon activity on
    * gRPC LB, which in turn is tied to the application's call */
-  grpc_pollset_set_add_pollset_set(exec_ctx,
-                                   glb_policy->rr_policy->interested_parties,
+  grpc_pollset_set_add_pollset_set(glb_policy->rr_policy->interested_parties,
                                    glb_policy->base.interested_parties);
 
   /* Allocate the data for the tracking of the new RR policy's connectivity.
@@ -796,10 +789,10 @@
 
   /* Subscribe to changes to the connectivity of the new RR */
   GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "glb_rr_connectivity_cb");
-  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
+  grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
                                                &rr_connectivity->state,
                                                &rr_connectivity->on_change);
-  grpc_lb_policy_exit_idle_locked(exec_ctx, glb_policy->rr_policy);
+  grpc_lb_policy_exit_idle_locked(glb_policy->rr_policy);
 
   /* Update picks and pings in wait */
   pending_pick* pp;
@@ -814,7 +807,7 @@
               "[grpclb %p] Pending pick about to (async) PICK from RR %p",
               glb_policy, glb_policy->rr_policy);
     }
-    pick_from_internal_rr_locked(exec_ctx, glb_policy, &pp->pick_args,
+    pick_from_internal_rr_locked(glb_policy, &pp->pick_args,
                                  true /* force_async */, pp->target,
                                  &pp->wrapped_on_complete_arg);
   }
@@ -828,40 +821,37 @@
       gpr_log(GPR_INFO, "[grpclb %p] Pending ping about to PING from RR %p",
               glb_policy, glb_policy->rr_policy);
     }
-    grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy,
+    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy,
                                    &pping->wrapped_notify_arg.wrapper_closure);
   }
 }
 
-/* glb_policy->rr_policy may be NULL (initial handover) */
-static void rr_handover_locked(grpc_exec_ctx* exec_ctx,
-                               glb_lb_policy* glb_policy) {
+/* glb_policy->rr_policy may be nullptr (initial handover) */
+static void rr_handover_locked(glb_lb_policy* glb_policy) {
   if (glb_policy->shutting_down) return;
-  grpc_lb_policy_args* args = lb_policy_args_create(exec_ctx, glb_policy);
+  grpc_lb_policy_args* args = lb_policy_args_create(glb_policy);
   GPR_ASSERT(args != nullptr);
   if (glb_policy->rr_policy != nullptr) {
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG, "[grpclb %p] Updating RR policy %p", glb_policy,
               glb_policy->rr_policy);
     }
-    grpc_lb_policy_update_locked(exec_ctx, glb_policy->rr_policy, args);
+    grpc_lb_policy_update_locked(glb_policy->rr_policy, args);
   } else {
-    create_rr_locked(exec_ctx, glb_policy, args);
+    create_rr_locked(glb_policy, args);
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG, "[grpclb %p] Created new RR policy %p", glb_policy,
               glb_policy->rr_policy);
     }
   }
-  lb_policy_args_destroy(exec_ctx, args);
+  lb_policy_args_destroy(args);
 }
 
-static void glb_rr_connectivity_changed_locked(grpc_exec_ctx* exec_ctx,
-                                               void* arg, grpc_error* error) {
+static void glb_rr_connectivity_changed_locked(void* arg, grpc_error* error) {
   rr_connectivity_data* rr_connectivity = (rr_connectivity_data*)arg;
   glb_lb_policy* glb_policy = rr_connectivity->glb_policy;
   if (glb_policy->shutting_down) {
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                              "glb_rr_connectivity_cb");
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
     gpr_free(rr_connectivity);
     return;
   }
@@ -869,25 +859,22 @@
     /* An RR policy that has transitioned into the SHUTDOWN connectivity state
      * should not be considered for picks or updates: the SHUTDOWN state is a
     * sink, policies can't transition back from it. */
-    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy,
-                         "rr_connectivity_shutdown");
+    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "rr_connectivity_shutdown");
     glb_policy->rr_policy = nullptr;
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                              "glb_rr_connectivity_cb");
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "glb_rr_connectivity_cb");
     gpr_free(rr_connectivity);
     return;
   }
   /* rr state != SHUTDOWN && !glb_policy->shutting down: biz as usual */
-  update_lb_connectivity_status_locked(
-      exec_ctx, glb_policy, rr_connectivity->state, GRPC_ERROR_REF(error));
+  update_lb_connectivity_status_locked(glb_policy, rr_connectivity->state,
+                                       GRPC_ERROR_REF(error));
   /* Resubscribe. Reuse the "glb_rr_connectivity_cb" weak ref. */
-  grpc_lb_policy_notify_on_state_change_locked(exec_ctx, glb_policy->rr_policy,
+  grpc_lb_policy_notify_on_state_change_locked(glb_policy->rr_policy,
                                                &rr_connectivity->state,
                                                &rr_connectivity->on_change);
 }
 
-static void destroy_balancer_name(grpc_exec_ctx* exec_ctx,
-                                  void* balancer_name) {
+static void destroy_balancer_name(void* balancer_name) {
   gpr_free(balancer_name);
 }
 
@@ -914,7 +901,7 @@
  *   above the grpclb policy.
  *   - \a args: other args inherited from the grpclb policy. */
 static grpc_channel_args* build_lb_channel_args(
-    grpc_exec_ctx* exec_ctx, const grpc_lb_addresses* addresses,
+    const grpc_lb_addresses* addresses,
     grpc_fake_resolver_response_generator* response_generator,
     const grpc_channel_args* args) {
   size_t num_grpclb_addrs = 0;
@@ -957,7 +944,7 @@
   gpr_free(targets_info_entries);
 
   grpc_channel_args* lb_channel_args =
-      grpc_lb_policy_grpclb_build_lb_channel_args(exec_ctx, targets_info,
+      grpc_lb_policy_grpclb_build_lb_channel_args(targets_info,
                                                   response_generator, args);
 
   grpc_arg lb_channel_addresses_arg =
@@ -965,34 +952,34 @@
 
   grpc_channel_args* result = grpc_channel_args_copy_and_add(
       lb_channel_args, &lb_channel_addresses_arg, 1);
-  grpc_slice_hash_table_unref(exec_ctx, targets_info);
-  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
-  grpc_lb_addresses_destroy(exec_ctx, lb_addresses);
+  grpc_slice_hash_table_unref(targets_info);
+  grpc_channel_args_destroy(lb_channel_args);
+  grpc_lb_addresses_destroy(lb_addresses);
   return result;
 }
 
-static void glb_destroy(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void glb_destroy(grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   GPR_ASSERT(glb_policy->pending_picks == nullptr);
   GPR_ASSERT(glb_policy->pending_pings == nullptr);
   gpr_free((void*)glb_policy->server_name);
-  grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+  grpc_channel_args_destroy(glb_policy->args);
   if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
   }
-  grpc_connectivity_state_destroy(exec_ctx, &glb_policy->state_tracker);
+  grpc_connectivity_state_destroy(&glb_policy->state_tracker);
   if (glb_policy->serverlist != nullptr) {
     grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
   }
   if (glb_policy->fallback_backend_addresses != nullptr) {
-    grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+    grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
   }
   grpc_fake_resolver_response_generator_unref(glb_policy->response_generator);
   grpc_subchannel_index_unref();
   gpr_free(glb_policy);
 }
 
-static void glb_shutdown_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void glb_shutdown_locked(grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   grpc_error* error = GRPC_ERROR_CREATE_FROM_STATIC_STRING("Channel shutdown");
   glb_policy->shutting_down = true;
@@ -1011,11 +998,11 @@
     /* lb_on_server_status_received will pick up the cancel and clean up */
   }
   if (glb_policy->retry_timer_active) {
-    grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
+    grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
     glb_policy->retry_timer_active = false;
   }
   if (glb_policy->fallback_timer_active) {
-    grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+    grpc_timer_cancel(&glb_policy->lb_fallback_timer);
     glb_policy->fallback_timer_active = false;
   }
 
@@ -1024,10 +1011,9 @@
   pending_ping* pping = glb_policy->pending_pings;
   glb_policy->pending_pings = nullptr;
   if (glb_policy->rr_policy != nullptr) {
-    GRPC_LB_POLICY_UNREF(exec_ctx, glb_policy->rr_policy, "glb_shutdown");
+    GRPC_LB_POLICY_UNREF(glb_policy->rr_policy, "glb_shutdown");
   } else {
-    grpc_lb_policy_try_reresolve(exec_ctx, pol, &grpc_lb_glb_trace,
-                                 GRPC_ERROR_CANCELLED);
+    grpc_lb_policy_try_reresolve(pol, &grpc_lb_glb_trace, GRPC_ERROR_CANCELLED);
   }
   // We destroy the LB channel here because
   // glb_lb_channel_on_connectivity_changed_cb needs a valid glb_policy
@@ -1037,14 +1023,13 @@
     grpc_channel_destroy(glb_policy->lb_channel);
     glb_policy->lb_channel = nullptr;
   }
-  grpc_connectivity_state_set(exec_ctx, &glb_policy->state_tracker,
-                              GRPC_CHANNEL_SHUTDOWN, GRPC_ERROR_REF(error),
-                              "glb_shutdown");
+  grpc_connectivity_state_set(&glb_policy->state_tracker, GRPC_CHANNEL_SHUTDOWN,
+                              GRPC_ERROR_REF(error), "glb_shutdown");
 
   while (pp != nullptr) {
     pending_pick* next = pp->next;
     *pp->target = nullptr;
-    GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+    GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                        GRPC_ERROR_REF(error));
     gpr_free(pp);
     pp = next;
@@ -1052,7 +1037,7 @@
 
   while (pping != nullptr) {
     pending_ping* next = pping->next;
-    GRPC_CLOSURE_SCHED(exec_ctx, &pping->wrapped_notify_arg.wrapper_closure,
+    GRPC_CLOSURE_SCHED(&pping->wrapped_notify_arg.wrapper_closure,
                        GRPC_ERROR_REF(error));
     gpr_free(pping);
     pping = next;
@@ -1069,8 +1054,8 @@
 //   pick needs also be cancelled by the RR instance.
 // - Otherwise, without an RR instance, picks stay pending at this policy's
 //   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
-//   we invoke the completion closure and set *target to NULL right here.
-static void glb_cancel_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+//   we invoke the completion closure and set *target to nullptr right here.
+static void glb_cancel_pick_locked(grpc_lb_policy* pol,
                                    grpc_connected_subchannel** target,
                                    grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
@@ -1080,7 +1065,7 @@
     pending_pick* next = pp->next;
     if (pp->target == target) {
       *target = nullptr;
-      GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+      GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick Cancelled", &error, 1));
     } else {
@@ -1090,7 +1075,7 @@
     pp = next;
   }
   if (glb_policy->rr_policy != nullptr) {
-    grpc_lb_policy_cancel_pick_locked(exec_ctx, glb_policy->rr_policy, target,
+    grpc_lb_policy_cancel_pick_locked(glb_policy->rr_policy, target,
                                       GRPC_ERROR_REF(error));
   }
   GRPC_ERROR_UNREF(error);
@@ -1105,9 +1090,8 @@
 //   pick needs also be cancelled by the RR instance.
 // - Otherwise, without an RR instance, picks stay pending at this policy's
 //   level (grpclb), inside the glb_policy->pending_picks list. To cancel these,
-//   we invoke the completion closure and set *target to NULL right here.
-static void glb_cancel_picks_locked(grpc_exec_ctx* exec_ctx,
-                                    grpc_lb_policy* pol,
+//   we invoke the completion closure and set *target to nullptr right here.
+static void glb_cancel_picks_locked(grpc_lb_policy* pol,
                                     uint32_t initial_metadata_flags_mask,
                                     uint32_t initial_metadata_flags_eq,
                                     grpc_error* error) {
@@ -1118,7 +1102,7 @@
     pending_pick* next = pp->next;
     if ((pp->pick_args.initial_metadata_flags & initial_metadata_flags_mask) ==
         initial_metadata_flags_eq) {
-      GRPC_CLOSURE_SCHED(exec_ctx, &pp->wrapped_on_complete_arg.wrapper_closure,
+      GRPC_CLOSURE_SCHED(&pp->wrapped_on_complete_arg.wrapper_closure,
                          GRPC_ERROR_CREATE_REFERENCING_FROM_STATIC_STRING(
                              "Pick Cancelled", &error, 1));
     } else {
@@ -1129,52 +1113,49 @@
   }
   if (glb_policy->rr_policy != nullptr) {
     grpc_lb_policy_cancel_picks_locked(
-        exec_ctx, glb_policy->rr_policy, initial_metadata_flags_mask,
+        glb_policy->rr_policy, initial_metadata_flags_mask,
         initial_metadata_flags_eq, GRPC_ERROR_REF(error));
   }
   GRPC_ERROR_UNREF(error);
 }
 
-static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error);
-static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
-                                      glb_lb_policy* glb_policy);
-static void start_picking_locked(grpc_exec_ctx* exec_ctx,
-                                 glb_lb_policy* glb_policy) {
+static void lb_on_fallback_timer_locked(void* arg, grpc_error* error);
+static void query_for_backends_locked(glb_lb_policy* glb_policy);
+static void start_picking_locked(glb_lb_policy* glb_policy) {
   /* start a timer to fall back */
   if (glb_policy->lb_fallback_timeout_ms > 0 &&
       glb_policy->serverlist == nullptr && !glb_policy->fallback_timer_active) {
     grpc_millis deadline =
-        grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_fallback_timeout_ms;
+        grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_fallback_timeout_ms;
     GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "grpclb_fallback_timer");
     GRPC_CLOSURE_INIT(&glb_policy->lb_on_fallback, lb_on_fallback_timer_locked,
                       glb_policy,
                       grpc_combiner_scheduler(glb_policy->base.combiner));
     glb_policy->fallback_timer_active = true;
-    grpc_timer_init(exec_ctx, &glb_policy->lb_fallback_timer, deadline,
+    grpc_timer_init(&glb_policy->lb_fallback_timer, deadline,
                     &glb_policy->lb_on_fallback);
   }
 
   glb_policy->started_picking = true;
   grpc_backoff_reset(&glb_policy->lb_call_backoff_state);
-  query_for_backends_locked(exec_ctx, glb_policy);
+  query_for_backends_locked(glb_policy);
 }
 
-static void glb_exit_idle_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol) {
+static void glb_exit_idle_locked(grpc_lb_policy* pol) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   if (!glb_policy->started_picking) {
-    start_picking_locked(exec_ctx, glb_policy);
+    start_picking_locked(glb_policy);
   }
 }
 
-static int glb_pick_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
+static int glb_pick_locked(grpc_lb_policy* pol,
                            const grpc_lb_policy_pick_args* pick_args,
                            grpc_connected_subchannel** target,
                            grpc_call_context_element* context, void** user_data,
                            grpc_closure* on_complete) {
   if (pick_args->lb_token_mdelem_storage == nullptr) {
     *target = nullptr;
-    GRPC_CLOSURE_SCHED(exec_ctx, on_complete,
+    GRPC_CLOSURE_SCHED(on_complete,
                        GRPC_ERROR_CREATE_FROM_STATIC_STRING(
                            "No mdelem storage for the LB token. Load reporting "
                            "won't work without it. Failing"));
@@ -1184,8 +1165,8 @@
   bool pick_done = false;
   if (glb_policy->rr_policy != nullptr) {
     const grpc_connectivity_state rr_connectivity_state =
-        grpc_lb_policy_check_connectivity_locked(
-            exec_ctx, glb_policy->rr_policy, nullptr);
+        grpc_lb_policy_check_connectivity_locked(glb_policy->rr_policy,
+                                                 nullptr);
     // The glb_policy->rr_policy may have transitioned to SHUTDOWN but the
     // callback registered to capture this event
     // (glb_rr_connectivity_changed_locked) may not have been invoked yet. We
@@ -1222,9 +1203,8 @@
       wc_arg->initial_metadata = pick_args->initial_metadata;
       wc_arg->free_when_done = wc_arg;
       wc_arg->glb_policy = pol;
-      pick_done =
-          pick_from_internal_rr_locked(exec_ctx, glb_policy, pick_args,
-                                       false /* force_async */, target, wc_arg);
+      pick_done = pick_from_internal_rr_locked(
+          glb_policy, pick_args, false /* force_async */, target, wc_arg);
     }
   } else {  // glb_policy->rr_policy == NULL
     if (grpc_lb_glb_trace.enabled()) {
@@ -1235,7 +1215,7 @@
     add_pending_pick(&glb_policy->pending_picks, pick_args, target, context,
                      on_complete);
     if (!glb_policy->started_picking) {
-      start_picking_locked(exec_ctx, glb_policy);
+      start_picking_locked(glb_policy);
     }
     pick_done = false;
   }
@@ -1243,37 +1223,33 @@
 }
 
 static grpc_connectivity_state glb_check_connectivity_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
-    grpc_error** connectivity_error) {
+    grpc_lb_policy* pol, grpc_error** connectivity_error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   return grpc_connectivity_state_get(&glb_policy->state_tracker,
                                      connectivity_error);
 }
 
-static void glb_ping_one_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* pol,
-                                grpc_closure* closure) {
+static void glb_ping_one_locked(grpc_lb_policy* pol, grpc_closure* closure) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
   if (glb_policy->rr_policy) {
-    grpc_lb_policy_ping_one_locked(exec_ctx, glb_policy->rr_policy, closure);
+    grpc_lb_policy_ping_one_locked(glb_policy->rr_policy, closure);
   } else {
     add_pending_ping(&glb_policy->pending_pings, closure);
     if (!glb_policy->started_picking) {
-      start_picking_locked(exec_ctx, glb_policy);
+      start_picking_locked(glb_policy);
     }
   }
 }
 
-static void glb_notify_on_state_change_locked(grpc_exec_ctx* exec_ctx,
-                                              grpc_lb_policy* pol,
+static void glb_notify_on_state_change_locked(grpc_lb_policy* pol,
                                               grpc_connectivity_state* current,
                                               grpc_closure* notify) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)pol;
-  grpc_connectivity_state_notify_on_state_change(
-      exec_ctx, &glb_policy->state_tracker, current, notify);
+  grpc_connectivity_state_notify_on_state_change(&glb_policy->state_tracker,
+                                                 current, notify);
 }
 
-static void lb_call_on_retry_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                          grpc_error* error) {
+static void lb_call_on_retry_timer_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   glb_policy->retry_timer_active = false;
   if (!glb_policy->shutting_down && glb_policy->lb_call == nullptr &&
@@ -1281,28 +1257,26 @@
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_INFO, "[grpclb %p] Restarting call to LB server", glb_policy);
     }
-    query_for_backends_locked(exec_ctx, glb_policy);
+    query_for_backends_locked(glb_policy);
   }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base, "grpclb_retry_timer");
+  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_retry_timer");
 }
 
-static void maybe_restart_lb_call(grpc_exec_ctx* exec_ctx,
-                                  glb_lb_policy* glb_policy) {
+static void maybe_restart_lb_call(glb_lb_policy* glb_policy) {
   if (glb_policy->started_picking && glb_policy->updating_lb_call) {
     if (glb_policy->retry_timer_active) {
-      grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
+      grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
     }
-    if (!glb_policy->shutting_down) start_picking_locked(exec_ctx, glb_policy);
+    if (!glb_policy->shutting_down) start_picking_locked(glb_policy);
     glb_policy->updating_lb_call = false;
   } else if (!glb_policy->shutting_down) {
     /* if we aren't shutting down, restart the LB client call after some time */
-    grpc_millis next_try =
-        grpc_backoff_step(exec_ctx, &glb_policy->lb_call_backoff_state)
-            .next_attempt_start_time;
+    grpc_millis next_try = grpc_backoff_step(&glb_policy->lb_call_backoff_state)
+                               .next_attempt_start_time;
     if (grpc_lb_glb_trace.enabled()) {
       gpr_log(GPR_DEBUG, "[grpclb %p] Connection to LB server lost...",
               glb_policy);
-      grpc_millis timeout = next_try - grpc_exec_ctx_now(exec_ctx);
+      grpc_millis timeout = next_try - grpc_core::ExecCtx::Get()->Now();
       if (timeout > 0) {
         gpr_log(GPR_DEBUG,
                 "[grpclb %p] ... retry_timer_active in %" PRIuPTR "ms.",
@@ -1317,43 +1291,40 @@
                       lb_call_on_retry_timer_locked, glb_policy,
                       grpc_combiner_scheduler(glb_policy->base.combiner));
     glb_policy->retry_timer_active = true;
-    grpc_timer_init(exec_ctx, &glb_policy->lb_call_retry_timer, next_try,
+    grpc_timer_init(&glb_policy->lb_call_retry_timer, next_try,
                     &glb_policy->lb_on_call_retry);
   }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                             "lb_on_server_status_received_locked");
 }
 
-static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error);
+static void send_client_load_report_locked(void* arg, grpc_error* error);
 
-static void schedule_next_client_load_report(grpc_exec_ctx* exec_ctx,
-                                             glb_lb_policy* glb_policy) {
+static void schedule_next_client_load_report(glb_lb_policy* glb_policy) {
   const grpc_millis next_client_load_report_time =
-      grpc_exec_ctx_now(exec_ctx) + glb_policy->client_stats_report_interval;
+      grpc_core::ExecCtx::Get()->Now() +
+      glb_policy->client_stats_report_interval;
   GRPC_CLOSURE_INIT(&glb_policy->client_load_report_closure,
                     send_client_load_report_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
-  grpc_timer_init(exec_ctx, &glb_policy->client_load_report_timer,
+  grpc_timer_init(&glb_policy->client_load_report_timer,
                   next_client_load_report_time,
                   &glb_policy->client_load_report_closure);
 }
 
-static void client_load_report_done_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void client_load_report_done_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   grpc_byte_buffer_destroy(glb_policy->client_load_report_payload);
   glb_policy->client_load_report_payload = nullptr;
   if (error != GRPC_ERROR_NONE || glb_policy->lb_call == nullptr) {
     glb_policy->client_load_report_timer_pending = false;
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                              "client_load_report");
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
     if (glb_policy->lb_call == nullptr) {
-      maybe_restart_lb_call(exec_ctx, glb_policy);
+      maybe_restart_lb_call(glb_policy);
     }
     return;
   }
-  schedule_next_client_load_report(exec_ctx, glb_policy);
+  schedule_next_client_load_report(glb_policy);
 }
 
 static bool load_report_counters_are_zero(grpc_grpclb_request* request) {
@@ -1368,15 +1339,13 @@
          (drop_entries == nullptr || drop_entries->num_entries == 0);
 }
 
-static void send_client_load_report_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void send_client_load_report_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   if (error == GRPC_ERROR_CANCELLED || glb_policy->lb_call == nullptr) {
     glb_policy->client_load_report_timer_pending = false;
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                              "client_load_report");
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "client_load_report");
     if (glb_policy->lb_call == nullptr) {
-      maybe_restart_lb_call(exec_ctx, glb_policy);
+      maybe_restart_lb_call(glb_policy);
     }
     return;
   }
@@ -1389,7 +1358,7 @@
   if (load_report_counters_are_zero(request)) {
     if (glb_policy->last_client_load_report_counters_were_zero) {
       grpc_grpclb_request_destroy(request);
-      schedule_next_client_load_report(exec_ctx, glb_policy);
+      schedule_next_client_load_report(glb_policy);
       return;
     }
     glb_policy->last_client_load_report_counters_were_zero = true;
@@ -1399,7 +1368,7 @@
   grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
   glb_policy->client_load_report_payload =
       grpc_raw_byte_buffer_create(&request_payload_slice, 1);
-  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
+  grpc_slice_unref_internal(request_payload_slice);
   grpc_grpclb_request_destroy(request);
   // Send load report message.
   grpc_op op;
@@ -1410,20 +1379,16 @@
                     client_load_report_done_locked, glb_policy,
                     grpc_combiner_scheduler(glb_policy->base.combiner));
   grpc_call_error call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, &op, 1,
-      &glb_policy->client_load_report_closure);
+      glb_policy->lb_call, &op, 1, &glb_policy->client_load_report_closure);
   if (call_error != GRPC_CALL_OK) {
     gpr_log(GPR_ERROR, "[grpclb %p] call_error=%d", glb_policy, call_error);
     GPR_ASSERT(GRPC_CALL_OK == call_error);
   }
 }
 
-static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
-                                                void* arg, grpc_error* error);
-static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error);
-static void lb_call_init_locked(grpc_exec_ctx* exec_ctx,
-                                glb_lb_policy* glb_policy) {
+static void lb_on_server_status_received_locked(void* arg, grpc_error* error);
+static void lb_on_response_received_locked(void* arg, grpc_error* error);
+static void lb_call_init_locked(glb_lb_policy* glb_policy) {
   GPR_ASSERT(glb_policy->server_name != nullptr);
   GPR_ASSERT(glb_policy->server_name[0] != '\0');
   GPR_ASSERT(glb_policy->lb_call == nullptr);
@@ -1436,13 +1401,13 @@
   grpc_millis deadline =
       glb_policy->lb_call_timeout_ms == 0
           ? GRPC_MILLIS_INF_FUTURE
-          : grpc_exec_ctx_now(exec_ctx) + glb_policy->lb_call_timeout_ms;
+          : grpc_core::ExecCtx::Get()->Now() + glb_policy->lb_call_timeout_ms;
   glb_policy->lb_call = grpc_channel_create_pollset_set_call(
-      exec_ctx, glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
+      glb_policy->lb_channel, nullptr, GRPC_PROPAGATE_DEFAULTS,
       glb_policy->base.interested_parties,
       GRPC_MDSTR_SLASH_GRPC_DOT_LB_DOT_V1_DOT_LOADBALANCER_SLASH_BALANCELOAD,
       &host, deadline, nullptr);
-  grpc_slice_unref_internal(exec_ctx, host);
+  grpc_slice_unref_internal(host);
 
   if (glb_policy->client_stats != nullptr) {
     grpc_grpclb_client_stats_unref(glb_policy->client_stats);
@@ -1457,7 +1422,7 @@
   grpc_slice request_payload_slice = grpc_grpclb_request_encode(request);
   glb_policy->lb_request_payload =
       grpc_raw_byte_buffer_create(&request_payload_slice, 1);
-  grpc_slice_unref_internal(exec_ctx, request_payload_slice);
+  grpc_slice_unref_internal(request_payload_slice);
   grpc_grpclb_request_destroy(request);
 
   GRPC_CLOSURE_INIT(&glb_policy->lb_on_server_status_received,
@@ -1478,8 +1443,7 @@
   glb_policy->last_client_load_report_counters_were_zero = false;
 }
 
-static void lb_call_destroy_locked(grpc_exec_ctx* exec_ctx,
-                                   glb_lb_policy* glb_policy) {
+static void lb_call_destroy_locked(glb_lb_policy* glb_policy) {
   GPR_ASSERT(glb_policy->lb_call != nullptr);
   grpc_call_unref(glb_policy->lb_call);
   glb_policy->lb_call = nullptr;
@@ -1488,22 +1452,21 @@
   grpc_metadata_array_destroy(&glb_policy->lb_trailing_metadata_recv);
 
   grpc_byte_buffer_destroy(glb_policy->lb_request_payload);
-  grpc_slice_unref_internal(exec_ctx, glb_policy->lb_call_status_details);
+  grpc_slice_unref_internal(glb_policy->lb_call_status_details);
 
   if (glb_policy->client_load_report_timer_pending) {
-    grpc_timer_cancel(exec_ctx, &glb_policy->client_load_report_timer);
+    grpc_timer_cancel(&glb_policy->client_load_report_timer);
   }
 }
 
 /*
  * Auxiliary functions and LB client callbacks.
  */
-static void query_for_backends_locked(grpc_exec_ctx* exec_ctx,
-                                      glb_lb_policy* glb_policy) {
+static void query_for_backends_locked(glb_lb_policy* glb_policy) {
   GPR_ASSERT(glb_policy->lb_channel != nullptr);
   if (glb_policy->shutting_down) return;
 
-  lb_call_init_locked(exec_ctx, glb_policy);
+  lb_call_init_locked(glb_policy);
 
   if (grpc_lb_glb_trace.enabled()) {
     gpr_log(GPR_INFO,
@@ -1534,8 +1497,8 @@
   op->flags = 0;
   op->reserved = nullptr;
   op++;
-  call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops), nullptr);
+  call_error = grpc_call_start_batch_and_execute(glb_policy->lb_call, ops,
+                                                 (size_t)(op - ops), nullptr);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 
   op = ops;
@@ -1553,7 +1516,7 @@
   GRPC_LB_POLICY_WEAK_REF(&glb_policy->base,
                           "lb_on_server_status_received_locked");
   call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+      glb_policy->lb_call, ops, (size_t)(op - ops),
       &glb_policy->lb_on_server_status_received);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 
@@ -1567,13 +1530,12 @@
    * lb_on_response_received_locked */
   GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "lb_on_response_received_locked");
   call_error = grpc_call_start_batch_and_execute(
-      exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+      glb_policy->lb_call, ops, (size_t)(op - ops),
       &glb_policy->lb_on_response_received);
   GPR_ASSERT(GRPC_CALL_OK == call_error);
 }
 
-static void lb_on_response_received_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                           grpc_error* error) {
+static void lb_on_response_received_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   grpc_op ops[2];
   memset(ops, 0, sizeof(ops));
@@ -1607,7 +1569,7 @@
          * send_client_load_report_locked() */
         glb_policy->client_load_report_timer_pending = true;
         GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "client_load_report");
-        schedule_next_client_load_report(exec_ctx, glb_policy);
+        schedule_next_client_load_report(glb_policy);
       } else if (grpc_lb_glb_trace.enabled()) {
         gpr_log(GPR_INFO,
                 "[grpclb %p] Received initial LB response message; client load "
@@ -1652,11 +1614,10 @@
               grpc_grpclb_destroy_serverlist(glb_policy->serverlist);
             } else {
               /* or dispose of the fallback */
-              grpc_lb_addresses_destroy(exec_ctx,
-                                        glb_policy->fallback_backend_addresses);
+              grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
               glb_policy->fallback_backend_addresses = nullptr;
               if (glb_policy->fallback_timer_active) {
-                grpc_timer_cancel(exec_ctx, &glb_policy->lb_fallback_timer);
+                grpc_timer_cancel(&glb_policy->lb_fallback_timer);
                 glb_policy->fallback_timer_active = false;
               }
             }
@@ -1665,7 +1626,7 @@
              * update or in glb_destroy() */
             glb_policy->serverlist = serverlist;
             glb_policy->serverlist_index = 0;
-            rr_handover_locked(exec_ctx, glb_policy);
+            rr_handover_locked(glb_policy);
           }
         } else {
           if (grpc_lb_glb_trace.enabled()) {
@@ -1675,14 +1636,14 @@
           }
           grpc_grpclb_destroy_serverlist(serverlist);
         }
-      } else { /* serverlist == NULL */
+      } else { /* serverlist == nullptr */
         gpr_log(GPR_ERROR,
                 "[grpclb %p] Invalid LB response received: '%s'. Ignoring.",
                 glb_policy,
                 grpc_dump_slice(response_slice, GPR_DUMP_ASCII | GPR_DUMP_HEX));
       }
     }
-    grpc_slice_unref_internal(exec_ctx, response_slice);
+    grpc_slice_unref_internal(response_slice);
     if (!glb_policy->shutting_down) {
       /* keep listening for serverlist updates */
       op->op = GRPC_OP_RECV_MESSAGE;
@@ -1693,23 +1654,22 @@
       /* reuse the "lb_on_response_received_locked" weak ref taken in
        * query_for_backends_locked() */
       const grpc_call_error call_error = grpc_call_start_batch_and_execute(
-          exec_ctx, glb_policy->lb_call, ops, (size_t)(op - ops),
+          glb_policy->lb_call, ops, (size_t)(op - ops),
           &glb_policy->lb_on_response_received); /* loop */
       GPR_ASSERT(GRPC_CALL_OK == call_error);
     } else {
-      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+      GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                                 "lb_on_response_received_locked_shutdown");
     }
   } else { /* empty payload: call cancelled. */
            /* dispose of the "lb_on_response_received_locked" weak ref taken in
             * query_for_backends_locked() and reused in every reception loop */
-    GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+    GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                               "lb_on_response_received_locked_empty_payload");
   }
 }
 
-static void lb_on_fallback_timer_locked(grpc_exec_ctx* exec_ctx, void* arg,
-                                        grpc_error* error) {
+static void lb_on_fallback_timer_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   glb_policy->fallback_timer_active = false;
   /* If we receive a serverlist after the timer fires but before this callback
@@ -1722,15 +1682,13 @@
                 glb_policy);
       }
       GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
-      rr_handover_locked(exec_ctx, glb_policy);
+      rr_handover_locked(glb_policy);
     }
   }
-  GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
-                            "grpclb_fallback_timer");
+  GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base, "grpclb_fallback_timer");
 }
 
-static void lb_on_server_status_received_locked(grpc_exec_ctx* exec_ctx,
-                                                void* arg, grpc_error* error) {
+static void lb_on_server_status_received_locked(void* arg, grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   GPR_ASSERT(glb_policy->lb_call != nullptr);
   if (grpc_lb_glb_trace.enabled()) {
@@ -1744,29 +1702,28 @@
     gpr_free(status_details);
   }
   /* We need to perform cleanups no matter what. */
-  lb_call_destroy_locked(exec_ctx, glb_policy);
+  lb_call_destroy_locked(glb_policy);
   // If the load report timer is still pending, we wait for it to be
   // called before restarting the call.  Otherwise, we restart the call
   // here.
   if (!glb_policy->client_load_report_timer_pending) {
-    maybe_restart_lb_call(exec_ctx, glb_policy);
+    maybe_restart_lb_call(glb_policy);
   }
 }
 
-static void fallback_update_locked(grpc_exec_ctx* exec_ctx,
-                                   glb_lb_policy* glb_policy,
+static void fallback_update_locked(glb_lb_policy* glb_policy,
                                    const grpc_lb_addresses* addresses) {
   GPR_ASSERT(glb_policy->fallback_backend_addresses != nullptr);
-  grpc_lb_addresses_destroy(exec_ctx, glb_policy->fallback_backend_addresses);
+  grpc_lb_addresses_destroy(glb_policy->fallback_backend_addresses);
   glb_policy->fallback_backend_addresses =
-      extract_backend_addresses_locked(exec_ctx, addresses);
+      extract_backend_addresses_locked(addresses);
   if (glb_policy->lb_fallback_timeout_ms > 0 &&
       glb_policy->rr_policy != nullptr) {
-    rr_handover_locked(exec_ctx, glb_policy);
+    rr_handover_locked(glb_policy);
   }
 }
 
-static void glb_update_locked(grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
+static void glb_update_locked(grpc_lb_policy* policy,
                               const grpc_lb_policy_args* args) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
   const grpc_arg* arg =
@@ -1776,7 +1733,7 @@
       // If we don't have a current channel to the LB, go into TRANSIENT
       // FAILURE.
       grpc_connectivity_state_set(
-          exec_ctx, &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
+          &glb_policy->state_tracker, GRPC_CHANNEL_TRANSIENT_FAILURE,
           GRPC_ERROR_CREATE_FROM_STATIC_STRING("Missing update in args"),
           "glb_update_missing");
     } else {
@@ -1793,16 +1750,16 @@
   // If a non-empty serverlist hasn't been received from the balancer,
   // propagate the update to fallback_backend_addresses.
   if (glb_policy->serverlist == nullptr) {
-    fallback_update_locked(exec_ctx, glb_policy, addresses);
+    fallback_update_locked(glb_policy, addresses);
   }
   GPR_ASSERT(glb_policy->lb_channel != nullptr);
   // Propagate updates to the LB channel (pick_first) through the fake
   // resolver.
   grpc_channel_args* lb_channel_args = build_lb_channel_args(
-      exec_ctx, addresses, glb_policy->response_generator, args->args);
+      addresses, glb_policy->response_generator, args->args);
   grpc_fake_resolver_response_generator_set_response(
-      exec_ctx, glb_policy->response_generator, lb_channel_args);
-  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+      glb_policy->response_generator, lb_channel_args);
+  grpc_channel_args_destroy(lb_channel_args);
   // Start watching the LB channel connectivity for connection, if not
   // already doing so.
   if (!glb_policy->watching_lb_channel) {
@@ -1814,7 +1771,7 @@
     glb_policy->watching_lb_channel = true;
     GRPC_LB_POLICY_WEAK_REF(&glb_policy->base, "watch_lb_channel_connectivity");
     grpc_client_channel_watch_connectivity_state(
-        exec_ctx, client_channel_elem,
+        client_channel_elem,
         grpc_polling_entity_create_from_pollset_set(
             glb_policy->base.interested_parties),
         &glb_policy->lb_channel_connectivity,
@@ -1825,8 +1782,7 @@
 // Invoked as part of the update process. It continues watching the LB channel
 // until it shuts down or becomes READY. It's invoked even if the LB channel
 // stayed READY throughout the update (for example if the update is identical).
-static void glb_lb_channel_on_connectivity_changed_cb(grpc_exec_ctx* exec_ctx,
-                                                      void* arg,
+static void glb_lb_channel_on_connectivity_changed_cb(void* arg,
                                                       grpc_error* error) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)arg;
   if (glb_policy->shutting_down) goto done;
@@ -1842,7 +1798,7 @@
               grpc_channel_get_channel_stack(glb_policy->lb_channel));
       GPR_ASSERT(client_channel_elem->filter == &grpc_client_channel_filter);
       grpc_client_channel_watch_connectivity_state(
-          exec_ctx, client_channel_elem,
+          client_channel_elem,
           grpc_polling_entity_create_from_pollset_set(
               glb_policy->base.interested_parties),
           &glb_policy->lb_channel_connectivity,
@@ -1861,29 +1817,28 @@
         // lb_call.
       } else if (glb_policy->started_picking) {
         if (glb_policy->retry_timer_active) {
-          grpc_timer_cancel(exec_ctx, &glb_policy->lb_call_retry_timer);
+          grpc_timer_cancel(&glb_policy->lb_call_retry_timer);
           glb_policy->retry_timer_active = false;
         }
-        start_picking_locked(exec_ctx, glb_policy);
+        start_picking_locked(glb_policy);
       }
     /* fallthrough */
     case GRPC_CHANNEL_SHUTDOWN:
     done:
       glb_policy->watching_lb_channel = false;
-      GRPC_LB_POLICY_WEAK_UNREF(exec_ctx, &glb_policy->base,
+      GRPC_LB_POLICY_WEAK_UNREF(&glb_policy->base,
                                 "watch_lb_channel_connectivity_cb_shutdown");
       break;
   }
 }
 
 static void glb_set_reresolve_closure_locked(
-    grpc_exec_ctx* exec_ctx, grpc_lb_policy* policy,
-    grpc_closure* request_reresolution) {
+    grpc_lb_policy* policy, grpc_closure* request_reresolution) {
   glb_lb_policy* glb_policy = (glb_lb_policy*)policy;
   GPR_ASSERT(!glb_policy->shutting_down);
   GPR_ASSERT(glb_policy->base.request_reresolution == nullptr);
   if (glb_policy->rr_policy != nullptr) {
-    grpc_lb_policy_set_reresolve_closure_locked(exec_ctx, glb_policy->rr_policy,
+    grpc_lb_policy_set_reresolve_closure_locked(glb_policy->rr_policy,
                                                 request_reresolution);
   } else {
     glb_policy->base.request_reresolution = request_reresolution;
@@ -1904,8 +1859,7 @@
     glb_update_locked,
     glb_set_reresolve_closure_locked};
 
-static grpc_lb_policy* glb_create(grpc_exec_ctx* exec_ctx,
-                                  grpc_lb_policy_factory* factory,
+static grpc_lb_policy* glb_create(grpc_lb_policy_factory* factory,
                                   grpc_lb_policy_args* args) {
   /* Count the number of gRPC-LB addresses. There must be at least one. */
   const grpc_arg* arg =
@@ -1926,7 +1880,7 @@
   arg = grpc_channel_args_find(args->args, GRPC_ARG_SERVER_URI);
   GPR_ASSERT(arg != nullptr);
   GPR_ASSERT(arg->type == GRPC_ARG_STRING);
-  grpc_uri* uri = grpc_uri_parse(exec_ctx, arg->value.string, true);
+  grpc_uri* uri = grpc_uri_parse(arg->value.string, true);
   GPR_ASSERT(uri->path[0] != '\0');
   glb_policy->server_name =
       gpr_strdup(uri->path[0] == '/' ? uri->path + 1 : uri->path);
@@ -1959,26 +1913,26 @@
   /* Extract the backend addresses (may be empty) from the resolver for
    * fallback. */
   glb_policy->fallback_backend_addresses =
-      extract_backend_addresses_locked(exec_ctx, addresses);
+      extract_backend_addresses_locked(addresses);
 
   /* Create a client channel over them to communicate with a LB service */
   glb_policy->response_generator =
       grpc_fake_resolver_response_generator_create();
   grpc_channel_args* lb_channel_args = build_lb_channel_args(
-      exec_ctx, addresses, glb_policy->response_generator, args->args);
+      addresses, glb_policy->response_generator, args->args);
   char* uri_str;
   gpr_asprintf(&uri_str, "fake:///%s", glb_policy->server_name);
   glb_policy->lb_channel = grpc_lb_policy_grpclb_create_lb_channel(
-      exec_ctx, uri_str, args->client_channel_factory, lb_channel_args);
+      uri_str, args->client_channel_factory, lb_channel_args);
 
   /* Propagate initial resolution */
   grpc_fake_resolver_response_generator_set_response(
-      exec_ctx, glb_policy->response_generator, lb_channel_args);
-  grpc_channel_args_destroy(exec_ctx, lb_channel_args);
+      glb_policy->response_generator, lb_channel_args);
+  grpc_channel_args_destroy(lb_channel_args);
   gpr_free(uri_str);
   if (glb_policy->lb_channel == nullptr) {
     gpr_free((void*)glb_policy->server_name);
-    grpc_channel_args_destroy(exec_ctx, glb_policy->args);
+    grpc_channel_args_destroy(glb_policy->args);
     gpr_free(glb_policy);
     return nullptr;
   }
@@ -2009,7 +1963,7 @@
 
 // Only add client_load_reporting filter if the grpclb LB policy is used.
 static bool maybe_add_client_load_reporting_filter(
-    grpc_exec_ctx* exec_ctx, grpc_channel_stack_builder* builder, void* arg) {
+    grpc_channel_stack_builder* builder, void* arg) {
   const grpc_channel_args* args =
       grpc_channel_stack_builder_get_channel_arguments(builder);
   const grpc_arg* channel_arg =