Revert "Revert "All instances of exec_ctx being passed around in src/core removed""
diff --git a/src/core/lib/iomgr/ev_epoll1_linux.cc b/src/core/lib/iomgr/ev_epoll1_linux.cc
index 0dda1d9..d9e8a30 100644
--- a/src/core/lib/iomgr/ev_epoll1_linux.cc
+++ b/src/core/lib/iomgr/ev_epoll1_linux.cc
@@ -299,31 +299,29 @@
 /* if 'releasing_fd' is true, it means that we are going to detach the internal
  * fd from the grpc_fd structure (i.e. we should not be calling the
  * shutdown() syscall on that fd) */
-static void fd_shutdown_internal(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                                 grpc_error* why, bool releasing_fd) {
-  if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
+static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why,
+                                 bool releasing_fd) {
+  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
     if (!releasing_fd) {
       shutdown(fd->fd, SHUT_RDWR);
     }
-    fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
+    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
   }
   GRPC_ERROR_UNREF(why);
 }
 
 /* Might be called multiple times */
-static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
-  fd_shutdown_internal(exec_ctx, fd, why, false);
+static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
+  fd_shutdown_internal(fd, why, false);
 }
 
-static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                      grpc_closure* on_done, int* release_fd,
+static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                       bool already_closed, const char* reason) {
   grpc_error* error = GRPC_ERROR_NONE;
   bool is_release_fd = (release_fd != nullptr);
 
   if (!fd->read_closure->IsShutdown()) {
-    fd_shutdown_internal(exec_ctx, fd,
-                         GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
+    fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                          is_release_fd);
   }
 
@@ -335,7 +333,7 @@
     close(fd->fd);
   }
 
-  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));
+  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error));
 
   grpc_iomgr_unregister_object(&fd->iomgr_object);
   fd->read_closure->DestroyEvent();
@@ -347,8 +345,7 @@
   gpr_mu_unlock(&fd_freelist_mu);
 }
 
-static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
-                                                  grpc_fd* fd) {
+static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
   gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
   return (grpc_pollset*)notifier;
 }
@@ -357,26 +354,21 @@
   return fd->read_closure->IsShutdown();
 }
 
-static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                              grpc_closure* closure) {
-  fd->read_closure->NotifyOn(exec_ctx, closure);
+static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
+  fd->read_closure->NotifyOn(closure);
 }
 
-static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               grpc_closure* closure) {
-  fd->write_closure->NotifyOn(exec_ctx, closure);
+static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
+  fd->write_closure->NotifyOn(closure);
 }
 
-static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
-                               grpc_pollset* notifier) {
-  fd->read_closure->SetReady(exec_ctx);
+static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
+  fd->read_closure->SetReady();
   /* Use release store to match with acquire load in fd_get_read_notifier */
   gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
 }
 
-static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
-  fd->write_closure->SetReady(exec_ctx);
-}
+static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }
 
 /*******************************************************************************
  * Pollset Definitions
@@ -479,7 +471,7 @@
   pollset->next = pollset->prev = nullptr;
 }
 
-static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
+static void pollset_destroy(grpc_pollset* pollset) {
   gpr_mu_lock(&pollset->mu);
   if (!pollset->seen_inactive) {
     pollset_neighborhood* neighborhood = pollset->neighborhood;
@@ -507,27 +499,26 @@
   gpr_mu_destroy(&pollset->mu);
 }
 
-static grpc_error* pollset_kick_all(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset* pollset) {
+static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
   GPR_TIMER_BEGIN("pollset_kick_all", 0);
   grpc_error* error = GRPC_ERROR_NONE;
   if (pollset->root_worker != nullptr) {
     grpc_pollset_worker* worker = pollset->root_worker;
     do {
-      GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+      GRPC_STATS_INC_POLLSET_KICK();
       switch (worker->state) {
         case KICKED:
-          GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+          GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
           break;
         case UNKICKED:
           SET_KICK_STATE(worker, KICKED);
           if (worker->initialized_cv) {
-            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
             gpr_cv_signal(&worker->cv);
           }
           break;
         case DESIGNATED_POLLER:
-          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
           SET_KICK_STATE(worker, KICKED);
           append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                        "pollset_kick_all");
@@ -543,32 +534,29 @@
   return error;
 }
 
-static void pollset_maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
-                                          grpc_pollset* pollset) {
+static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
   if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
       pollset->begin_refs == 0) {
     GPR_TIMER_MARK("pollset_finish_shutdown", 0);
-    GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
+    GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
     pollset->shutdown_closure = nullptr;
   }
 }
 
-static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                             grpc_closure* closure) {
+static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
   GPR_TIMER_BEGIN("pollset_shutdown", 0);
   GPR_ASSERT(pollset->shutdown_closure == nullptr);
   GPR_ASSERT(!pollset->shutting_down);
   pollset->shutdown_closure = closure;
   pollset->shutting_down = true;
-  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
-  pollset_maybe_finish_shutdown(exec_ctx, pollset);
+  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
+  pollset_maybe_finish_shutdown(pollset);
   GPR_TIMER_END("pollset_shutdown", 0);
 }
 
-static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
-                                           grpc_millis millis) {
+static int poll_deadline_to_millis_timeout(grpc_millis millis) {
   if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
-  grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
+  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
   if (delta > INT_MAX) {
     return INT_MAX;
   } else if (delta < 0) {
@@ -586,8 +574,7 @@
    NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
    called by g_active_poller thread. So there is no need for synchronization
    when accessing fields in g_epoll_set */
-static grpc_error* process_epoll_events(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset* pollset) {
+static grpc_error* process_epoll_events(grpc_pollset* pollset) {
   static const char* err_desc = "process_events";
   grpc_error* error = GRPC_ERROR_NONE;
 
@@ -611,11 +598,11 @@
       bool write_ev = (ev->events & EPOLLOUT) != 0;
 
       if (read_ev || cancel) {
-        fd_become_readable(exec_ctx, fd, pollset);
+        fd_become_readable(fd, pollset);
       }
 
       if (write_ev || cancel) {
-        fd_become_writable(exec_ctx, fd);
+        fd_become_writable(fd);
       }
     }
   }
@@ -631,27 +618,26 @@
    NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
    (i.e the designated poller thread) will be calling this function. So there is
    no need for any synchronization when accessing fields in g_epoll_set */
-static grpc_error* do_epoll_wait(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
-                                 grpc_millis deadline) {
+static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
   GPR_TIMER_BEGIN("do_epoll_wait", 0);
 
   int r;
-  int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
+  int timeout = poll_deadline_to_millis_timeout(deadline);
   if (timeout != 0) {
     GRPC_SCHEDULING_START_BLOCKING_REGION;
   }
   do {
-    GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
+    GRPC_STATS_INC_SYSCALL_POLL();
     r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
                    timeout);
   } while (r < 0 && errno == EINTR);
   if (timeout != 0) {
-    GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
+    GRPC_SCHEDULING_END_BLOCKING_REGION;
   }
 
   if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
 
-  GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);
+  GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
 
   if (grpc_polling_trace.enabled()) {
     gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
@@ -664,8 +650,7 @@
   return GRPC_ERROR_NONE;
 }
 
-static bool begin_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                         grpc_pollset_worker* worker,
+static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                          grpc_pollset_worker** worker_hdl,
                          grpc_millis deadline) {
   GPR_TIMER_BEGIN("begin_worker", 0);
@@ -760,7 +745,7 @@
         SET_KICK_STATE(worker, KICKED);
       }
     }
-    grpc_exec_ctx_invalidate_now(exec_ctx);
+    grpc_core::ExecCtx::Get()->InvalidateNow();
   }
 
   if (grpc_polling_trace.enabled()) {
@@ -791,7 +776,7 @@
 }
 
 static bool check_neighborhood_for_available_poller(
-    grpc_exec_ctx* exec_ctx, pollset_neighborhood* neighborhood) {
+    pollset_neighborhood* neighborhood) {
   GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
   bool found_worker = false;
   do {
@@ -815,7 +800,7 @@
               SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
               if (inspect_worker->initialized_cv) {
                 GPR_TIMER_MARK("signal worker", 0);
-                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
                 gpr_cv_signal(&inspect_worker->cv);
               }
             } else {
@@ -855,8 +840,7 @@
   return found_worker;
 }
 
-static void end_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                       grpc_pollset_worker* worker,
+static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                        grpc_pollset_worker** worker_hdl) {
   GPR_TIMER_BEGIN("end_worker", 0);
   if (grpc_polling_trace.enabled()) {
@@ -866,7 +850,7 @@
   /* Make sure we appear kicked */
   SET_KICK_STATE(worker, KICKED);
   grpc_closure_list_move(&worker->schedule_on_end_work,
-                         &exec_ctx->closure_list);
+                         grpc_core::ExecCtx::Get()->closure_list());
   if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
     if (worker->next != worker && worker->next->state == UNKICKED) {
       if (grpc_polling_trace.enabled()) {
@@ -875,11 +859,11 @@
       GPR_ASSERT(worker->next->initialized_cv);
       gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
       SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
-      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
       gpr_cv_signal(&worker->next->cv);
-      if (grpc_exec_ctx_has_work(exec_ctx)) {
+      if (grpc_core::ExecCtx::Get()->HasWork()) {
         gpr_mu_unlock(&pollset->mu);
-        grpc_exec_ctx_flush(exec_ctx);
+        grpc_core::ExecCtx::Get()->Flush();
         gpr_mu_lock(&pollset->mu);
       }
     } else {
@@ -894,8 +878,7 @@
             &g_neighborhoods[(poller_neighborhood_idx + i) %
                              g_num_neighborhoods];
         if (gpr_mu_trylock(&neighborhood->mu)) {
-          found_worker =
-              check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+          found_worker = check_neighborhood_for_available_poller(neighborhood);
           gpr_mu_unlock(&neighborhood->mu);
           scan_state[i] = true;
         } else {
@@ -908,16 +891,15 @@
             &g_neighborhoods[(poller_neighborhood_idx + i) %
                              g_num_neighborhoods];
         gpr_mu_lock(&neighborhood->mu);
-        found_worker =
-            check_neighborhood_for_available_poller(exec_ctx, neighborhood);
+        found_worker = check_neighborhood_for_available_poller(neighborhood);
         gpr_mu_unlock(&neighborhood->mu);
       }
-      grpc_exec_ctx_flush(exec_ctx);
+      grpc_core::ExecCtx::Get()->Flush();
       gpr_mu_lock(&pollset->mu);
     }
-  } else if (grpc_exec_ctx_has_work(exec_ctx)) {
+  } else if (grpc_core::ExecCtx::Get()->HasWork()) {
     gpr_mu_unlock(&pollset->mu);
-    grpc_exec_ctx_flush(exec_ctx);
+    grpc_core::ExecCtx::Get()->Flush();
     gpr_mu_lock(&pollset->mu);
   }
   if (worker->initialized_cv) {
@@ -927,7 +909,7 @@
     gpr_log(GPR_DEBUG, " .. remove worker");
   }
   if (EMPTIED == worker_remove(pollset, worker)) {
-    pollset_maybe_finish_shutdown(exec_ctx, pollset);
+    pollset_maybe_finish_shutdown(pollset);
   }
   GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
   GPR_TIMER_END("end_worker", 0);
@@ -937,7 +919,7 @@
    The function pollset_work() may temporarily release the lock (pollset->mu)
    during the course of its execution but it will always re-acquire the lock and
    ensure that it is held by the time the function returns */
-static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
+static grpc_error* pollset_work(grpc_pollset* ps,
                                 grpc_pollset_worker** worker_hdl,
                                 grpc_millis deadline) {
   grpc_pollset_worker worker;
@@ -950,7 +932,7 @@
     return GRPC_ERROR_NONE;
   }
 
-  if (begin_worker(exec_ctx, ps, &worker, worker_hdl, deadline)) {
+  if (begin_worker(ps, &worker, worker_hdl, deadline)) {
     gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
     gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
     GPR_ASSERT(!ps->shutting_down);
@@ -968,14 +950,14 @@
 
        process_epoll_events() returns very quickly: It just queues the work on
        exec_ctx but does not execute it (the actual execution or more
-       accurately grpc_exec_ctx_flush() happens in end_worker() AFTER selecting
-       a designated poller). So we are not waiting long periods without a
-       designated poller */
+       accurately grpc_core::ExecCtx::Get()->Flush() happens in end_worker()
+       AFTER selecting a designated poller). So we are not waiting long periods
+       without a designated poller */
     if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
         gpr_atm_acq_load(&g_epoll_set.num_events)) {
-      append_error(&error, do_epoll_wait(exec_ctx, ps, deadline), err_desc);
+      append_error(&error, do_epoll_wait(ps, deadline), err_desc);
     }
-    append_error(&error, process_epoll_events(exec_ctx, ps), err_desc);
+    append_error(&error, process_epoll_events(ps), err_desc);
 
     gpr_mu_lock(&ps->mu); /* lock */
 
@@ -983,17 +965,17 @@
   } else {
     gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
   }
-  end_worker(exec_ctx, ps, &worker, worker_hdl);
+  end_worker(ps, &worker, worker_hdl);
 
   gpr_tls_set(&g_current_thread_pollset, 0);
   GPR_TIMER_END("pollset_work", 0);
   return error;
 }
 
-static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
+static grpc_error* pollset_kick(grpc_pollset* pollset,
                                 grpc_pollset_worker* specific_worker) {
   GPR_TIMER_BEGIN("pollset_kick", 0);
-  GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
+  GRPC_STATS_INC_POLLSET_KICK();
   grpc_error* ret_err = GRPC_ERROR_NONE;
   if (grpc_polling_trace.enabled()) {
     gpr_strvec log;
@@ -1026,7 +1008,7 @@
     if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
       grpc_pollset_worker* root_worker = pollset->root_worker;
       if (root_worker == nullptr) {
-        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
         pollset->kicked_without_poller = true;
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. kicked_without_poller");
@@ -1035,14 +1017,14 @@
       }
       grpc_pollset_worker* next_worker = root_worker->next;
       if (root_worker->state == KICKED) {
-        GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
         }
         SET_KICK_STATE(root_worker, KICKED);
         goto done;
       } else if (next_worker->state == KICKED) {
-        GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
         }
@@ -1053,7 +1035,7 @@
                                      // there is no next worker
                  root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
                                     &g_active_poller)) {
-        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
         }
@@ -1061,7 +1043,7 @@
         ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
         goto done;
       } else if (next_worker->state == UNKICKED) {
-        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
         if (grpc_polling_trace.enabled()) {
           gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
         }
@@ -1079,12 +1061,12 @@
           }
           SET_KICK_STATE(root_worker, KICKED);
           if (root_worker->initialized_cv) {
-            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
             gpr_cv_signal(&root_worker->cv);
           }
           goto done;
         } else {
-          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
           if (grpc_polling_trace.enabled()) {
             gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
                     root_worker);
@@ -1094,13 +1076,13 @@
           goto done;
         }
       } else {
-        GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+        GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
         GPR_ASSERT(next_worker->state == KICKED);
         SET_KICK_STATE(next_worker, KICKED);
         goto done;
       }
     } else {
-      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
+      GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
       if (grpc_polling_trace.enabled()) {
         gpr_log(GPR_ERROR, " .. kicked while waking up");
       }
@@ -1117,7 +1099,7 @@
     goto done;
   } else if (gpr_tls_get(&g_current_thread_worker) ==
              (intptr_t)specific_worker) {
-    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
     }
@@ -1125,7 +1107,7 @@
     goto done;
   } else if (specific_worker ==
              (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
-    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_ERROR, " .. kick active poller");
     }
@@ -1133,7 +1115,7 @@
     ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
     goto done;
   } else if (specific_worker->initialized_cv) {
-    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_ERROR, " .. kick waiting worker");
     }
@@ -1141,7 +1123,7 @@
     gpr_cv_signal(&specific_worker->cv);
     goto done;
   } else {
-    GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
+    GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
     if (grpc_polling_trace.enabled()) {
       gpr_log(GPR_ERROR, " .. kick non-waiting worker");
     }
@@ -1153,8 +1135,7 @@
   return ret_err;
 }
 
-static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
-                           grpc_fd* fd) {}
+static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
 
 /*******************************************************************************
  * Pollset-set Definitions
@@ -1164,27 +1145,20 @@
   return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
 }
 
-static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
-                                grpc_pollset_set* pss) {}
+static void pollset_set_destroy(grpc_pollset_set* pss) {}
 
-static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
-                               grpc_fd* fd) {}
+static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
 
-static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
-                               grpc_fd* fd) {}
+static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
 
-static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pss, grpc_pollset* ps) {}
+static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
 
-static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
-                                    grpc_pollset_set* pss, grpc_pollset* ps) {}
+static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
 
-static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* bag,
+static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
                                         grpc_pollset_set* item) {}
 
-static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
-                                        grpc_pollset_set* bag,
+static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                         grpc_pollset_set* item) {}
 
 /*******************************************************************************
@@ -1260,7 +1234,7 @@
 const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
   gpr_log(GPR_ERROR,
           "Skipping epoll1 becuase GRPC_LINUX_EPOLL is not defined.");
-  return NULL;
+  return nullptr;
 }
 #endif /* defined(GRPC_POSIX_SOCKET) */
 #endif /* !defined(GRPC_LINUX_EPOLL) */