Merge pull request #6542 from ctiller/ecterm

Let execution contexts signal that they are done
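
This change gives each grpc_exec_ctx a pluggable "finish check": a callback plus an argument that grpc_exec_ctx_ready_to_finish consults, caching the answer once it first comes back true. To illustrate the shape such a callback takes, here is a minimal sketch of a deadline-based check; the function name and the use of gpr_now/gpr_time_cmp are assumptions for the example, not part of this patch.

    #include <grpc/support/time.h>
    #include "src/core/lib/iomgr/exec_ctx.h"

    /* Hypothetical finish check: report "ready to finish" once a deadline
       (passed via check_ready_to_finish_arg, taken on GPR_CLOCK_MONOTONIC)
       has been reached. */
    static bool deadline_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg) {
      gpr_timespec *deadline = (gpr_timespec *)arg;
      (void)exec_ctx; /* the context itself is not needed for this check */
      return gpr_time_cmp(gpr_now(GPR_CLOCK_MONOTONIC), *deadline) >= 0;
    }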
diff --git a/src/core/lib/iomgr/exec_ctx.c b/src/core/lib/iomgr/exec_ctx.c
index 2146c7d..e451479 100644
--- a/src/core/lib/iomgr/exec_ctx.c
+++ b/src/core/lib/iomgr/exec_ctx.c
@@ -39,6 +39,22 @@
 
 #include "src/core/lib/profiling/timers.h"
 
+bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx) {
+  if (!exec_ctx->cached_ready_to_finish) {
+    exec_ctx->cached_ready_to_finish = exec_ctx->check_ready_to_finish(
+        exec_ctx, exec_ctx->check_ready_to_finish_arg);
+  }
+  return exec_ctx->cached_ready_to_finish;
+}
+
+bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
+  return false;
+}
+
+bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored) {
+  return true;
+}
+
 #ifndef GRPC_EXECUTION_CONTEXT_SANITIZER
 bool grpc_exec_ctx_flush(grpc_exec_ctx *exec_ctx) {
   bool did_something = 0;
@@ -61,6 +77,7 @@
 }
 
 void grpc_exec_ctx_finish(grpc_exec_ctx *exec_ctx) {
+  exec_ctx->cached_ready_to_finish = true;
   grpc_exec_ctx_flush(exec_ctx);
 }
 
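The .c changes above fit together as follows: grpc_exec_ctx_ready_to_finish caches the first true result from the check, and grpc_exec_ctx_finish forces the cached flag to true before its final flush, so later checks on the same context stay true. A borrowed thread could consult it in its work loop roughly as below (a sketch; the function name and loop structure are illustrative, not code from this patch):

    static void drain_on_borrowed_thread(grpc_exec_ctx *exec_ctx) {
      /* Keep flushing queued closures until the finish check says the thread
         should be handed back, or there is no more work to run. */
      while (!grpc_exec_ctx_ready_to_finish(exec_ctx) &&
             grpc_exec_ctx_flush(exec_ctx)) {
      }
    }
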
diff --git a/src/core/lib/iomgr/exec_ctx.h b/src/core/lib/iomgr/exec_ctx.h
index 976cc40..9d47a26 100644
--- a/src/core/lib/iomgr/exec_ctx.h
+++ b/src/core/lib/iomgr/exec_ctx.h
@@ -53,6 +53,9 @@
  *  - track a list of work that needs to be delayed until the top of the
  *    call stack (this provides a convenient mechanism to run callbacks
  *    without worrying about locking issues)
+ *  - provide a decision maker (via grpc_exec_ctx_ready_to_finish) that signals
+ *    whether a borrowed thread should continue to do work or should actively
+ *    try to finish up and return the thread to its owner
  *
  *  CONVENTIONS:
  *  Instance of this must ALWAYS be constructed on the stack, never
@@ -63,18 +66,26 @@
  */
 struct grpc_exec_ctx {
   grpc_closure_list closure_list;
+  bool cached_ready_to_finish;
+  void *check_ready_to_finish_arg;
+  bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
 };
 
-#define GRPC_EXEC_CTX_INIT \
-  { GRPC_CLOSURE_LIST_INIT }
+#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
+  { GRPC_CLOSURE_LIST_INIT, false, finish_check_arg, finish_check }
 #else
 struct grpc_exec_ctx {
-  int unused;
+  bool cached_ready_to_finish;
+  void *check_ready_to_finish_arg;
+  bool (*check_ready_to_finish)(grpc_exec_ctx *exec_ctx, void *arg);
 };
-#define GRPC_EXEC_CTX_INIT \
-  { 0 }
+#define GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(finish_check, finish_check_arg) \
+  { false, finish_check_arg, finish_check }
 #endif
 
+#define GRPC_EXEC_CTX_INIT \
+  GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_never_ready_to_finish, NULL)
+
 /** Flush any work that has been enqueued onto this grpc_exec_ctx.
  *  Caller must guarantee that no interfering locks are held.
  *  Returns true if work was performed, false otherwise. */
@@ -86,6 +97,14 @@
 void grpc_exec_ctx_enqueue(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                            bool success,
                            grpc_workqueue *offload_target_or_null);
+/** Returns true if we'd like to leave this execution context as soon as
+    possible: useful for deciding whether to take on more work, depending on
+    outside context */
+bool grpc_exec_ctx_ready_to_finish(grpc_exec_ctx *exec_ctx);
+/** A finish check that is never ready to finish */
+bool grpc_never_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
+/** A finish check that is always ready to finish */
+bool grpc_always_ready_to_finish(grpc_exec_ctx *exec_ctx, void *arg_ignored);
 /** Add a list of closures to be executed at the next flush/finish point.
  *  Leaves \a list empty. */
 void grpc_exec_ctx_enqueue_list(grpc_exec_ctx *exec_ctx,
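
For callers, the default GRPC_EXEC_CTX_INIT keeps its old behaviour by wiring in grpc_never_ready_to_finish, while GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK opts in to a specific policy. A minimal usage sketch (the surrounding function is hypothetical):

    #include <grpc/support/log.h>
    #include "src/core/lib/iomgr/exec_ctx.h"

    static void example_caller(void) {
      /* Opt in to the "leave as soon as possible" policy for this call stack. */
      grpc_exec_ctx exec_ctx =
          GRPC_EXEC_CTX_INIT_WITH_FINISH_CHECK(grpc_always_ready_to_finish, NULL);
      /* ... enqueue closures and do work ... */
      GPR_ASSERT(grpc_exec_ctx_ready_to_finish(&exec_ctx)); /* always true here */
      grpc_exec_ctx_finish(&exec_ctx); /* flushes anything still queued */
    }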