David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 1 | /* |
| 2 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 3 | * Copyright 2015 gRPC authors. |
David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 4 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 5 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | * you may not use this file except in compliance with the License. |
| 7 | * You may obtain a copy of the License at |
David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 8 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 9 | * http://www.apache.org/licenses/LICENSE-2.0 |
David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 10 | * |
Jan Tattermusch | 7897ae9 | 2017-06-07 22:57:36 +0200 | [diff] [blame] | 11 | * Unless required by applicable law or agreed to in writing, software |
| 12 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | * See the License for the specific language governing permissions and |
| 15 | * limitations under the License. |
David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 16 | * |
| 17 | */ |
| 18 | |
Alexander Polcyn | db3e898 | 2018-02-21 16:59:24 -0800 | [diff] [blame] | 19 | #include <grpc/support/port_platform.h> |
| 20 | |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 21 | #include "src/core/lib/iomgr/executor.h" |
David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 22 | |
| 23 | #include <string.h> |
| 24 | |
| 25 | #include <grpc/support/alloc.h> |
Craig Tiller | 3e9f98e | 2017-05-12 13:17:47 -0700 | [diff] [blame] | 26 | #include <grpc/support/cpu.h> |
David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 27 | #include <grpc/support/log.h> |
| 28 | #include <grpc/support/sync.h> |
Craig Tiller | 3e9f98e | 2017-05-12 13:17:47 -0700 | [diff] [blame] | 29 | |
Craig Tiller | 57bb9a9 | 2017-08-31 16:44:15 -0700 | [diff] [blame] | 30 | #include "src/core/lib/debug/stats.h" |
Vijay Pai | b6cf123 | 2018-01-25 21:02:26 -0800 | [diff] [blame] | 31 | #include "src/core/lib/gpr/tls.h" |
Vijay Pai | d4d0a30 | 2018-01-25 13:24:03 -0800 | [diff] [blame] | 32 | #include "src/core/lib/gpr/useful.h" |
Sree Kuchibhotla | 37e4990 | 2018-07-11 18:46:29 -0700 | [diff] [blame] | 33 | #include "src/core/lib/gprpp/memory.h" |
Craig Tiller | 9533d04 | 2016-03-25 17:11:06 -0700 | [diff] [blame] | 34 | #include "src/core/lib/iomgr/exec_ctx.h" |
David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 35 | |
// Max number of closures queued on one executor thread before Enqueue()
// treats the backlog as a hint to spawn an additional thread.
#define MAX_DEPTH 2

// printf-style trace helper; logs only when the "executor" tracer is enabled.
#define EXECUTOR_TRACE(format, ...)                     \
  if (executor_trace.enabled()) {                       \
    gpr_log(GPR_INFO, "EXECUTOR " format, __VA_ARGS__); \
  }

// Trace helper for plain strings (no format arguments).
#define EXECUTOR_TRACE0(str)          \
  if (executor_trace.enabled()) {     \
    gpr_log(GPR_INFO, "EXECUTOR " str); \
  }

// Run-time flag gating the EXECUTOR_TRACE* logging above (tracer "executor").
grpc_core::TraceFlag executor_trace(false, "executor");

// Thread-local pointer to the current thread's ThreadState. Set only in
// GrpcExecutor::ThreadMain, so it is non-null exactly on executor threads;
// Enqueue() uses it to detect scheduling from an executor thread onto itself.
GPR_TLS_DECL(g_this_thread_state);
David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 51 | |
// Constructs an (unstarted) executor. `name` is used as a prefix in trace
// messages and as the thread name; threads are created later via Init() /
// SetThreading(true). The thread cap is 2x the core count (minimum 1).
GrpcExecutor::GrpcExecutor(const char* name) : name_(name) {
  adding_thread_lock_ = GPR_SPINLOCK_STATIC_INITIALIZER;
  gpr_atm_rel_store(&num_threads_, 0);
  max_threads_ = GPR_MAX(1, 2 * gpr_cpu_num_cores());
}
| 57 | |
| 58 | void GrpcExecutor::Init() { SetThreading(true); } |
| 59 | |
// Runs every closure in `list` (in order), unref'ing each closure's error
// after its callback returns. Flushes the current ExecCtx after each closure
// so work scheduled by a closure is not deferred indefinitely. Returns the
// number of closures executed (used by ThreadMain to decrement ts->depth).
size_t GrpcExecutor::RunClosures(const char* executor_name,
                                 grpc_closure_list list) {
  size_t n = 0;

  grpc_closure* c = list.head;
  while (c != nullptr) {
    // Capture `next` before running: the callback may reuse/free the closure.
    grpc_closure* next = c->next_data.next;
    grpc_error* error = c->error_data.error;
#ifndef NDEBUG
    EXECUTOR_TRACE("(%s) run %p [created by %s:%d]", executor_name, c,
                   c->file_created, c->line_created);
    c->scheduled = false;
#else
    EXECUTOR_TRACE("(%s) run %p", executor_name, c);
#endif
    c->cb(c->cb_arg, error);
    GRPC_ERROR_UNREF(error);
    c = next;
    n++;
    grpc_core::ExecCtx::Get()->Flush();
  }

  return n;
}
| 84 | |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 85 | bool GrpcExecutor::IsThreaded() const { |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 86 | return gpr_atm_acq_load(&num_threads_) > 0; |
Craig Tiller | 5e56f00 | 2017-05-16 15:02:50 -0700 | [diff] [blame] | 87 | } |
| 88 | |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 89 | void GrpcExecutor::SetThreading(bool threading) { |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 90 | gpr_atm curr_num_threads = gpr_atm_acq_load(&num_threads_); |
| 91 | EXECUTOR_TRACE("(%s) SetThreading(%d) begin", name_, threading); |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 92 | |
Craig Tiller | 5e56f00 | 2017-05-16 15:02:50 -0700 | [diff] [blame] | 93 | if (threading) { |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 94 | if (curr_num_threads > 0) { |
| 95 | EXECUTOR_TRACE("(%s) SetThreading(true). curr_num_threads == 0", name_); |
| 96 | return; |
| 97 | } |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 98 | |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 99 | GPR_ASSERT(num_threads_ == 0); |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 100 | gpr_atm_rel_store(&num_threads_, 1); |
Sree Kuchibhotla | 8cc3a00 | 2018-07-10 13:32:35 -0700 | [diff] [blame] | 101 | gpr_tls_init(&g_this_thread_state); |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 102 | thd_state_ = static_cast<ThreadState*>( |
| 103 | gpr_zalloc(sizeof(ThreadState) * max_threads_)); |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 104 | |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 105 | for (size_t i = 0; i < max_threads_; i++) { |
| 106 | gpr_mu_init(&thd_state_[i].mu); |
| 107 | gpr_cv_init(&thd_state_[i].cv); |
| 108 | thd_state_[i].id = i; |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 109 | thd_state_[i].name = name_; |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 110 | thd_state_[i].thd = grpc_core::Thread(); |
| 111 | thd_state_[i].elems = GRPC_CLOSURE_LIST_INIT; |
Craig Tiller | 5e56f00 | 2017-05-16 15:02:50 -0700 | [diff] [blame] | 112 | } |
| 113 | |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 114 | thd_state_[0].thd = |
| 115 | grpc_core::Thread(name_, &GrpcExecutor::ThreadMain, &thd_state_[0]); |
| 116 | thd_state_[0].thd.Start(); |
Sree Kuchibhotla | 02872df | 2018-07-10 14:21:51 -0700 | [diff] [blame] | 117 | } else { // !threading |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 118 | if (curr_num_threads == 0) { |
| 119 | EXECUTOR_TRACE("(%s) SetThreading(false). curr_num_threads == 0", name_); |
| 120 | return; |
| 121 | } |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 122 | |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 123 | for (size_t i = 0; i < max_threads_; i++) { |
| 124 | gpr_mu_lock(&thd_state_[i].mu); |
| 125 | thd_state_[i].shutdown = true; |
| 126 | gpr_cv_signal(&thd_state_[i].cv); |
| 127 | gpr_mu_unlock(&thd_state_[i].mu); |
Craig Tiller | 5e56f00 | 2017-05-16 15:02:50 -0700 | [diff] [blame] | 128 | } |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 129 | |
| 130 | /* Ensure no thread is adding a new thread. Once this is past, then no |
| 131 | * thread will try to add a new one either (since shutdown is true) */ |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 132 | gpr_spinlock_lock(&adding_thread_lock_); |
| 133 | gpr_spinlock_unlock(&adding_thread_lock_); |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 134 | |
Sree Kuchibhotla | 7b8a6b6 | 2018-07-11 11:51:46 -0700 | [diff] [blame] | 135 | curr_num_threads = gpr_atm_no_barrier_load(&num_threads_); |
| 136 | for (gpr_atm i = 0; i < curr_num_threads; i++) { |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 137 | thd_state_[i].thd.Join(); |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 138 | EXECUTOR_TRACE("(%s) Thread %" PRIdPTR " of %" PRIdPTR " joined", name_, |
| 139 | i + 1, curr_num_threads); |
Craig Tiller | 5e56f00 | 2017-05-16 15:02:50 -0700 | [diff] [blame] | 140 | } |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 141 | |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 142 | gpr_atm_rel_store(&num_threads_, 0); |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 143 | for (size_t i = 0; i < max_threads_; i++) { |
| 144 | gpr_mu_destroy(&thd_state_[i].mu); |
| 145 | gpr_cv_destroy(&thd_state_[i].cv); |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 146 | RunClosures(thd_state_[i].name, thd_state_[i].elems); |
Craig Tiller | 5e56f00 | 2017-05-16 15:02:50 -0700 | [diff] [blame] | 147 | } |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 148 | |
Sree Kuchibhotla | 83d0bfa | 2018-07-10 11:29:43 -0700 | [diff] [blame] | 149 | gpr_free(thd_state_); |
Craig Tiller | 5e56f00 | 2017-05-16 15:02:50 -0700 | [diff] [blame] | 150 | gpr_tls_destroy(&g_this_thread_state); |
David Garcia Quintas | 4bc3463 | 2015-10-07 16:12:35 -0700 | [diff] [blame] | 151 | } |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 152 | |
| 153 | EXECUTOR_TRACE("(%s) SetThreading(%d) done", name_, threading); |
Craig Tiller | 5e56f00 | 2017-05-16 15:02:50 -0700 | [diff] [blame] | 154 | } |
| 155 | |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 156 | void GrpcExecutor::Shutdown() { SetThreading(false); } |
Craig Tiller | 5e56f00 | 2017-05-16 15:02:50 -0700 | [diff] [blame] | 157 | |
// Entry point for each executor thread. `arg` is this thread's ThreadState.
// Loops: wait for closures (or shutdown), steal the whole pending list under
// the lock, then run it outside the lock via RunClosures().
void GrpcExecutor::ThreadMain(void* arg) {
  ThreadState* ts = static_cast<ThreadState*>(arg);
  // Publish this thread's state in TLS so Enqueue() can recognize calls made
  // from an executor thread (and count scheduled-to-self).
  gpr_tls_set(&g_this_thread_state, reinterpret_cast<intptr_t>(ts));

  grpc_core::ExecCtx exec_ctx(GRPC_EXEC_CTX_FLAG_IS_INTERNAL_THREAD);

  // Closures executed in the previous iteration; subtracted from ts->depth
  // under the lock at the top of the next iteration.
  size_t subtract_depth = 0;
  for (;;) {
    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: step (sub_depth=%" PRIdPTR ")",
                   ts->name, ts->id, subtract_depth);

    gpr_mu_lock(&ts->mu);
    ts->depth -= subtract_depth;
    // Wait for closures to be enqueued or for the executor to be shutdown
    while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
      // An idle thread no longer holds a pending long job.
      ts->queued_long_job = false;
      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_MONOTONIC));
    }

    if (ts->shutdown) {
      EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: shutdown", ts->name, ts->id);
      gpr_mu_unlock(&ts->mu);
      break;
    }

    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED();
    // Take the entire pending list; closures run without holding ts->mu.
    grpc_closure_list closures = ts->elems;
    ts->elems = GRPC_CLOSURE_LIST_INIT;
    gpr_mu_unlock(&ts->mu);

    EXECUTOR_TRACE("(%s) [%" PRIdPTR "]: execute", ts->name, ts->id);

    grpc_core::ExecCtx::Get()->InvalidateNow();
    subtract_depth = RunClosures(ts->name, closures);
  }
}
| 194 | |
// Schedules `closure` (with `error`) on this executor.
//
// - If the executor has no threads (not started or shut down), the closure is
//   appended to the caller's ExecCtx and will run inline on that thread.
// - Otherwise a target thread is picked (the current executor thread if the
//   caller is one, else by hashing the ExecCtx pointer); threads holding a
//   queued long job are skipped to avoid starvation.
// - `is_short` marks short-lived jobs; a long job "reserves" its thread.
// Queue depth beyond MAX_DEPTH triggers lazily spawning another thread, up to
// max_threads_.
void GrpcExecutor::Enqueue(grpc_closure* closure, grpc_error* error,
                           bool is_short) {
  bool retry_push;
  if (is_short) {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS();
  } else {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS();
  }

  do {
    retry_push = false;
    size_t cur_thread_count =
        static_cast<size_t>(gpr_atm_acq_load(&num_threads_));

    // If the number of threads is zero(i.e either the executor is not threaded
    // or already shutdown), then queue the closure on the exec context itself
    if (cur_thread_count == 0) {
#ifndef NDEBUG
      EXECUTOR_TRACE("(%s) schedule %p (created %s:%d) inline", name_, closure,
                     closure->file_created, closure->line_created);
#else
      EXECUTOR_TRACE("(%s) schedule %p inline", name_, closure);
#endif
      grpc_closure_list_append(grpc_core::ExecCtx::Get()->closure_list(),
                               closure, error);
      return;
    }

    // Non-null iff the caller is itself an executor thread (set in
    // ThreadMain); in that case prefer the caller's own queue.
    ThreadState* ts = (ThreadState*)gpr_tls_get(&g_this_thread_state);
    if (ts == nullptr) {
      ts = &thd_state_[GPR_HASH_POINTER(grpc_core::ExecCtx::Get(),
                                        cur_thread_count)];
    } else {
      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF();
    }

    // Remember the starting thread so we can tell when we've cycled through
    // every candidate without finding a free queue.
    ThreadState* orig_ts = ts;
    bool try_new_thread = false;

    for (;;) {
#ifndef NDEBUG
      EXECUTOR_TRACE(
          "(%s) try to schedule %p (%s) (created %s:%d) to thread "
          "%" PRIdPTR,
          name_, closure, is_short ? "short" : "long", closure->file_created,
          closure->line_created, ts->id);
#else
      EXECUTOR_TRACE("(%s) try to schedule %p (%s) to thread %" PRIdPTR, name_,
                     closure, is_short ? "short" : "long", ts->id);
#endif

      gpr_mu_lock(&ts->mu);
      if (ts->queued_long_job) {
        // if there's a long job queued, we never queue anything else to this
        // queue (since long jobs can take 'infinite' time and we need to
        // guarantee no starvation). Spin through queues and try again
        gpr_mu_unlock(&ts->mu);
        size_t idx = ts->id;
        ts = &thd_state_[(idx + 1) % cur_thread_count];
        if (ts == orig_ts) {
          // We cycled through all the threads. Retry enqueue again by creating
          // a new thread
          //
          // TODO (sreek): There is a potential issue here. We are
          // unconditionally setting try_new_thread to true here. What if the
          // executor is shutdown OR if cur_thread_count is already equal to
          // max_threads ?
          // (Fortunately, this is not an issue yet (as of july 2018) because
          // there is only one instance of long job in gRPC and hence we will
          // not hit this code path)
          retry_push = true;
          try_new_thread = true;
          break;
        }

        continue;  // Try the next thread-state
      }

      // == Found the thread state (i.e thread) to enqueue this closure! ==

      // Also, if this thread has been waiting for closures, wake it up.
      // - If grpc_closure_list_empty() is true and the Executor is not
      //   shutdown, it means that the thread must be waiting in ThreadMain()
      // - Note that gpr_cv_signal() won't immediately wakeup the thread. That
      //   happens after we release the mutex &ts->mu a few lines below
      if (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED();
        gpr_cv_signal(&ts->cv);
      }

      grpc_closure_list_append(&ts->elems, closure, error);

      // If we already queued more than MAX_DEPTH number of closures on this
      // thread, use this as a hint to create more threads
      ts->depth++;
      try_new_thread = ts->depth > MAX_DEPTH &&
                       cur_thread_count < max_threads_ && !ts->shutdown;

      ts->queued_long_job = !is_short;

      gpr_mu_unlock(&ts->mu);
      break;
    }

    // Spawn an additional thread if hinted above; the spinlock serializes
    // thread creation, and a failed trylock just means someone else is
    // already adding one.
    if (try_new_thread && gpr_spinlock_trylock(&adding_thread_lock_)) {
      cur_thread_count = static_cast<size_t>(gpr_atm_acq_load(&num_threads_));
      if (cur_thread_count < max_threads_) {
        // Increment num_threads (safe to do a store instead of a cas because we
        // always increment num_threads under the 'adding_thread_lock')
        gpr_atm_rel_store(&num_threads_, cur_thread_count + 1);

        thd_state_[cur_thread_count].thd = grpc_core::Thread(
            name_, &GrpcExecutor::ThreadMain, &thd_state_[cur_thread_count]);
        thd_state_[cur_thread_count].thd.Start();
      }
      gpr_spinlock_unlock(&adding_thread_lock_);
    }

    if (retry_push) {
      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES();
    }
  } while (retry_push);
}
Craig Tiller | 91031da | 2016-12-28 15:44:25 -0800 | [diff] [blame] | 318 | |
// One executor instance per GrpcExecutorType, indexed by the enum value;
// created in grpc_executor_init() and destroyed in grpc_executor_shutdown().
static GrpcExecutor* executors[GRPC_NUM_EXECUTORS];
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 320 | |
Sree Kuchibhotla | 37d8bbc | 2018-07-10 13:30:57 -0700 | [diff] [blame] | 321 | void default_enqueue_short(grpc_closure* closure, grpc_error* error) { |
| 322 | executors[GRPC_DEFAULT_EXECUTOR]->Enqueue(closure, error, |
| 323 | true /* is_short */); |
Craig Tiller | 7a82afd | 2017-07-18 09:40:40 -0700 | [diff] [blame] | 324 | } |
| 325 | |
Sree Kuchibhotla | 37d8bbc | 2018-07-10 13:30:57 -0700 | [diff] [blame] | 326 | void default_enqueue_long(grpc_closure* closure, grpc_error* error) { |
| 327 | executors[GRPC_DEFAULT_EXECUTOR]->Enqueue(closure, error, |
| 328 | false /* is_short */); |
Craig Tiller | 7a82afd | 2017-07-18 09:40:40 -0700 | [diff] [blame] | 329 | } |
| 330 | |
Sree Kuchibhotla | 37d8bbc | 2018-07-10 13:30:57 -0700 | [diff] [blame] | 331 | void resolver_enqueue_short(grpc_closure* closure, grpc_error* error) { |
| 332 | executors[GRPC_RESOLVER_EXECUTOR]->Enqueue(closure, error, |
| 333 | true /* is_short */); |
| 334 | } |
Craig Tiller | 7a82afd | 2017-07-18 09:40:40 -0700 | [diff] [blame] | 335 | |
Sree Kuchibhotla | 37d8bbc | 2018-07-10 13:30:57 -0700 | [diff] [blame] | 336 | void resolver_enqueue_long(grpc_closure* closure, grpc_error* error) { |
| 337 | executors[GRPC_RESOLVER_EXECUTOR]->Enqueue(closure, error, |
| 338 | false /* is_short */); |
| 339 | } |
| 340 | |
// Vtable per scheduler slot; each entry registers the same enqueue function
// for both callback slots (NOTE(review): looks like run and run-on-exec-ctx
// share the implementation — confirm against grpc_closure_scheduler_vtable).
static const grpc_closure_scheduler_vtable vtables_[] = {
    {&default_enqueue_short, &default_enqueue_short, "def-ex-short"},
    {&default_enqueue_long, &default_enqueue_long, "def-ex-long"},
    {&resolver_enqueue_short, &resolver_enqueue_short, "res-ex-short"},
    {&resolver_enqueue_long, &resolver_enqueue_long, "res-ex-long"}};

// Order must match the (executor_type, job_type) flat indexing performed by
// grpc_executor_scheduler() below.
static grpc_closure_scheduler schedulers_[] = {
    {&vtables_[0]},  // Default short
    {&vtables_[1]},  // Default long
    {&vtables_[2]},  // Resolver short
    {&vtables_[3]}   // Resolver long
};
| 353 | |
| 354 | const char* executor_name(GrpcExecutorType executor_type) { |
| 355 | switch (executor_type) { |
| 356 | case GRPC_DEFAULT_EXECUTOR: |
| 357 | return "default-executor"; |
| 358 | case GRPC_RESOLVER_EXECUTOR: |
| 359 | return "resolver-executor"; |
| 360 | default: |
| 361 | GPR_UNREACHABLE_CODE(return "unknown"); |
| 362 | } |
| 363 | GPR_UNREACHABLE_CODE(return "unknown"); |
| 364 | } |
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 365 | |
// grpc_executor_init() and grpc_executor_shutdown() functions are called in
// the grpc_init() and grpc_shutdown() code paths which are protected by a
// global mutex. So it is okay to assume that these functions are thread-safe.
// Creates and starts one GrpcExecutor per GrpcExecutorType; idempotent.
void grpc_executor_init() {
  EXECUTOR_TRACE0("grpc_executor_init() enter");
  for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) {
    // Return if grpc_executor_init() already called earlier
    if (executors[i] != nullptr) {
      // Ideally we should also assert that all executors i.e executor[0] to
      // executor[GRPC_NUM_EXECUTORS-1] are != nullptr too.
      GPR_ASSERT(i == 0);
      break;
    }

    executors[i] = grpc_core::New<GrpcExecutor>(
        executor_name(static_cast<GrpcExecutorType>(i)));
    executors[i]->Init();
  }
  EXECUTOR_TRACE0("grpc_executor_init() done");
}
Sree Kuchibhotla | 7e9d525 | 2018-07-09 14:53:54 -0700 | [diff] [blame] | 386 | |
// Returns the static scheduler for the given (executor, job-type) pair.
// NOTE(review): the row stride is GRPC_NUM_EXECUTORS; this indexes the
// 4-entry schedulers_ table correctly only while the number of job types
// per executor equals GRPC_NUM_EXECUTORS — confirm if either enum grows.
grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorType executor_type,
                                                GrpcExecutorJobType job_type) {
  return &schedulers_[(executor_type * GRPC_NUM_EXECUTORS) + job_type];
}
Craig Tiller | 7a82afd | 2017-07-18 09:40:40 -0700 | [diff] [blame] | 391 | |
Sree Kuchibhotla | 1e69b7c | 2018-07-10 19:30:09 -0700 | [diff] [blame] | 392 | grpc_closure_scheduler* grpc_executor_scheduler(GrpcExecutorJobType job_type) { |
Sree Kuchibhotla | 37d8bbc | 2018-07-10 13:30:57 -0700 | [diff] [blame] | 393 | return grpc_executor_scheduler(GRPC_DEFAULT_EXECUTOR, job_type); |
| 394 | } |
| 395 | |
// Shuts down and destroys all executors; idempotent. Called from the
// grpc_shutdown() path (serialized by a global mutex — see the note above
// grpc_executor_init()).
void grpc_executor_shutdown() {
  EXECUTOR_TRACE0("grpc_executor_shutdown() enter");
  for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) {
    // Return if grpc_executor_shutdown() is already called earlier
    if (executors[i] == nullptr) {
      // Ideally we should also assert that all executors i.e executor[0] to
      // executor[GRPC_NUM_EXECUTORS-1] are nullptr too.
      GPR_ASSERT(i == 0);
      break;
    }
    executors[i]->Shutdown();
  }

  // Delete the executor objects.
  //
  // NOTE: It is important to do this in a separate loop (i.e ONLY after all the
  // executors are 'Shutdown' first) because it is possible for one executor
  // (that is not shutdown yet) to call Enqueue() on a different executor which
  // is already shutdown. This is legal and in such cases, the Enqueue()
  // operation effectively "fails" and enqueues that closure on the calling
  // thread's exec_ctx.
  //
  // By ensuring that all executors are shutdown first, we are also ensuring
  // that no thread is active across all executors.
  for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) {
    grpc_core::Delete<GrpcExecutor>(executors[i]);
    executors[i] = nullptr;
  }
  EXECUTOR_TRACE0("grpc_executor_shutdown() done");
}
| 426 | |
| 427 | bool grpc_executor_is_threaded(GrpcExecutorType executor_type) { |
| 428 | GPR_ASSERT(executor_type < GRPC_NUM_EXECUTORS); |
| 429 | return executors[executor_type]->IsThreaded(); |
| 430 | } |
| 431 | |
| 432 | bool grpc_executor_is_threaded() { |
| 433 | return grpc_executor_is_threaded(GRPC_DEFAULT_EXECUTOR); |
| 434 | } |
| 435 | |
| 436 | void grpc_executor_set_threading(bool enable) { |
Sree Kuchibhotla | 00476fd | 2018-07-16 18:09:27 -0700 | [diff] [blame^] | 437 | EXECUTOR_TRACE("grpc_executor_set_threading(%d) called", enable); |
Sree Kuchibhotla | 37d8bbc | 2018-07-10 13:30:57 -0700 | [diff] [blame] | 438 | for (int i = 0; i < GRPC_NUM_EXECUTORS; i++) { |
| 439 | executors[i]->SetThreading(enable); |
| 440 | } |
Craig Tiller | 7a82afd | 2017-07-18 09:40:40 -0700 | [diff] [blame] | 441 | } |