/*
 *
 * Copyright 2015 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include "src/core/lib/iomgr/executor.h"

#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/sync.h>
#include <grpc/support/thd.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/exec_ctx.h"
#include "src/core/lib/support/spinlock.h"

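/* Overview: the executor offloads closures onto a small pool of background
   worker threads. Each worker owns its own closure queue; pushers pick a
   queue, append, and wake the worker. The pool starts with one thread and
   grows on demand (up to twice the core count) whenever a queue gets deeper
   than MAX_DEPTH. */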
#define MAX_DEPTH 2

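/* Per-worker-thread state. 'mu' and 'cv' guard the closure queue 'elems';
   'depth' tracks how many closures are pending on this queue; 'shutdown'
   asks the worker to exit; 'queued_long_job' records that a long-running
   closure is already queued here, so pushers prefer another thread; 'id' is
   the underlying thread handle. */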
typedef struct {
  gpr_mu mu;
  gpr_cv cv;
  grpc_closure_list elems;
  size_t depth;
  bool shutdown;
  bool queued_long_job;
  gpr_thd_id id;
} thread_state;

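/* Global executor state: 'g_thread_state' is an array of g_max_threads
   thread_state slots, of which the first g_cur_threads (an atomic) are live.
   'g_adding_thread_lock' serializes thread creation, and
   'g_this_thread_state' is thread-local storage pointing at the current
   worker's slot (unset on non-executor threads). */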
static thread_state *g_thread_state;
static size_t g_max_threads;
static gpr_atm g_cur_threads;
static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;

GPR_TLS_DECL(g_this_thread_state);

static grpc_tracer_flag executor_trace =
    GRPC_TRACER_INITIALIZER(false, "executor");

static void executor_thread(void *arg);

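/* Run every closure in 'list' on the calling thread, flushing the exec_ctx
   after each one, and return the number of closures executed (the worker
   loop uses this to reduce its recorded queue depth). */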
static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
  size_t n = 0;

  grpc_closure *c = list.head;
  while (c != NULL) {
    grpc_closure *next = c->next_data.next;
    grpc_error *error = c->error_data.error;
    if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
      gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
              c->file_created, c->line_created);
#else
      gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
#endif
    }
#ifndef NDEBUG
    c->scheduled = false;
#endif
    c->cb(exec_ctx, c->cb_arg, error);
    GRPC_ERROR_UNREF(error);
    c = next;
    n++;
    grpc_exec_ctx_flush(exec_ctx);
  }

  return n;
}

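/* Returns true iff the executor currently has background threads running. */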
bool grpc_executor_is_threaded() {
  return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
}

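/* Turn background threading on or off. Enabling spawns the first worker and
   sizes the pool to at most 2 * core count; disabling flags every worker for
   shutdown, joins them, and runs any closures left in their queues on the
   calling thread. */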
void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
  gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
  if (threading) {
    if (cur_threads > 0) return;
    g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
    gpr_atm_no_barrier_store(&g_cur_threads, 1);
    gpr_tls_init(&g_this_thread_state);
    g_thread_state = gpr_zalloc(sizeof(thread_state) * g_max_threads);
    for (size_t i = 0; i < g_max_threads; i++) {
      gpr_mu_init(&g_thread_state[i].mu);
      gpr_cv_init(&g_thread_state[i].cv);
      g_thread_state[i].elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
    }

    gpr_thd_options opt = gpr_thd_options_default();
    gpr_thd_options_set_joinable(&opt);
    gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0],
                &opt);
  } else {
    if (cur_threads == 0) return;
    for (size_t i = 0; i < g_max_threads; i++) {
      gpr_mu_lock(&g_thread_state[i].mu);
      g_thread_state[i].shutdown = true;
      gpr_cv_signal(&g_thread_state[i].cv);
      gpr_mu_unlock(&g_thread_state[i].mu);
    }
    /* ensure no thread is adding a new thread... once this is past, then
       no thread will try to add a new one either (since shutdown is true) */
    gpr_spinlock_lock(&g_adding_thread_lock);
    gpr_spinlock_unlock(&g_adding_thread_lock);
    for (gpr_atm i = 0; i < g_cur_threads; i++) {
      gpr_thd_join(g_thread_state[i].id);
    }
    gpr_atm_no_barrier_store(&g_cur_threads, 0);
    for (size_t i = 0; i < g_max_threads; i++) {
      gpr_mu_destroy(&g_thread_state[i].mu);
      gpr_cv_destroy(&g_thread_state[i].cv);
      run_closures(exec_ctx, g_thread_state[i].elems);
    }
    gpr_free(g_thread_state);
    gpr_tls_destroy(&g_this_thread_state);
  }
}

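/* Initialize the executor and start it in threaded mode. */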
void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
  grpc_register_tracer(&executor_trace);
  gpr_atm_no_barrier_store(&g_cur_threads, 0);
  grpc_executor_set_threading(exec_ctx, true);
}

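/* Shut the executor down by disabling threading. */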
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
  grpc_executor_set_threading(exec_ctx, false);
}

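/* Worker thread body: wait for closures to arrive on this thread's queue (or
   for shutdown), swap the queue out under the lock, then run the batch
   outside the lock. The number of closures just run is subtracted from
   'depth' at the top of the next iteration. */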
static void executor_thread(void *arg) {
  thread_state *ts = arg;
  gpr_tls_set(&g_this_thread_state, (intptr_t)ts);

  grpc_exec_ctx exec_ctx =
      GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);

  size_t subtract_depth = 0;
  for (;;) {
    if (GRPC_TRACER_ON(executor_trace)) {
      gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
              (int)(ts - g_thread_state), subtract_depth);
    }
    gpr_mu_lock(&ts->mu);
    ts->depth -= subtract_depth;
    while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
      ts->queued_long_job = false;
      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
    }
    if (ts->shutdown) {
      if (GRPC_TRACER_ON(executor_trace)) {
        gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
                (int)(ts - g_thread_state));
      }
      gpr_mu_unlock(&ts->mu);
      break;
    }
    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
    grpc_closure_list exec = ts->elems;
    ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
    gpr_mu_unlock(&ts->mu);
    if (GRPC_TRACER_ON(executor_trace)) {
      gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
    }

    subtract_depth = run_closures(&exec_ctx, exec);
  }
  grpc_exec_ctx_finish(&exec_ctx);
}

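/* Queue 'closure' on one of the worker threads. With threading disabled the
   closure is instead appended to the caller's exec_ctx and runs when that
   context flushes. Otherwise the pusher starts from its own queue (when
   called from a worker thread) or one chosen by hashing the exec_ctx
   pointer, skips workers that already have a long job queued, wakes the
   target if its queue was empty, and tries to start another thread once the
   queue depth exceeds MAX_DEPTH. If every queue is blocked by a long job,
   the push is retried from scratch. */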
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_error *error, bool is_short) {
  bool retry_push;
  if (is_short) {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
  } else {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
  }
  do {
    retry_push = false;
    size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
    if (cur_thread_count == 0) {
      if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
        gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
                closure, closure->file_created, closure->line_created);
#else
        gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
#endif
      }
      grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
      return;
    }
    thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
    if (ts == NULL) {
      ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
    } else {
      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
    }
    thread_state *orig_ts = ts;

    bool try_new_thread;
    for (;;) {
      if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
        gpr_log(
            GPR_DEBUG,
            "EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
            closure, is_short ? "short" : "long", closure->file_created,
            closure->line_created, (int)(ts - g_thread_state));
#else
        gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
                closure, is_short ? "short" : "long",
                (int)(ts - g_thread_state));
#endif
      }
      gpr_mu_lock(&ts->mu);
      if (ts->queued_long_job) {
        gpr_mu_unlock(&ts->mu);
        size_t idx = (size_t)(ts - g_thread_state);
        ts = &g_thread_state[(idx + 1) % cur_thread_count];
        if (ts == orig_ts) {
          retry_push = true;
          try_new_thread = true;
          break;
        }
        continue;
      }
      if (grpc_closure_list_empty(ts->elems)) {
        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
        gpr_cv_signal(&ts->cv);
      }
      grpc_closure_list_append(&ts->elems, closure, error);
      ts->depth++;
      try_new_thread = ts->depth > MAX_DEPTH &&
                       cur_thread_count < g_max_threads && !ts->shutdown;
      if (!is_short) ts->queued_long_job = true;
      gpr_mu_unlock(&ts->mu);
      break;
    }
    if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
      cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
      if (cur_thread_count < g_max_threads) {
        gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);

        gpr_thd_options opt = gpr_thd_options_default();
        gpr_thd_options_set_joinable(&opt);
        gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
                    &g_thread_state[cur_thread_count], &opt);
      }
      gpr_spinlock_unlock(&g_adding_thread_lock);
    }
    if (retry_push) {
      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
    }
  } while (retry_push);
}

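/* Scheduler entry points: closures scheduled via the "short" scheduler are
   expected to finish quickly; "long" closures mark their worker's queue as
   holding a long job so that subsequent pushes avoid it. */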
static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                                grpc_error *error) {
  executor_push(exec_ctx, closure, error, true);
}

static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                               grpc_error *error) {
  executor_push(exec_ctx, closure, error, false);
}

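/* Closure scheduler vtables: both function slots dispatch to the same push
   routine for each job length. */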
static const grpc_closure_scheduler_vtable executor_vtable_short = {
    executor_push_short, executor_push_short, "executor"};
static grpc_closure_scheduler executor_scheduler_short = {
    &executor_vtable_short};

static const grpc_closure_scheduler_vtable executor_vtable_long = {
    executor_push_long, executor_push_long, "executor"};
static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};

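/* Select the short- or long-job scheduler for the requested job length. */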
grpc_closure_scheduler *grpc_executor_scheduler(
    grpc_executor_job_length length) {
  return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
                                       : &executor_scheduler_long;
}
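
/* Usage sketch (illustrative only, not part of this translation unit):
   offloading a callback onto the executor, assuming the GRPC_CLOSURE_INIT and
   GRPC_CLOSURE_SCHED macros from "src/core/lib/iomgr/closure.h" and a
   caller-provided my_cb/my_state:

     grpc_closure my_closure;
     GRPC_CLOSURE_INIT(&my_closure, my_cb, my_state,
                       grpc_executor_scheduler(GRPC_EXECUTOR_SHORT));
     GRPC_CLOSURE_SCHED(&exec_ctx, &my_closure, GRPC_ERROR_NONE);
     grpc_exec_ctx_flush(&exec_ctx);
*/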