blob: 892385d7d782eab40e8549a25a3213404de4aafd [file] [log] [blame]
David Garcia Quintas4bc34632015-10-07 16:12:35 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2015 gRPC authors.
David Garcia Quintas4bc34632015-10-07 16:12:35 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
David Garcia Quintas4bc34632015-10-07 16:12:35 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
David Garcia Quintas4bc34632015-10-07 16:12:35 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
David Garcia Quintas4bc34632015-10-07 16:12:35 -070016 *
17 */
18
Craig Tiller9533d042016-03-25 17:11:06 -070019#include "src/core/lib/iomgr/executor.h"
David Garcia Quintas4bc34632015-10-07 16:12:35 -070020
21#include <string.h>
22
23#include <grpc/support/alloc.h>
Craig Tiller3e9f98e2017-05-12 13:17:47 -070024#include <grpc/support/cpu.h>
David Garcia Quintas4bc34632015-10-07 16:12:35 -070025#include <grpc/support/log.h>
26#include <grpc/support/sync.h>
27#include <grpc/support/thd.h>
Craig Tiller3e9f98e2017-05-12 13:17:47 -070028#include <grpc/support/tls.h>
29#include <grpc/support/useful.h>
30
Craig Tiller57bb9a92017-08-31 16:44:15 -070031#include "src/core/lib/debug/stats.h"
Craig Tiller9533d042016-03-25 17:11:06 -070032#include "src/core/lib/iomgr/exec_ctx.h"
Craig Tiller3e9f98e2017-05-12 13:17:47 -070033#include "src/core/lib/support/spinlock.h"
David Garcia Quintas4bc34632015-10-07 16:12:35 -070034
/* Maximum number of batch "depth" units a thread accumulates before pushes
   start requesting an additional worker thread (see executor_push). */
#define MAX_DEPTH 2

/* Per-worker-thread state: a locked queue of pending closures plus the
   bookkeeping used to decide when to grow the thread pool. */
typedef struct {
  gpr_mu mu;               /* protects all fields below */
  gpr_cv cv;               /* signalled when elems becomes non-empty or on shutdown */
  grpc_closure_list elems; /* closures queued to this thread, FIFO */
  size_t depth;            /* outstanding scheduled-but-not-yet-run work units */
  bool shutdown;           /* set under mu when threading is being torn down */
  bool queued_long_job;    /* a long job is queued; no further work is added here */
  gpr_thd_id id;           /* thread handle, used for joining at shutdown */
} thread_state;
David Garcia Quintas4bc34632015-10-07 16:12:35 -070046
/* Array of g_max_threads thread_state slots; only the first g_cur_threads
   entries have running threads, but all slots are initialized. */
static thread_state *g_thread_state;
static size_t g_max_threads;
/* Number of live executor threads; 0 means "run closures inline". */
static gpr_atm g_cur_threads;
/* Serializes thread creation; also used as a barrier during shutdown. */
static gpr_spinlock g_adding_thread_lock = GPR_SPINLOCK_STATIC_INITIALIZER;

/* Points at the current thread's thread_state when running on an executor
   thread, NULL otherwise (used by executor_push to detect self-pushes). */
GPR_TLS_DECL(g_this_thread_state);

static grpc_tracer_flag executor_trace =
    GRPC_TRACER_INITIALIZER(false, "executor");

static void executor_thread(void *arg);
David Garcia Quintas4bc34632015-10-07 16:12:35 -070058
Craig Tiller2477cf32017-09-26 12:20:35 -070059static size_t run_closures(grpc_exec_ctx *exec_ctx, grpc_closure_list list) {
60 size_t n = 0;
David Garcia Quintas4bc34632015-10-07 16:12:35 -070061
Craig Tiller2477cf32017-09-26 12:20:35 -070062 grpc_closure *c = list.head;
63 while (c != NULL) {
64 grpc_closure *next = c->next_data.next;
65 grpc_error *error = c->error_data.error;
66 if (GRPC_TRACER_ON(executor_trace)) {
Craig Tilleraf723b02017-07-17 17:56:28 -070067#ifndef NDEBUG
Craig Tiller2477cf32017-09-26 12:20:35 -070068 gpr_log(GPR_DEBUG, "EXECUTOR: run %p [created by %s:%d]", c,
69 c->file_created, c->line_created);
Craig Tilleraf723b02017-07-17 17:56:28 -070070#else
Craig Tiller2477cf32017-09-26 12:20:35 -070071 gpr_log(GPR_DEBUG, "EXECUTOR: run %p", c);
Craig Tilleraf723b02017-07-17 17:56:28 -070072#endif
Craig Tillerfb0262b2017-09-13 15:19:19 -070073 }
Craig Tiller2477cf32017-09-26 12:20:35 -070074#ifndef NDEBUG
75 c->scheduled = false;
76#endif
77 c->cb(exec_ctx, c->cb_arg, error);
78 GRPC_ERROR_UNREF(error);
79 c = next;
80 n++;
81 grpc_exec_ctx_flush(exec_ctx);
Craig Tiller061ef742016-12-29 10:54:09 -080082 }
Craig Tiller3e9f98e2017-05-12 13:17:47 -070083
Craig Tiller2477cf32017-09-26 12:20:35 -070084 return n;
Craig Tiller3e9f98e2017-05-12 13:17:47 -070085}
86
Craig Tiller5e56f002017-05-16 15:02:50 -070087bool grpc_executor_is_threaded() {
88 return gpr_atm_no_barrier_load(&g_cur_threads) > 0;
89}
90
/* Switch the executor between threaded and non-threaded mode.
   threading==true: spin up the state for up to 2*ncores threads and start
   the first worker (additional workers are started lazily by executor_push).
   threading==false: signal shutdown to every slot, join all live threads,
   run any leftover queued closures on the caller's exec_ctx, and free all
   state.  Idempotent in both directions (early-returns if already in the
   requested mode). */
void grpc_executor_set_threading(grpc_exec_ctx *exec_ctx, bool threading) {
  gpr_atm cur_threads = gpr_atm_no_barrier_load(&g_cur_threads);
  if (threading) {
    if (cur_threads > 0) return; /* already threaded */
    g_max_threads = GPR_MAX(1, 2 * gpr_cpu_num_cores());
    gpr_atm_no_barrier_store(&g_cur_threads, 1);
    gpr_tls_init(&g_this_thread_state);
    /* zalloc: all slots start with shutdown=false, depth=0, etc. */
    g_thread_state =
        (thread_state *)gpr_zalloc(sizeof(thread_state) * g_max_threads);
    for (size_t i = 0; i < g_max_threads; i++) {
      gpr_mu_init(&g_thread_state[i].mu);
      gpr_cv_init(&g_thread_state[i].cv);
      g_thread_state[i].elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
    }

    /* only thread 0 is started eagerly; executor_push grows the pool */
    gpr_thd_options opt = gpr_thd_options_default();
    gpr_thd_options_set_joinable(&opt);
    gpr_thd_new(&g_thread_state[0].id, executor_thread, &g_thread_state[0],
                &opt);
  } else {
    if (cur_threads == 0) return; /* already non-threaded */
    /* mark every slot shutdown and wake its thread */
    for (size_t i = 0; i < g_max_threads; i++) {
      gpr_mu_lock(&g_thread_state[i].mu);
      g_thread_state[i].shutdown = true;
      gpr_cv_signal(&g_thread_state[i].cv);
      gpr_mu_unlock(&g_thread_state[i].mu);
    }
    /* ensure no thread is adding a new thread... once this is past, then
       no thread will try to add a new one either (since shutdown is true) */
    gpr_spinlock_lock(&g_adding_thread_lock);
    gpr_spinlock_unlock(&g_adding_thread_lock);
    /* join only the threads that were actually started */
    for (gpr_atm i = 0; i < g_cur_threads; i++) {
      gpr_thd_join(g_thread_state[i].id);
    }
    gpr_atm_no_barrier_store(&g_cur_threads, 0);
    for (size_t i = 0; i < g_max_threads; i++) {
      gpr_mu_destroy(&g_thread_state[i].mu);
      gpr_cv_destroy(&g_thread_state[i].cv);
      /* drain anything still queued; safe without the (now destroyed)
         lock because all worker threads have been joined */
      run_closures(exec_ctx, g_thread_state[i].elems);
    }
    gpr_free(g_thread_state);
    gpr_tls_destroy(&g_this_thread_state);
  }
}
135
/* One-time executor initialization: register the "executor" trace flag and
   enter threaded mode.  g_cur_threads is explicitly zeroed first so that
   grpc_executor_set_threading sees the "not yet threaded" state. */
void grpc_executor_init(grpc_exec_ctx *exec_ctx) {
  grpc_register_tracer(&executor_trace);
  gpr_atm_no_barrier_store(&g_cur_threads, 0);
  grpc_executor_set_threading(exec_ctx, true);
}
141
/* Tear down the executor: joins all worker threads and drains any queued
   closures onto the caller's exec_ctx (see grpc_executor_set_threading). */
void grpc_executor_shutdown(grpc_exec_ctx *exec_ctx) {
  grpc_executor_set_threading(exec_ctx, false);
}
145
/* Worker-thread main loop.  Blocks on its slot's condition variable until
   closures are queued or shutdown is requested, then swaps the whole queue
   out under the lock and runs it with the lock released. */
static void executor_thread(void *arg) {
  thread_state *ts = (thread_state *)arg;
  /* record our slot in TLS so executor_push can detect self-pushes */
  gpr_tls_set(&g_this_thread_state, (intptr_t)ts);

  /* NOTE(review): grpc_never_ready_to_finish presumably keeps this
     exec_ctx from claiming readiness on its own — confirm against
     exec_ctx.h */
  grpc_exec_ctx exec_ctx =
      GRPC_EXEC_CTX_INITIALIZER(0, grpc_never_ready_to_finish, NULL);

  /* depth consumed by the batch run on the previous iteration; it is
     subtracted from ts->depth under the lock at the top of each loop */
  size_t subtract_depth = 0;
  for (;;) {
    if (GRPC_TRACER_ON(executor_trace)) {
      gpr_log(GPR_DEBUG, "EXECUTOR[%d]: step (sub_depth=%" PRIdPTR ")",
              (int)(ts - g_thread_state), subtract_depth);
    }
    gpr_mu_lock(&ts->mu);
    ts->depth -= subtract_depth;
    while (grpc_closure_list_empty(ts->elems) && !ts->shutdown) {
      /* queue fully drained: this slot may accept work again even after a
         long job (clears the no-starvation guard set by executor_push) */
      ts->queued_long_job = false;
      gpr_cv_wait(&ts->cv, &ts->mu, gpr_inf_future(GPR_CLOCK_REALTIME));
    }
    if (ts->shutdown) {
      if (GRPC_TRACER_ON(executor_trace)) {
        gpr_log(GPR_DEBUG, "EXECUTOR[%d]: shutdown",
                (int)(ts - g_thread_state));
      }
      gpr_mu_unlock(&ts->mu);
      break;
    }
    GRPC_STATS_INC_EXECUTOR_QUEUE_DRAINED(&exec_ctx);
    /* take the whole queue and run it outside the lock */
    grpc_closure_list exec = ts->elems;
    ts->elems = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
    gpr_mu_unlock(&ts->mu);
    if (GRPC_TRACER_ON(executor_trace)) {
      gpr_log(GPR_DEBUG, "EXECUTOR[%d]: execute", (int)(ts - g_thread_state));
    }

    subtract_depth = run_closures(&exec_ctx, exec);
  }
  grpc_exec_ctx_finish(&exec_ctx);
}
185
/* Schedule 'closure' on the executor.
   - If the executor is non-threaded, append to the caller's exec_ctx
     (runs inline at the next flush).
   - Otherwise pick a thread (the current executor thread if we're on one,
     else a hash-selected slot) and append to its queue, rotating to the
     next slot when the chosen one is blocked by a queued long job.
   - If every slot is blocked, or a slot's depth exceeds MAX_DEPTH, try to
     start an additional worker thread (bounded by g_max_threads) and,
     in the all-blocked case, retry the whole push.
   'is_short' distinguishes short jobs from long ones; a queued long job
   closes its slot to further work until the queue drains (starvation
   guard, see executor_thread). */
static void executor_push(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                          grpc_error *error, bool is_short) {
  bool retry_push;
  if (is_short) {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_SHORT_ITEMS(exec_ctx);
  } else {
    GRPC_STATS_INC_EXECUTOR_SCHEDULED_LONG_ITEMS(exec_ctx);
  }
  do {
    retry_push = false;
    size_t cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
    if (cur_thread_count == 0) {
      /* non-threaded mode: run on the caller's exec_ctx */
      if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
        gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p (created %s:%d) inline",
                closure, closure->file_created, closure->line_created);
#else
        gpr_log(GPR_DEBUG, "EXECUTOR: schedule %p inline", closure);
#endif
      }
      grpc_closure_list_append(&exec_ctx->closure_list, closure, error);
      return;
    }
    /* prefer our own queue when pushing from an executor thread */
    thread_state *ts = (thread_state *)gpr_tls_get(&g_this_thread_state);
    if (ts == NULL) {
      ts = &g_thread_state[GPR_HASH_POINTER(exec_ctx, cur_thread_count)];
    } else {
      GRPC_STATS_INC_EXECUTOR_SCHEDULED_TO_SELF(exec_ctx);
    }
    thread_state *orig_ts = ts;

    bool try_new_thread;
    for (;;) {
      if (GRPC_TRACER_ON(executor_trace)) {
#ifndef NDEBUG
        gpr_log(
            GPR_DEBUG,
            "EXECUTOR: try to schedule %p (%s) (created %s:%d) to thread %d",
            closure, is_short ? "short" : "long", closure->file_created,
            closure->line_created, (int)(ts - g_thread_state));
#else
        gpr_log(GPR_DEBUG, "EXECUTOR: try to schedule %p (%s) to thread %d",
                closure, is_short ? "short" : "long",
                (int)(ts - g_thread_state));
#endif
      }
      gpr_mu_lock(&ts->mu);
      if (ts->queued_long_job) {
        // if there's a long job queued, we never queue anything else to this
        // queue (since long jobs can take 'infinite' time and we need to
        // guarantee no starvation)
        // ... spin through queues and try again
        gpr_mu_unlock(&ts->mu);
        size_t idx = (size_t)(ts - g_thread_state);
        ts = &g_thread_state[(idx + 1) % cur_thread_count];
        if (ts == orig_ts) {
          /* every slot is blocked: grow the pool and retry the push */
          retry_push = true;
          try_new_thread = true;
          break;
        }
        continue;
      }
      if (grpc_closure_list_empty(ts->elems)) {
        /* queue was empty, so the worker is (or will be) waiting on cv */
        GRPC_STATS_INC_EXECUTOR_WAKEUP_INITIATED(exec_ctx);
        gpr_cv_signal(&ts->cv);
      }
      grpc_closure_list_append(&ts->elems, closure, error);
      ts->depth++;
      /* this slot is getting backed up: ask for another worker */
      try_new_thread = ts->depth > MAX_DEPTH &&
                       cur_thread_count < g_max_threads && !ts->shutdown;
      if (!is_short) ts->queued_long_job = true;
      gpr_mu_unlock(&ts->mu);
      break;
    }
    /* trylock: if someone else is already adding a thread, skip it */
    if (try_new_thread && gpr_spinlock_trylock(&g_adding_thread_lock)) {
      cur_thread_count = (size_t)gpr_atm_no_barrier_load(&g_cur_threads);
      if (cur_thread_count < g_max_threads) {
        /* publish the new count before the thread starts running */
        gpr_atm_no_barrier_store(&g_cur_threads, cur_thread_count + 1);

        gpr_thd_options opt = gpr_thd_options_default();
        gpr_thd_options_set_joinable(&opt);
        gpr_thd_new(&g_thread_state[cur_thread_count].id, executor_thread,
                    &g_thread_state[cur_thread_count], &opt);
      }
      gpr_spinlock_unlock(&g_adding_thread_lock);
    }
    if (retry_push) {
      GRPC_STATS_INC_EXECUTOR_PUSH_RETRIES(exec_ctx);
    }
  } while (retry_push);
}
Craig Tiller91031da2016-12-28 15:44:25 -0800277
/* Scheduler callback for short-running closures (is_short=true). */
static void executor_push_short(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                                grpc_error *error) {
  executor_push(exec_ctx, closure, error, true);
}
282
/* Scheduler callback for long-running closures (is_short=false); long jobs
   temporarily close their queue to further work (see executor_push). */
static void executor_push_long(grpc_exec_ctx *exec_ctx, grpc_closure *closure,
                               grpc_error *error) {
  executor_push(exec_ctx, closure, error, false);
}
287
/* Scheduler vtables/instances handed out by grpc_executor_scheduler.
   Both vtable entry points funnel into the same push function; only the
   is_short flag differs between the two schedulers. */
static const grpc_closure_scheduler_vtable executor_vtable_short = {
    executor_push_short, executor_push_short, "executor"};
static grpc_closure_scheduler executor_scheduler_short = {
    &executor_vtable_short};

static const grpc_closure_scheduler_vtable executor_vtable_long = {
    executor_push_long, executor_push_long, "executor"};
static grpc_closure_scheduler executor_scheduler_long = {&executor_vtable_long};
296
297grpc_closure_scheduler *grpc_executor_scheduler(
298 grpc_executor_job_length length) {
299 return length == GRPC_EXECUTOR_SHORT ? &executor_scheduler_short
300 : &executor_scheduler_long;
301}