/*
 *
 * Copyright 2016, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

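// A combiner serializes closure execution without a conventional mutex:
// closures are pushed onto a multi-producer single-consumer queue, and the
// thread whose push takes the combiner's queued-element count from zero to
// one becomes the drainer, executing closures one at a time. Callers never
// block; they either enqueue and move on, or drain.
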
#include "src/core/lib/iomgr/combiner.h"

#include <string.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>

#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"

int grpc_combiner_trace = 0;

#define GRPC_COMBINER_TRACE(fn) \
  do {                          \
    if (grpc_combiner_trace) {  \
      fn;                       \
    }                           \
  } while (0)

#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2

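// A note on the encoding (derived from the constants above): `state` packs an
// unorphaned flag into its low bit and a count of queued closures into the
// remaining bits, so state == 5 reads as "unorphaned, two closures queued",
// and a fetch-add that returns 1 tells the pusher it was first in and must
// begin draining.
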
struct grpc_combiner {
  grpc_combiner *next_combiner_on_this_exec_ctx;
  grpc_workqueue *optional_workqueue;
  grpc_closure_scheduler uncovered_scheduler;
  grpc_closure_scheduler covered_scheduler;
  grpc_closure_scheduler uncovered_finally_scheduler;
  grpc_closure_scheduler covered_finally_scheduler;
  gpr_mpscq queue;
  // state is:
  // lower bit - zero if orphaned (STATE_UNORPHANED)
  // other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
  gpr_atm state;
  // number of elements in the list that are covered by a poller: if >0, we can
  // offload safely
  gpr_atm elements_covered_by_poller;
  bool time_to_execute_final_list;
  bool final_list_covered_by_poller;
  grpc_closure_list final_list;
  grpc_closure offload;
};

static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
                                    grpc_closure *closure, grpc_error *error);
static void combiner_exec_covered(grpc_exec_ctx *exec_ctx,
                                  grpc_closure *closure, grpc_error *error);
static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
                                            grpc_closure *closure,
                                            grpc_error *error);
static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
                                          grpc_closure *closure,
                                          grpc_error *error);

static const grpc_closure_scheduler_vtable scheduler_uncovered = {
    combiner_exec_uncovered, combiner_exec_uncovered,
    "combiner:immediately:uncovered"};
static const grpc_closure_scheduler_vtable scheduler_covered = {
    combiner_exec_covered, combiner_exec_covered,
    "combiner:immediately:covered"};
static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
    combiner_finally_exec_uncovered, combiner_finally_exec_uncovered,
    "combiner:finally:uncovered"};
static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
    combiner_finally_exec_covered, combiner_finally_exec_covered,
    "combiner:finally:covered"};

static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);

typedef struct {
  grpc_error *error;
  bool covered_by_poller;
} error_data;

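// Both fields above travel in a closure's single scratch word by tagging the
// low bit of the grpc_error pointer with covered_by_poller; this relies on
// grpc_error allocations being at least 2-byte aligned, which the packing
// below assumes.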
static uintptr_t pack_error_data(error_data d) {
  return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
}

static error_data unpack_error_data(uintptr_t p) {
  return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
}

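// "Covered by a poller" (per the field comments in grpc_combiner) means some
// polling thread is committed to eventually executing offloaded work, so
// pushing the remainder of the queue onto the optional workqueue cannot
// strand it.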
static bool is_covered_by_poller(grpc_combiner *lock) {
  return lock->final_list_covered_by_poller ||
         gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
}

#define IS_COVERED_BY_POLLER_FMT "(final=%d elems=%" PRIdPTR ")->%d"
#define IS_COVERED_BY_POLLER_ARGS(lock)                      \
  (lock)->final_list_covered_by_poller,                      \
      gpr_atm_acq_load(&(lock)->elements_covered_by_poller), \
      is_covered_by_poller((lock))

grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
  grpc_combiner *lock = gpr_malloc(sizeof(*lock));
  lock->next_combiner_on_this_exec_ctx = NULL;
  lock->time_to_execute_final_list = false;
  lock->optional_workqueue = optional_workqueue;
  lock->final_list_covered_by_poller = false;
  lock->uncovered_scheduler.vtable = &scheduler_uncovered;
  lock->covered_scheduler.vtable = &scheduler_covered;
  lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered;
  lock->covered_finally_scheduler.vtable = &finally_scheduler_covered;
  gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
  gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
  gpr_mpscq_init(&lock->queue);
  grpc_closure_list_init(&lock->final_list);
  grpc_closure_init(&lock->offload, offload, lock,
                    grpc_workqueue_scheduler(lock->optional_workqueue));
  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
  return lock;
}

static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
  GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
  gpr_mpscq_destroy(&lock->queue);
  GRPC_WORKQUEUE_UNREF(exec_ctx, lock->optional_workqueue, "combiner");
  gpr_free(lock);
}

void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED);
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state));
  if (old_state == 1) {
    really_destroy(exec_ctx, lock);
  }
}

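// Combiners with pending work form an intrusive singly linked list on the
// exec_ctx (active_combiner through last_combiner); the exec_ctx's flush loop
// repeatedly calls grpc_combiner_continue_exec_ctx until this list empties.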
static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
                                  grpc_combiner *lock) {
  lock->next_combiner_on_this_exec_ctx = NULL;
  if (exec_ctx->active_combiner == NULL) {
    exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
  } else {
    exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
    exec_ctx->last_combiner = lock;
  }
}

static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx,
                                   grpc_combiner *lock) {
  lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner;
  exec_ctx->active_combiner = lock;
  if (lock->next_combiner_on_this_exec_ctx == NULL) {
    exec_ctx->last_combiner = lock;
  }
}

static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock,
                          grpc_closure *cl, grpc_error *error,
                          bool covered_by_poller) {
  GPR_TIMER_BEGIN("combiner.execute", 0);
  gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock,
      cl, covered_by_poller, last));
  GPR_ASSERT(last & STATE_UNORPHANED);  // ensure lock has not been destroyed
  cl->error_data.scratch =
      pack_error_data((error_data){error, covered_by_poller});
  if (covered_by_poller) {
    gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1);
  }
  gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next);
  if (last == 1) {
    // first element on this list: add it to the list of combiner locks
    // executing within this exec_ctx
    push_last_on_exec_ctx(exec_ctx, lock);
  }
  GPR_TIMER_END("combiner.execute", 0);
}

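// Recover the combiner that embeds a given scheduler: a closure's scheduler
// pointer aims at one of the four scheduler members inside grpc_combiner, so
// subtracting that member's offset yields the enclosing struct (the classic
// container_of idiom).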
#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
  ((grpc_combiner *)(((char *)((closure)->scheduler)) -          \
                     offsetof(grpc_combiner, scheduler_name)))

static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
                                    grpc_error *error) {
  combiner_exec(exec_ctx,
                COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl,
                error, false);
}

static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
                                  grpc_error *error) {
  combiner_exec(exec_ctx,
                COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl,
                error, true);
}

static void move_next(grpc_exec_ctx *exec_ctx) {
  exec_ctx->active_combiner =
      exec_ctx->active_combiner->next_combiner_on_this_exec_ctx;
  if (exec_ctx->active_combiner == NULL) {
    exec_ctx->last_combiner = NULL;
  }
}

static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  grpc_combiner *lock = arg;
  push_last_on_exec_ctx(exec_ctx, lock);
}

static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  move_next(exec_ctx);
  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
                              lock->optional_workqueue));
  grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
}

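// Runs one drain step for the exec_ctx's active combiner: execute a single
// queued closure, or the accumulated final list once the queue is empty, or
// offload everything to the workqueue if a poller covers the work and the
// exec_ctx wants to finish. Returns false only if no combiner is active.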
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
  GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
  grpc_combiner *lock = exec_ctx->active_combiner;
  if (lock == NULL) {
    GPR_TIMER_END("combiner.continue_exec_ctx", 0);
    return false;
  }

  GRPC_COMBINER_TRACE(
      gpr_log(GPR_DEBUG,
              "C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
              "is_covered_by_poller=" IS_COVERED_BY_POLLER_FMT
              " exec_ctx_ready_to_finish=%d "
              "time_to_execute_final_list=%d",
              lock, lock->optional_workqueue, IS_COVERED_BY_POLLER_ARGS(lock),
              grpc_exec_ctx_ready_to_finish(exec_ctx),
              lock->time_to_execute_final_list));

  if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
      grpc_exec_ctx_ready_to_finish(exec_ctx)) {
    GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
    // this execution context wants to move on, and we have a workqueue (and
    // so can help the execution context out): schedule remaining work to be
    // picked up on the workqueue
    queue_offload(exec_ctx, lock);
    GPR_TIMER_END("combiner.continue_exec_ctx", 0);
    return true;
  }

  if (!lock->time_to_execute_final_list ||
      // peek to see if something new has shown up, and execute that with
      // priority
      (gpr_atm_acq_load(&lock->state) >> 1) > 1) {
    gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
    GRPC_COMBINER_TRACE(
        gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
    if (n == NULL) {
      // queue is in an inconsistent state: use this as a cue that we should
      // go off and do something else for a while (and come back later)
      GPR_TIMER_MARK("delay_busy", 0);
      if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
        queue_offload(exec_ctx, lock);
      }
      GPR_TIMER_END("combiner.continue_exec_ctx", 0);
      return true;
    }
    GPR_TIMER_BEGIN("combiner.exec1", 0);
    grpc_closure *cl = (grpc_closure *)n;
    error_data err = unpack_error_data(cl->error_data.scratch);
    cl->cb(exec_ctx, cl->cb_arg, err.error);
    if (err.covered_by_poller) {
      gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
    }
    GRPC_ERROR_UNREF(err.error);
    GPR_TIMER_END("combiner.exec1", 0);
  } else {
    grpc_closure *c = lock->final_list.head;
    GPR_ASSERT(c != NULL);
    grpc_closure_list_init(&lock->final_list);
    lock->final_list_covered_by_poller = false;
    int loops = 0;
    while (c != NULL) {
      GPR_TIMER_BEGIN("combiner.exec_1final", 0);
      GRPC_COMBINER_TRACE(
          gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
      grpc_closure *next = c->next_data.next;
      grpc_error *error = c->error_data.error;
      c->cb(exec_ctx, c->cb_arg, error);
      GRPC_ERROR_UNREF(error);
      c = next;
      loops++;
      GPR_TIMER_END("combiner.exec_1final", 0);
    }
  }

  GPR_TIMER_MARK("unref", 0);
  move_next(exec_ctx);
  lock->time_to_execute_final_list = false;
  gpr_atm old_state =
      gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
  GRPC_COMBINER_TRACE(
      gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \
  (((orphaned) ? 0 : STATE_UNORPHANED) |    \
   ((elem_count)*STATE_ELEM_COUNT_LOW_BIT))
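  // Concretely: OLD_STATE_WAS(false, 1) == 3, OLD_STATE_WAS(true, 1) == 2,
  // OLD_STATE_WAS(false, 2) == 5, OLD_STATE_WAS(true, 2) == 4; the low bit is
  // the unorphaned flag and the remaining bits count elements prior to the
  // decrement above.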
  // Depending on what the previous state was, we need to perform different
  // actions.
  switch (old_state) {
    default:
      // we have multiple queued work items: just continue executing them
      break;
    case OLD_STATE_WAS(false, 2):
    case OLD_STATE_WAS(true, 2):
      // we're down to one queued item: if it's the final list we should run it
      if (!grpc_closure_list_empty(lock->final_list)) {
        lock->time_to_execute_final_list = true;
      }
      break;
    case OLD_STATE_WAS(false, 1):
      // had one count, unorphaned --> unlocked unorphaned
      GPR_TIMER_END("combiner.continue_exec_ctx", 0);
      return true;
    case OLD_STATE_WAS(true, 1):
      // had one count, orphaned --> unlocked and orphaned
      really_destroy(exec_ctx, lock);
      GPR_TIMER_END("combiner.continue_exec_ctx", 0);
      return true;
    case OLD_STATE_WAS(false, 0):
    case OLD_STATE_WAS(true, 0):
      // these values are illegal - they represent an already unlocked or
      // deleted lock
      GPR_TIMER_END("combiner.continue_exec_ctx", 0);
      GPR_UNREACHABLE_CODE(return true);
  }
  push_first_on_exec_ctx(exec_ctx, lock);
  GPR_TIMER_END("combiner.continue_exec_ctx", 0);
  return true;
}

static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
                            grpc_error *error);

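// "Finally" closures run only once the combiner's regular queue has drained,
// and only on the thread currently holding the combiner; a caller outside the
// lock is bounced through enqueue_finally, which re-runs this function from
// inside the combiner.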
static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
                                     grpc_combiner *lock, grpc_closure *closure,
                                     grpc_error *error,
                                     bool covered_by_poller) {
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
      closure, exec_ctx->active_combiner, covered_by_poller));
  GPR_TIMER_BEGIN("combiner.execute_finally", 0);
  if (exec_ctx->active_combiner != lock) {
    GPR_TIMER_MARK("slowpath", 0);
    grpc_closure_sched(
        exec_ctx, grpc_closure_create(enqueue_finally, closure,
                                      grpc_combiner_scheduler(lock, false)),
        error);
    GPR_TIMER_END("combiner.execute_finally", 0);
    return;
  }

  if (grpc_closure_list_empty(lock->final_list)) {
    gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
  }
  if (covered_by_poller) {
    lock->final_list_covered_by_poller = true;
  }
  grpc_closure_list_append(&lock->final_list, closure, error);
  GPR_TIMER_END("combiner.execute_finally", 0);
}

static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
                            grpc_error *error) {
  combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
                           GRPC_ERROR_REF(error), false);
}

static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
                                            grpc_closure *cl,
                                            grpc_error *error) {
  combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(
                                         cl, uncovered_finally_scheduler),
                           cl, error, false);
}

static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
                                          grpc_closure *cl, grpc_error *error) {
  combiner_execute_finally(
      exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler),
      cl, error, true);
}

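// Typical usage, as a sketch (my_closure, my_cb, and my_arg are illustrative
// names, not part of this file): bind a closure to the combiner's scheduler,
// then schedule it normally:
//
//   grpc_closure_init(&my_closure, my_cb, my_arg,
//                     grpc_combiner_scheduler(lock, false));
//   grpc_closure_sched(exec_ctx, &my_closure, GRPC_ERROR_NONE);
//
// my_cb then executes serially with every other closure scheduled on `lock`.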
grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner,
                                                bool covered_by_poller) {
  return covered_by_poller ? &combiner->covered_scheduler
                           : &combiner->uncovered_scheduler;
}

grpc_closure_scheduler *grpc_combiner_finally_scheduler(
    grpc_combiner *combiner, bool covered_by_poller) {
  return covered_by_poller ? &combiner->covered_finally_scheduler
                           : &combiner->uncovered_finally_scheduler;
}