/*
 *
 * Copyright 2016, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
| 33 | |
Craig Tiller | f7cade1 | 2016-07-07 21:41:10 -0700 | [diff] [blame] | 34 | #include "src/core/lib/iomgr/combiner.h" |
Craig Tiller | 5842a5b | 2016-05-02 12:38:57 -0700 | [diff] [blame] | 35 | |
Craig Tiller | a26637f | 2016-05-02 13:36:36 -0700 | [diff] [blame] | 36 | #include <string.h> |
| 37 | |
| 38 | #include <grpc/support/alloc.h> |
| 39 | #include <grpc/support/log.h> |
| 40 | |
Craig Tiller | c69d27b | 2016-07-14 13:20:22 -0700 | [diff] [blame] | 41 | #include "src/core/lib/iomgr/workqueue.h" |
| 42 | #include "src/core/lib/profiling/timers.h" |
| 43 | |
/* Tracing flag: set to non-zero to enable verbose combiner logging via
   GRPC_COMBINER_TRACE. */
int grpc_combiner_trace = 0;

/* Execute `fn` only when combiner tracing is enabled.  Wrapped in
   do { } while (0) so the macro behaves as a single statement. */
#define GRPC_COMBINER_TRACE(fn) \
  do {                          \
    if (grpc_combiner_trace) {  \
      fn;                       \
    }                           \
  } while (0)

/* Bit layout of grpc_combiner.state (see struct below):
   - STATE_UNORPHANED: low bit, set while the combiner has not yet been
     orphaned by grpc_combiner_destroy.
   - STATE_ELEM_COUNT_LOW_BIT: increment for one queued element; the bits
     above the low bit hold the count of items queued on the lock. */
#define STATE_UNORPHANED 1
#define STATE_ELEM_COUNT_LOW_BIT 2
| 55 | |
// A combiner serializes execution of closures scheduled against it: queued
// closures are drained one at a time by grpc_combiner_continue_exec_ctx.
struct grpc_combiner {
  // intrusive link: combiners with pending work form a singly linked list on
  // the exec_ctx (push_last_on_exec_ctx / push_first_on_exec_ctx / move_next)
  grpc_combiner *next_combiner_on_this_exec_ctx;
  // may be NULL; used by queue_offload to move remaining work off an
  // exec_ctx that wants to finish
  grpc_workqueue *optional_workqueue;
  // embedded schedulers, one per (immediate|finally) x (covered|uncovered);
  // scheduler callbacks recover the owning combiner via offsetof arithmetic
  // (COMBINER_FROM_CLOSURE_SCHEDULER)
  grpc_closure_scheduler uncovered_scheduler;
  grpc_closure_scheduler covered_scheduler;
  grpc_closure_scheduler uncovered_finally_scheduler;
  grpc_closure_scheduler covered_finally_scheduler;
  // multi-producer single-consumer queue of pending closures
  gpr_mpscq queue;
  // state is:
  // lower bit - zero if orphaned (STATE_UNORPHANED)
  // other bits - number of items queued on the lock (STATE_ELEM_COUNT_LOW_BIT)
  gpr_atm state;
  // number of elements in the list that are covered by a poller: if >0, we can
  // offload safely
  gpr_atm elements_covered_by_poller;
  // set once the element count drops to just the final list; tells
  // grpc_combiner_continue_exec_ctx to drain final_list next
  bool time_to_execute_final_list;
  // whether any closure currently in final_list was scheduled as covered
  bool final_list_covered_by_poller;
  // closures deferred to run after all immediately-queued work, via the
  // "finally" schedulers
  grpc_closure_list final_list;
  // pre-initialized closure used by queue_offload to reschedule this
  // combiner onto optional_workqueue
  grpc_closure offload;
};
| 76 | |
// Scheduler entry points; each recovers its combiner from the closure's
// scheduler pointer (see COMBINER_FROM_CLOSURE_SCHEDULER below).
static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx,
                                    grpc_closure *closure, grpc_error *error);
static void combiner_exec_covered(grpc_exec_ctx *exec_ctx,
                                  grpc_closure *closure, grpc_error *error);
static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
                                            grpc_closure *closure,
                                            grpc_error *error);
static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
                                          grpc_closure *closure,
                                          grpc_error *error);

// One vtable per (immediate|finally) x (covered|uncovered) combination; the
// same callback fills both function slots, followed by a name for tracing.
static const grpc_closure_scheduler_vtable scheduler_uncovered = {
    combiner_exec_uncovered, combiner_exec_uncovered,
    "combiner:immediately:uncovered"};
static const grpc_closure_scheduler_vtable scheduler_covered = {
    combiner_exec_covered, combiner_exec_covered,
    "combiner:immediately:covered"};
static const grpc_closure_scheduler_vtable finally_scheduler_uncovered = {
    combiner_finally_exec_uncovered, combiner_finally_exec_uncovered,
    "combiner:finally:uncovered"};
static const grpc_closure_scheduler_vtable finally_scheduler_covered = {
    combiner_finally_exec_covered, combiner_finally_exec_covered,
    "combiner:finally:covered"};
Craig Tiller | 91031da | 2016-12-28 15:44:25 -0800 | [diff] [blame] | 100 | |
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error);

// Per-closure payload carried through cl->error_data.scratch while the
// closure sits in the combiner queue (set in combiner_exec, read back in
// grpc_combiner_continue_exec_ctx).
typedef struct {
  grpc_error *error;
  bool covered_by_poller;
} error_data;

// Pack the error pointer and covered flag into one word.  Assumes grpc_error
// pointers are at least 2-byte aligned so the low bit is free — TODO confirm.
static uintptr_t pack_error_data(error_data d) {
  return ((uintptr_t)d.error) | (d.covered_by_poller ? 1 : 0);
}

static error_data unpack_error_data(uintptr_t p) {
  return (error_data){(grpc_error *)(p & ~(uintptr_t)1), p & 1};
}
| 115 | |
// True if the pending final list, or at least one queued element, is covered
// by a poller — the precondition for safely offloading work to the workqueue.
static bool is_covered_by_poller(grpc_combiner *lock) {
  return lock->final_list_covered_by_poller ||
         gpr_atm_acq_load(&lock->elements_covered_by_poller) > 0;
}

// Format string and matching argument list for tracing poller coverage.
#define IS_COVERED_BY_POLLER_FMT "(final=%d elems=%" PRIdPTR ")->%d"
#define IS_COVERED_BY_POLLER_ARGS(lock)                      \
  (lock)->final_list_covered_by_poller,                      \
      gpr_atm_acq_load(&(lock)->elements_covered_by_poller), \
      is_covered_by_poller((lock))
| 126 | |
// Allocate and initialize a combiner.  `optional_workqueue` may be NULL (in
// which case work is never offloaded); it is released via
// GRPC_WORKQUEUE_UNREF in really_destroy.
grpc_combiner *grpc_combiner_create(grpc_workqueue *optional_workqueue) {
  grpc_combiner *lock = gpr_malloc(sizeof(*lock));
  lock->next_combiner_on_this_exec_ctx = NULL;
  lock->time_to_execute_final_list = false;
  lock->optional_workqueue = optional_workqueue;
  lock->final_list_covered_by_poller = false;
  // wire up the four embedded schedulers to their shared vtables
  lock->uncovered_scheduler.vtable = &scheduler_uncovered;
  lock->covered_scheduler.vtable = &scheduler_covered;
  lock->uncovered_finally_scheduler.vtable = &finally_scheduler_uncovered;
  lock->covered_finally_scheduler.vtable = &finally_scheduler_covered;
  // starts unorphaned with zero queued elements
  gpr_atm_no_barrier_store(&lock->state, STATE_UNORPHANED);
  gpr_atm_no_barrier_store(&lock->elements_covered_by_poller, 0);
  gpr_mpscq_init(&lock->queue);
  grpc_closure_list_init(&lock->final_list);
  grpc_closure_init(&lock->offload, offload, lock,
                    grpc_workqueue_scheduler(lock->optional_workqueue));
  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p create", lock));
  return lock;
}
| 146 | |
// Free the combiner.  Precondition: state has dropped to 0, i.e. the
// combiner is orphaned and no elements remain queued.
static void really_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p really_destroy", lock));
  GPR_ASSERT(gpr_atm_no_barrier_load(&lock->state) == 0);
  gpr_mpscq_destroy(&lock->queue);
  GRPC_WORKQUEUE_UNREF(exec_ctx, lock->optional_workqueue, "combiner");
  gpr_free(lock);
}
| 154 | |
Craig Tiller | e0221ff | 2016-07-11 15:56:08 -0700 | [diff] [blame] | 155 | void grpc_combiner_destroy(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) { |
Craig Tiller | d80a8c9 | 2016-10-10 13:19:56 -0700 | [diff] [blame] | 156 | gpr_atm old_state = gpr_atm_full_fetch_add(&lock->state, -STATE_UNORPHANED); |
Craig Tiller | c3df7b4 | 2016-07-18 15:51:26 -0700 | [diff] [blame] | 157 | GRPC_COMBINER_TRACE(gpr_log( |
| 158 | GPR_DEBUG, "C:%p really_destroy old_state=%" PRIdPTR, lock, old_state)); |
Craig Tiller | e7603b8 | 2016-07-18 15:43:42 -0700 | [diff] [blame] | 159 | if (old_state == 1) { |
Craig Tiller | e0221ff | 2016-07-11 15:56:08 -0700 | [diff] [blame] | 160 | really_destroy(exec_ctx, lock); |
Craig Tiller | 5842a5b | 2016-05-02 12:38:57 -0700 | [diff] [blame] | 161 | } |
| 162 | } |
| 163 | |
// Append `lock` to the tail of the exec_ctx's list of combiners with
// pending work, making it the active combiner if the list was empty.
static void push_last_on_exec_ctx(grpc_exec_ctx *exec_ctx,
                                  grpc_combiner *lock) {
  lock->next_combiner_on_this_exec_ctx = NULL;
  if (exec_ctx->active_combiner == NULL) {
    // list was empty: lock becomes both head and tail
    exec_ctx->active_combiner = exec_ctx->last_combiner = lock;
  } else {
    exec_ctx->last_combiner->next_combiner_on_this_exec_ctx = lock;
    exec_ctx->last_combiner = lock;
  }
}
| 174 | |
Craig Tiller | 86037cd0 | 2016-09-02 19:58:43 -0700 | [diff] [blame] | 175 | static void push_first_on_exec_ctx(grpc_exec_ctx *exec_ctx, |
| 176 | grpc_combiner *lock) { |
| 177 | lock->next_combiner_on_this_exec_ctx = exec_ctx->active_combiner; |
| 178 | exec_ctx->active_combiner = lock; |
| 179 | if (lock->next_combiner_on_this_exec_ctx == NULL) { |
| 180 | exec_ctx->last_combiner = lock; |
| 181 | } |
| 182 | } |
| 183 | |
Craig Tiller | 91031da | 2016-12-28 15:44:25 -0800 | [diff] [blame] | 184 | static void combiner_exec(grpc_exec_ctx *exec_ctx, grpc_combiner *lock, |
| 185 | grpc_closure *cl, grpc_error *error, |
| 186 | bool covered_by_poller) { |
Craig Tiller | c69d27b | 2016-07-14 13:20:22 -0700 | [diff] [blame] | 187 | GPR_TIMER_BEGIN("combiner.execute", 0); |
Craig Tiller | d80a8c9 | 2016-10-10 13:19:56 -0700 | [diff] [blame] | 188 | gpr_atm last = gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT); |
Craig Tiller | fc2636d | 2016-09-12 09:57:07 -0700 | [diff] [blame] | 189 | GRPC_COMBINER_TRACE(gpr_log( |
| 190 | GPR_DEBUG, "C:%p grpc_combiner_execute c=%p cov=%d last=%" PRIdPTR, lock, |
| 191 | cl, covered_by_poller, last)); |
Craig Tiller | d80a8c9 | 2016-10-10 13:19:56 -0700 | [diff] [blame] | 192 | GPR_ASSERT(last & STATE_UNORPHANED); // ensure lock has not been destroyed |
Craig Tiller | a7cd41c | 2016-08-31 12:59:24 -0700 | [diff] [blame] | 193 | cl->error_data.scratch = |
| 194 | pack_error_data((error_data){error, covered_by_poller}); |
| 195 | if (covered_by_poller) { |
Craig Tiller | d80a8c9 | 2016-10-10 13:19:56 -0700 | [diff] [blame] | 196 | gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, 1); |
Craig Tiller | a7cd41c | 2016-08-31 12:59:24 -0700 | [diff] [blame] | 197 | } |
Craig Tiller | dfd3a8f | 2016-08-24 09:43:45 -0700 | [diff] [blame] | 198 | gpr_mpscq_push(&lock->queue, &cl->next_data.atm_next); |
Craig Tiller | 9dc01dd | 2016-07-11 16:26:34 -0700 | [diff] [blame] | 199 | if (last == 1) { |
Craig Tiller | d80a8c9 | 2016-10-10 13:19:56 -0700 | [diff] [blame] | 200 | // first element on this list: add it to the list of combiner locks |
| 201 | // executing within this exec_ctx |
Craig Tiller | 86037cd0 | 2016-09-02 19:58:43 -0700 | [diff] [blame] | 202 | push_last_on_exec_ctx(exec_ctx, lock); |
Craig Tiller | 5842a5b | 2016-05-02 12:38:57 -0700 | [diff] [blame] | 203 | } |
Craig Tiller | c69d27b | 2016-07-14 13:20:22 -0700 | [diff] [blame] | 204 | GPR_TIMER_END("combiner.execute", 0); |
Craig Tiller | 5842a5b | 2016-05-02 12:38:57 -0700 | [diff] [blame] | 205 | } |
Craig Tiller | a36857d | 2016-07-08 16:57:42 -0700 | [diff] [blame] | 206 | |
// Recover the grpc_combiner embedding a closure's scheduler: subtract the
// member offset of `scheduler_name` from the scheduler's address.
#define COMBINER_FROM_CLOSURE_SCHEDULER(closure, scheduler_name) \
  ((grpc_combiner *)(((char *)((closure)->scheduler)) -          \
                     offsetof(grpc_combiner, scheduler_name)))

// Immediate-execution scheduler entry points (uncovered / covered variants).
static void combiner_exec_uncovered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
                                    grpc_error *error) {
  combiner_exec(exec_ctx,
                COMBINER_FROM_CLOSURE_SCHEDULER(cl, uncovered_scheduler), cl,
                error, false);
}

static void combiner_exec_covered(grpc_exec_ctx *exec_ctx, grpc_closure *cl,
                                  grpc_error *error) {
  combiner_exec(exec_ctx,
                COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_scheduler), cl,
                error, true);
}
| 224 | |
Craig Tiller | dfd3a8f | 2016-08-24 09:43:45 -0700 | [diff] [blame] | 225 | static void move_next(grpc_exec_ctx *exec_ctx) { |
| 226 | exec_ctx->active_combiner = |
| 227 | exec_ctx->active_combiner->next_combiner_on_this_exec_ctx; |
| 228 | if (exec_ctx->active_combiner == NULL) { |
| 229 | exec_ctx->last_combiner = NULL; |
| 230 | } |
| 231 | } |
| 232 | |
// Closure callback run from the workqueue: re-attach the combiner to
// whichever exec_ctx executes this closure.
static void offload(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  grpc_combiner *lock = arg;
  push_last_on_exec_ctx(exec_ctx, lock);
}

// Detach the combiner from the current exec_ctx and hand its remaining work
// to the workqueue via the pre-initialized `offload` closure.
static void queue_offload(grpc_exec_ctx *exec_ctx, grpc_combiner *lock) {
  move_next(exec_ctx);
  GRPC_COMBINER_TRACE(gpr_log(GPR_DEBUG, "C:%p queue_offload --> %p", lock,
                              lock->optional_workqueue));
  grpc_closure_sched(exec_ctx, &lock->offload, GRPC_ERROR_NONE);
}
| 244 | |
// Run one step of work for the exec_ctx's active combiner.  Returns false
// when there is no active combiner; otherwise does one unit of work (an
// offload, one queued closure, or the whole final list) and returns true so
// the caller should call again.
bool grpc_combiner_continue_exec_ctx(grpc_exec_ctx *exec_ctx) {
  GPR_TIMER_BEGIN("combiner.continue_exec_ctx", 0);
  grpc_combiner *lock = exec_ctx->active_combiner;
  if (lock == NULL) {
    GPR_TIMER_END("combiner.continue_exec_ctx", 0);
    return false;
  }

  GRPC_COMBINER_TRACE(
      gpr_log(GPR_DEBUG,
              "C:%p grpc_combiner_continue_exec_ctx workqueue=%p "
              "is_covered_by_poller=" IS_COVERED_BY_POLLER_FMT
              " exec_ctx_ready_to_finish=%d "
              "time_to_execute_final_list=%d",
              lock, lock->optional_workqueue, IS_COVERED_BY_POLLER_ARGS(lock),
              grpc_exec_ctx_ready_to_finish(exec_ctx),
              lock->time_to_execute_final_list));

  if (lock->optional_workqueue != NULL && is_covered_by_poller(lock) &&
      grpc_exec_ctx_ready_to_finish(exec_ctx)) {
    GPR_TIMER_MARK("offload_from_finished_exec_ctx", 0);
    // this execution context wants to move on, and we have a workqueue (and
    // so can help the execution context out): schedule remaining work to be
    // picked up on the workqueue
    queue_offload(exec_ctx, lock);
    GPR_TIMER_END("combiner.continue_exec_ctx", 0);
    return true;
  }

  if (!lock->time_to_execute_final_list ||
      // peek to see if something new has shown up, and execute that with
      // priority
      (gpr_atm_acq_load(&lock->state) >> 1) > 1) {
    // (state >> 1 drops the UNORPHANED bit, leaving the queued-element count)
    gpr_mpscq_node *n = gpr_mpscq_pop(&lock->queue);
    GRPC_COMBINER_TRACE(
        gpr_log(GPR_DEBUG, "C:%p maybe_finish_one n=%p", lock, n));
    if (n == NULL) {
      // queue is in an inconsistent state: use this as a cue that we should
      // go off and do something else for a while (and come back later)
      GPR_TIMER_MARK("delay_busy", 0);
      if (lock->optional_workqueue != NULL && is_covered_by_poller(lock)) {
        queue_offload(exec_ctx, lock);
      }
      GPR_TIMER_END("combiner.continue_exec_ctx", 0);
      return true;
    }
    GPR_TIMER_BEGIN("combiner.exec1", 0);
    grpc_closure *cl = (grpc_closure *)n;
    // recover the error + covered flag packed by combiner_exec
    error_data err = unpack_error_data(cl->error_data.scratch);
    cl->cb(exec_ctx, cl->cb_arg, err.error);
    if (err.covered_by_poller) {
      gpr_atm_no_barrier_fetch_add(&lock->elements_covered_by_poller, -1);
    }
    GRPC_ERROR_UNREF(err.error);
    GPR_TIMER_END("combiner.exec1", 0);
  } else {
    // drain the final list: detach it first so callbacks can safely schedule
    // new "finally" closures onto a fresh list
    grpc_closure *c = lock->final_list.head;
    GPR_ASSERT(c != NULL);
    grpc_closure_list_init(&lock->final_list);
    lock->final_list_covered_by_poller = false;
    // NOTE(review): `loops` is never incremented, so the execute_final trace
    // index always prints 0 — confirm whether that is intended.
    int loops = 0;
    while (c != NULL) {
      GPR_TIMER_BEGIN("combiner.exec_1final", 0);
      GRPC_COMBINER_TRACE(
          gpr_log(GPR_DEBUG, "C:%p execute_final[%d] c=%p", lock, loops, c));
      grpc_closure *next = c->next_data.next;
      grpc_error *error = c->error_data.error;
      c->cb(exec_ctx, c->cb_arg, error);
      GRPC_ERROR_UNREF(error);
      c = next;
      GPR_TIMER_END("combiner.exec_1final", 0);
    }
  }

  GPR_TIMER_MARK("unref", 0);
  // temporarily detach from the exec_ctx; depending on the resulting state we
  // either re-attach below (push_first_on_exec_ctx) or are done with the lock
  move_next(exec_ctx);
  lock->time_to_execute_final_list = false;
  gpr_atm old_state =
      gpr_atm_full_fetch_add(&lock->state, -STATE_ELEM_COUNT_LOW_BIT);
  GRPC_COMBINER_TRACE(
      gpr_log(GPR_DEBUG, "C:%p finish old_state=%" PRIdPTR, lock, old_state));
// Define a macro to ease readability of the following switch statement.
#define OLD_STATE_WAS(orphaned, elem_count) \
  (((orphaned) ? 0 : STATE_UNORPHANED) |    \
   ((elem_count)*STATE_ELEM_COUNT_LOW_BIT))
  // Depending on what the previous state was, we need to perform different
  // actions.
  switch (old_state) {
    default:
      // we have multiple queued work items: just continue executing them
      break;
    case OLD_STATE_WAS(false, 2):
    case OLD_STATE_WAS(true, 2):
      // we're down to one queued item: if it's the final list we should do that
      if (!grpc_closure_list_empty(lock->final_list)) {
        lock->time_to_execute_final_list = true;
      }
      break;
    case OLD_STATE_WAS(false, 1):
      // had one count, one unorphaned --> unlocked unorphaned
      GPR_TIMER_END("combiner.continue_exec_ctx", 0);
      return true;
    case OLD_STATE_WAS(true, 1):
      // and one count, one orphaned --> unlocked and orphaned
      really_destroy(exec_ctx, lock);
      GPR_TIMER_END("combiner.continue_exec_ctx", 0);
      return true;
    case OLD_STATE_WAS(false, 0):
    case OLD_STATE_WAS(true, 0):
      // these values are illegal - representing an already unlocked or
      // deleted lock
      GPR_TIMER_END("combiner.continue_exec_ctx", 0);
      GPR_UNREACHABLE_CODE(return true);
  }
  // still more work to do: put this combiner back at the head of the list
  push_first_on_exec_ctx(exec_ctx, lock);
  GPR_TIMER_END("combiner.continue_exec_ctx", 0);
  return true;
}
| 363 | |
// Defined below; needed here because the slow path schedules it.
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
                            grpc_error *error);

// Queue `closure` into the combiner's "finally" phase, run after all
// immediately-queued work.  If this exec_ctx is not currently executing
// inside `lock`, bounce through the combiner's regular queue (via
// enqueue_finally) so final_list is only ever touched while holding the lock.
static void combiner_execute_finally(grpc_exec_ctx *exec_ctx,
                                     grpc_combiner *lock, grpc_closure *closure,
                                     grpc_error *error,
                                     bool covered_by_poller) {
  GRPC_COMBINER_TRACE(gpr_log(
      GPR_DEBUG, "C:%p grpc_combiner_execute_finally c=%p; ac=%p; cov=%d", lock,
      closure, exec_ctx->active_combiner, covered_by_poller));
  GPR_TIMER_BEGIN("combiner.execute_finally", 0);
  if (exec_ctx->active_combiner != lock) {
    GPR_TIMER_MARK("slowpath", 0);
    // not inside the lock: reschedule through the combiner queue;
    // enqueue_finally re-enters this function once the lock is held
    grpc_closure_sched(
        exec_ctx, grpc_closure_create(enqueue_finally, closure,
                                      grpc_combiner_scheduler(lock, false)),
        error);
    GPR_TIMER_END("combiner.execute_finally", 0);
    return;
  }

  if (grpc_closure_list_empty(lock->final_list)) {
    // first finally closure: account for the whole final list as one queued
    // element in `state`
    gpr_atm_full_fetch_add(&lock->state, STATE_ELEM_COUNT_LOW_BIT);
  }
  if (covered_by_poller) {
    lock->final_list_covered_by_poller = true;
  }
  grpc_closure_list_append(&lock->final_list, closure, error);
  GPR_TIMER_END("combiner.execute_finally", 0);
}
Craig Tiller | 91031da | 2016-12-28 15:44:25 -0800 | [diff] [blame] | 394 | |
// Trampoline executed inside the combiner: re-issue the deferred "finally"
// request now that the lock is held.  NOTE(review): covered_by_poller is
// always false on this slow path — confirm that dropping coverage here is
// intended.
static void enqueue_finally(grpc_exec_ctx *exec_ctx, void *closure,
                            grpc_error *error) {
  combiner_execute_finally(exec_ctx, exec_ctx->active_combiner, closure,
                           GRPC_ERROR_REF(error), false);
}

// Finally-scheduler entry points: recover the combiner from the embedded
// scheduler address, then defer to combiner_execute_finally.
static void combiner_finally_exec_uncovered(grpc_exec_ctx *exec_ctx,
                                            grpc_closure *cl,
                                            grpc_error *error) {
  combiner_execute_finally(exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(
                                         cl, uncovered_finally_scheduler),
                           cl, error, false);
}

static void combiner_finally_exec_covered(grpc_exec_ctx *exec_ctx,
                                          grpc_closure *cl, grpc_error *error) {
  combiner_execute_finally(
      exec_ctx, COMBINER_FROM_CLOSURE_SCHEDULER(cl, covered_finally_scheduler),
      cl, error, true);
}
| 415 | |
| 416 | grpc_closure_scheduler *grpc_combiner_scheduler(grpc_combiner *combiner, |
| 417 | bool covered_by_poller) { |
| 418 | return covered_by_poller ? &combiner->covered_scheduler |
| 419 | : &combiner->uncovered_scheduler; |
| 420 | } |
| 421 | |
| 422 | grpc_closure_scheduler *grpc_combiner_finally_scheduler( |
| 423 | grpc_combiner *combiner, bool covered_by_poller) { |
| 424 | return covered_by_poller ? &combiner->covered_finally_scheduler |
| 425 | : &combiner->uncovered_finally_scheduler; |
| 426 | } |