Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 1 | /* |
| 2 | * |
Craig Tiller | 0605995 | 2015-02-18 08:34:56 -0800 | [diff] [blame] | 3 | * Copyright 2015, Google Inc. |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 4 | * All rights reserved. |
| 5 | * |
| 6 | * Redistribution and use in source and binary forms, with or without |
| 7 | * modification, are permitted provided that the following conditions are |
| 8 | * met: |
| 9 | * |
| 10 | * * Redistributions of source code must retain the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer. |
| 12 | * * Redistributions in binary form must reproduce the above |
| 13 | * copyright notice, this list of conditions and the following disclaimer |
| 14 | * in the documentation and/or other materials provided with the |
| 15 | * distribution. |
| 16 | * * Neither the name of Google Inc. nor the names of its |
| 17 | * contributors may be used to endorse or promote products derived from |
| 18 | * this software without specific prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | * |
| 32 | */ |
| 33 | |
| 34 | #include "src/core/surface/completion_queue.h" |
| 35 | |
| 36 | #include <stdio.h> |
| 37 | #include <string.h> |
| 38 | |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 39 | #include "src/core/iomgr/pollset.h" |
Craig Tiller | 485d776 | 2015-01-23 12:54:05 -0800 | [diff] [blame] | 40 | #include "src/core/support/string.h" |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 41 | #include "src/core/surface/call.h" |
| 42 | #include "src/core/surface/event_string.h" |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 43 | #include "src/core/surface/surface_trace.h" |
| 44 | #include <grpc/support/alloc.h> |
| 45 | #include <grpc/support/atm.h> |
| 46 | #include <grpc/support/log.h> |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 47 | |
| 48 | #define NUM_TAG_BUCKETS 31 |
| 49 | |
/* A single completion event. Extends grpc_event with intrusive links that
   place the event simultaneously on two circular doubly-linked lists: the
   cq-wide FIFO queue consumed by next(), and a per-tag hash bucket chain
   used by pluck(). The links are hidden from outside this module. */
typedef struct event {
  grpc_event base;          /* the public event handed back to the caller */
  struct event *queue_next; /* next/prev in the cq-wide FIFO queue */
  struct event *queue_prev;
  struct event *bucket_next; /* next/prev in the tag hash bucket chain */
  struct event *bucket_prev;
} event;
| 59 | |
/* Completion queue structure */
struct grpc_completion_queue {
  /* When refs drops to zero, we are in shutdown mode, and will be destroyable
     once all queued events are drained */
  gpr_refcount refs;
  /* Once owning_refs drops to zero, we will destroy the cq */
  gpr_refcount owning_refs;
  /* the set of low level i/o things that concern this cq */
  grpc_pollset pollset;
  /* 0 initially, 1 once we've begun shutting down */
  int shutdown;
  /* 1 once grpc_completion_queue_shutdown has been invoked; set before
     `shutdown` itself, which waits for all pending ops to complete */
  int shutdown_called;
  /* Head of a circular doubly-linked list of queued events (prev points to
     the last element) */
  event *queue;
  /* Fixed size chained hash table of events for pluck(); bucket index is
     tag % NUM_TAG_BUCKETS, each bucket a circular doubly-linked chain */
  event *buckets[NUM_TAG_BUCKETS];
};
| 77 | |
Craig Tiller | 32946d3 | 2015-01-15 11:37:30 -0800 | [diff] [blame] | 78 | grpc_completion_queue *grpc_completion_queue_create(void) { |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 79 | grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue)); |
| 80 | memset(cc, 0, sizeof(*cc)); |
| 81 | /* Initial ref is dropped by grpc_completion_queue_shutdown */ |
| 82 | gpr_ref_init(&cc->refs, 1); |
Craig Tiller | 70730b4 | 2015-05-22 14:42:38 -0700 | [diff] [blame] | 83 | /* One for destroy(), one for pollset_shutdown */ |
| 84 | gpr_ref_init(&cc->owning_refs, 2); |
ctiller | d79b486 | 2014-12-17 16:36:59 -0800 | [diff] [blame] | 85 | grpc_pollset_init(&cc->pollset); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 86 | return cc; |
| 87 | } |
| 88 | |
Craig Tiller | 463f237 | 2015-05-28 16:16:15 -0700 | [diff] [blame^] | 89 | |
| 90 | |
| 91 | |
| 92 | |
| 93 | |
| 94 | |
| 95 | |
| 96 | |
| 97 | |
| 98 | |
| 99 | |
| 100 | |
/* Take an owning reference on the cq. When GRPC_CQ_REF_COUNT_DEBUG is
   defined, the function takes an extra `reason` string and logs every
   ref-count transition; the #ifdef swaps in the matching signature while
   both variants share the single gpr_ref body below. Callers use the
   GRPC_CQ_INTERNAL_REF macro so the extra argument compiles out cleanly. */
#ifdef GRPC_CQ_REF_COUNT_DEBUG
void grpc_cq_internal_ref(grpc_completion_queue *cc, const char *reason) {
  gpr_log(GPR_DEBUG, "CQ:%p ref %d -> %d %s", cc, (int)cc->owning_refs.count,
          (int)cc->owning_refs.count + 1, reason);
#else
void grpc_cq_internal_ref(grpc_completion_queue *cc) {
#endif
  gpr_ref(&cc->owning_refs);
}
| 109 | |
/* Callback run once the pollset has finished shutting down: drop the owning
   ref that was reserved for pollset shutdown at creation time. */
static void on_pollset_destroy_done(void *arg) {
  grpc_completion_queue *cc = arg;
  GRPC_CQ_INTERNAL_UNREF(cc, "pollset_destroy");
}
| 114 | |
/* Drop an owning reference; when the last owner releases, tear down the
   pollset and free the cq. Debug builds take an extra `reason` string and
   log the transition (same #ifdef signature trick as grpc_cq_internal_ref). */
#ifdef GRPC_CQ_REF_COUNT_DEBUG
void grpc_cq_internal_unref(grpc_completion_queue *cc, const char *reason) {
  gpr_log(GPR_DEBUG, "CQ:%p unref %d -> %d %s", cc, (int)cc->owning_refs.count,
          (int)cc->owning_refs.count - 1, reason);
#else
void grpc_cq_internal_unref(grpc_completion_queue *cc) {
#endif
  if (gpr_unref(&cc->owning_refs)) {
    /* every queued event must have been drained before final teardown */
    GPR_ASSERT(cc->queue == NULL);
    grpc_pollset_destroy(&cc->pollset);
    gpr_free(cc);
  }
}
| 127 | |
/* Create and append an event to the queue. Returns the event so that its data
   members can be filled in.
   `call` is accepted for interface symmetry with grpc_cq_end_op but is not
   stored here.
   Requires GRPC_POLLSET_MU(&cc->pollset) locked. */
static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type,
                         void *tag, grpc_call *call) {
  event *ev = gpr_malloc(sizeof(event));
  /* bucket index for pluck(): hash is simply tag % NUM_TAG_BUCKETS */
  gpr_uintptr bucket = ((gpr_uintptr)tag) % NUM_TAG_BUCKETS;
  ev->base.type = type;
  ev->base.tag = tag;
  /* splice onto the tail of the circular FIFO queue (cc->queue is the head;
     head->queue_prev is the tail) */
  if (cc->queue == NULL) {
    cc->queue = ev->queue_next = ev->queue_prev = ev;
  } else {
    ev->queue_next = cc->queue;
    ev->queue_prev = cc->queue->queue_prev;
    ev->queue_next->queue_prev = ev->queue_prev->queue_next = ev;
  }
  /* splice onto the tail of the circular hash bucket chain */
  if (cc->buckets[bucket] == NULL) {
    cc->buckets[bucket] = ev->bucket_next = ev->bucket_prev = ev;
  } else {
    ev->bucket_next = cc->buckets[bucket];
    ev->bucket_prev = cc->buckets[bucket]->bucket_prev;
    ev->bucket_next->bucket_prev = ev->bucket_prev->bucket_next = ev;
  }
  /* wake any thread blocked in grpc_pollset_work so it can pick this up */
  grpc_pollset_kick(&cc->pollset);
  return ev;
}
| 154 | |
/* Mark the beginning of an operation that will eventually post an event to
   this cq: take a pending-op ref (balanced by the gpr_unref in
   grpc_cq_end_op) and, if the op belongs to a call, keep the call alive
   until its event is delivered. */
void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call) {
  gpr_ref(&cc->refs);
  if (call) GRPC_CALL_INTERNAL_REF(call, "cq");
}
| 159 | |
/* Signal the end of an operation - if this is the last waiting-to-be-queued
   event, then enter shutdown mode */
void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
                    int success) {
  event *ev;
  int shutdown = 0;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_OP_COMPLETE, tag, call);
  ev->base.success = success;
  /* drop the pending-op ref taken in grpc_cq_begin_op; reaching zero is only
     possible after grpc_completion_queue_shutdown dropped the initial ref,
     hence the shutdown_called assertion */
  if (gpr_unref(&cc->refs)) {
    GPR_ASSERT(!cc->shutdown);
    GPR_ASSERT(cc->shutdown_called);
    cc->shutdown = 1;
    shutdown = 1;
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  /* balance the call ref taken in grpc_cq_begin_op */
  if (call) GRPC_CALL_INTERNAL_UNREF(call, "cq", 0);
  if (shutdown) {
    /* shut the pollset down outside the lock; on_pollset_destroy_done then
       drops the owning ref reserved for this at creation time */
    grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
  }
}
| 181 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 182 | /* Create a GRPC_QUEUE_SHUTDOWN event without queuing it anywhere */ |
Craig Tiller | 32946d3 | 2015-01-15 11:37:30 -0800 | [diff] [blame] | 183 | static event *create_shutdown_event(void) { |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 184 | event *ev = gpr_malloc(sizeof(event)); |
| 185 | ev->base.type = GRPC_QUEUE_SHUTDOWN; |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 186 | ev->base.tag = NULL; |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 187 | return ev; |
| 188 | } |
| 189 | |
/* Block until the next event is available, the cq is shut down, or the
   deadline expires. Returns the event by value; a GRPC_QUEUE_TIMEOUT typed
   event signals deadline expiry. */
grpc_event grpc_completion_queue_next(grpc_completion_queue *cc,
                                      gpr_timespec deadline) {
  event *ev = NULL;
  grpc_event ret;

  /* hold an owning ref for the duration so the cq cannot be destroyed out
     from under us while we block in grpc_pollset_work */
  GRPC_CQ_INTERNAL_REF(cc, "next");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if (cc->queue != NULL) {
      gpr_uintptr bucket;
      /* pop the head of the FIFO queue... */
      ev = cc->queue;
      bucket = ((gpr_uintptr)ev->base.tag) % NUM_TAG_BUCKETS;
      cc->queue = ev->queue_next;
      ev->queue_next->queue_prev = ev->queue_prev;
      ev->queue_prev->queue_next = ev->queue_next;
      /* ...and unlink it from its hash bucket chain */
      ev->bucket_next->bucket_prev = ev->bucket_prev;
      ev->bucket_prev->bucket_next = ev->bucket_next;
      if (ev == cc->buckets[bucket]) {
        cc->buckets[bucket] = ev->bucket_next;
        if (ev == cc->buckets[bucket]) {
          /* ev was the only element in the bucket */
          cc->buckets[bucket] = NULL;
        }
      }
      if (cc->queue == ev) {
        /* ev was the only queued event */
        cc->queue = NULL;
      }
      break;
    }
    if (cc->shutdown) {
      ev = create_shutdown_event();
      break;
    }
    /* no event ready: poll for i/o until the deadline; a zero return means
       the deadline passed, so report a timeout */
    if (!grpc_pollset_work(&cc->pollset, deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
      GRPC_CQ_INTERNAL_UNREF(cc, "next");
      return ret;
    }
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  /* copy out the public part of the event and free the wrapper */
  ret = ev->base;
  gpr_free(ev);
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "next");
  return ret;
}
| 238 | |
/* Remove and return the first queued event whose tag matches `tag`, or NULL
   if no such event is currently queued.
   Requires GRPC_POLLSET_MU(&cc->pollset) locked. */
static event *pluck_event(grpc_completion_queue *cc, void *tag) {
  gpr_uintptr bucket = ((gpr_uintptr)tag) % NUM_TAG_BUCKETS;
  event *ev = cc->buckets[bucket];
  if (ev == NULL) return NULL;
  /* walk the circular bucket chain looking for a matching tag */
  do {
    if (ev->base.tag == tag) {
      /* unlink from both the FIFO queue and the bucket chain */
      ev->queue_next->queue_prev = ev->queue_prev;
      ev->queue_prev->queue_next = ev->queue_next;
      ev->bucket_next->bucket_prev = ev->bucket_prev;
      ev->bucket_prev->bucket_next = ev->bucket_next;
      if (ev == cc->buckets[bucket]) {
        cc->buckets[bucket] = ev->bucket_next;
        if (ev == cc->buckets[bucket]) {
          /* ev was the only element in the bucket */
          cc->buckets[bucket] = NULL;
        }
      }
      if (cc->queue == ev) {
        cc->queue = ev->queue_next;
        if (cc->queue == ev) {
          /* ev was the only queued event */
          cc->queue = NULL;
        }
      }
      return ev;
    }
    ev = ev->bucket_next;
  } while (ev != cc->buckets[bucket]);
  return NULL;
}
| 267 | |
/* Block until an event with the given tag is available, the cq is shut down,
   or the deadline expires (reported as a GRPC_QUEUE_TIMEOUT typed event).
   Mirrors grpc_completion_queue_next but matches on tag via pluck_event. */
grpc_event grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                       gpr_timespec deadline) {
  event *ev = NULL;
  grpc_event ret;

  /* hold an owning ref so the cq cannot be destroyed while we block */
  GRPC_CQ_INTERNAL_REF(cc, "pluck");
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if ((ev = pluck_event(cc, tag))) {
      break;
    }
    if (cc->shutdown) {
      ev = create_shutdown_event();
      break;
    }
    /* no matching event yet: poll for i/o until the deadline; zero return
       means the deadline passed */
    if (!grpc_pollset_work(&cc->pollset, deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      memset(&ret, 0, sizeof(ret));
      ret.type = GRPC_QUEUE_TIMEOUT;
      GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
      GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
      return ret;
    }
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  /* copy out the public part of the event and free the wrapper */
  ret = ev->base;
  gpr_free(ev);
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ret);
  GRPC_CQ_INTERNAL_UNREF(cc, "pluck");
  return ret;
}
| 299 | |
| 300 | /* Shutdown simply drops a ref that we reserved at creation time; if we drop |
| 301 | to zero here, then enter shutdown mode and wake up any waiters */ |
| 302 | void grpc_completion_queue_shutdown(grpc_completion_queue *cc) { |
Craig Tiller | f5fd4ba | 2015-03-02 18:01:21 +0000 | [diff] [blame] | 303 | gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); |
| 304 | cc->shutdown_called = 1; |
| 305 | gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); |
| 306 | |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 307 | if (gpr_unref(&cc->refs)) { |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 308 | gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset)); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 309 | GPR_ASSERT(!cc->shutdown); |
| 310 | cc->shutdown = 1; |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 311 | gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset)); |
Craig Tiller | 70730b4 | 2015-05-22 14:42:38 -0700 | [diff] [blame] | 312 | grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc); |
Nicolas Noble | b7ebd3b | 2014-11-26 16:33:03 -0800 | [diff] [blame] | 313 | } |
| 314 | } |
| 315 | |
/* Public destroy: just drops the owning ref reserved for destroy() at
   creation time; actual teardown happens in grpc_cq_internal_unref once the
   pollset-shutdown ref has also been released. */
void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
  GRPC_CQ_INTERNAL_UNREF(cc, "destroy");
}
| 319 | |
/* Expose the cq's embedded pollset to other core modules. */
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
  return &cc->pollset;
}
Craig Tiller | aec96aa | 2015-04-07 14:32:15 -0700 | [diff] [blame] | 323 | |
/* HACK (named deliberately): kick the pollset and let it do work for up to
   100ms under the lock. The 100ms bound is arbitrary; the
   grpc_pollset_work return value is intentionally ignored. */
void grpc_cq_hack_spin_pollset(grpc_completion_queue *cc) {
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  grpc_pollset_kick(&cc->pollset);
  grpc_pollset_work(&cc->pollset,
                    gpr_time_add(gpr_now(), gpr_time_from_millis(100)));
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}