/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/surface/completion_queue.h"

#include <stdio.h>
#include <string.h>

#include "src/core/iomgr/pollset.h"
#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/surface/event_string.h"
#include "src/core/surface/surface_trace.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>

#define NUM_TAG_BUCKETS 31
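
/* Tags are hashed into a fixed, small table by pointer value. 31 is prime,
   which presumably helps spread distinct tag pointers across buckets;
   collisions are chained through the bucket_next/bucket_prev links of
   struct event below. */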

/* A single event: extends grpc_event to form a linked list with a destruction
   function (on_finish) that is hidden from outside this module */
typedef struct event {
  grpc_event base;
  grpc_event_finish_func on_finish;
  void *on_finish_user_data;
  struct event *queue_next;
  struct event *queue_prev;
  struct event *bucket_next;
  struct event *bucket_prev;
} event;
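
/* Each event sits on two circular doubly-linked lists at once: the FIFO
   queue (queue_next/queue_prev) consumed by grpc_completion_queue_next, and
   a per-tag-hash bucket chain (bucket_next/bucket_prev) searched by
   grpc_completion_queue_pluck. An event must always be unlinked from both
   lists together, as the removal code in next() and pluck_event() does. */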

/* Completion queue structure */
struct grpc_completion_queue {
  /* TODO(ctiller): see if this can be removed */
  int allow_polling;

  /* When refs drops to zero, we are in shutdown mode, and will be destroyable
     once all queued events are drained */
  gpr_refcount refs;
  /* the set of low level i/o things that concern this cq */
  grpc_pollset pollset;
  /* 0 initially, 1 once we've begun shutting down */
  int shutdown;
  /* Head of a linked list of queued events (prev points to the last element) */
  event *queue;
  /* Fixed size chained hash table of events for pluck() */
  event *buckets[NUM_TAG_BUCKETS];

#ifndef NDEBUG
  /* Debug support: track which operations are in flight at any given time */
  gpr_atm pending_op_count[GRPC_COMPLETION_DO_NOT_USE];
#endif
};

/* Default do-nothing on_finish function */
static void null_on_finish(void *user_data, grpc_op_error error) {}

grpc_completion_queue *grpc_completion_queue_create(void) {
  grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
  memset(cc, 0, sizeof(*cc));
  /* Initial ref is dropped by grpc_completion_queue_shutdown */
  gpr_ref_init(&cc->refs, 1);
  grpc_pollset_init(&cc->pollset);
  cc->allow_polling = 1;
  return cc;
}

void grpc_completion_queue_dont_poll_test_only(grpc_completion_queue *cc) {
  cc->allow_polling = 0;
}

/* Create and append an event to the queue. Returns the event so that its data
   members can be filled in.
   Requires GRPC_POLLSET_MU(&cc->pollset) locked. */
static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type,
                         void *tag, grpc_call *call,
                         grpc_event_finish_func on_finish, void *user_data) {
  event *ev = gpr_malloc(sizeof(event));
  gpr_uintptr bucket = ((gpr_uintptr)tag) % NUM_TAG_BUCKETS;
  GPR_ASSERT(!cc->shutdown);
  ev->base.type = type;
  ev->base.tag = tag;
  ev->base.call = call;
  ev->on_finish = on_finish ? on_finish : null_on_finish;
  ev->on_finish_user_data = user_data;
  if (cc->queue == NULL) {
    cc->queue = ev->queue_next = ev->queue_prev = ev;
  } else {
    ev->queue_next = cc->queue;
    ev->queue_prev = cc->queue->queue_prev;
    ev->queue_next->queue_prev = ev->queue_prev->queue_next = ev;
  }
  if (cc->buckets[bucket] == NULL) {
    cc->buckets[bucket] = ev->bucket_next = ev->bucket_prev = ev;
  } else {
    ev->bucket_next = cc->buckets[bucket];
    ev->bucket_prev = cc->buckets[bucket]->bucket_prev;
    ev->bucket_next->bucket_prev = ev->bucket_prev->bucket_next = ev;
  }
  gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
  grpc_pollset_kick(&cc->pollset);
  return ev;
}
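
/* Because both lists are circular, with cc->queue (or cc->buckets[i])
   pointing at the oldest element, splicing the new event in between the
   head and head->prev - as add_locked does above - appends at the tail in
   O(1). The cv broadcast wakes threads blocked in gpr_cv_wait, and
   grpc_pollset_kick interrupts any thread blocked in grpc_pollset_work, so
   both flavors of waiter in next()/pluck() observe the new event promptly. */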

void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call,
                      grpc_completion_type type) {
  gpr_ref(&cc->refs);
  if (call) grpc_call_internal_ref(call);
#ifndef NDEBUG
  gpr_atm_no_barrier_fetch_add(&cc->pending_op_count[type], 1);
#endif
}
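
/* Every grpc_cq_begin_op must be balanced by exactly one grpc_cq_end_*
   call: the queue ref taken here is released in end_op_locked, and the call
   ref (when a call is supplied) is released by grpc_event_finish once the
   consumer is done with the event. An unbalanced begin_op would hold refs
   above zero forever and prevent the queue from ever entering shutdown. */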

/* Signal the end of an operation - if this is the last waiting-to-be-queued
   event, then enter shutdown mode */
static void end_op_locked(grpc_completion_queue *cc,
                          grpc_completion_type type) {
#ifndef NDEBUG
  GPR_ASSERT(gpr_atm_full_fetch_add(&cc->pending_op_count[type], -1) > 0);
#endif
  if (gpr_unref(&cc->refs)) {
    GPR_ASSERT(!cc->shutdown);
    cc->shutdown = 1;
    gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
  }
}

void grpc_cq_end_server_shutdown(grpc_completion_queue *cc, void *tag) {
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  add_locked(cc, GRPC_SERVER_SHUTDOWN, tag, NULL, NULL, NULL);
  end_op_locked(cc, GRPC_SERVER_SHUTDOWN);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_read(grpc_completion_queue *cc, void *tag, grpc_call *call,
                      grpc_event_finish_func on_finish, void *user_data,
                      grpc_byte_buffer *read) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_READ, tag, call, on_finish, user_data);
  ev->base.data.read = read;
  end_op_locked(cc, GRPC_READ);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_write_accepted(grpc_completion_queue *cc, void *tag,
                                grpc_call *call,
                                grpc_event_finish_func on_finish,
                                void *user_data, grpc_op_error error) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_WRITE_ACCEPTED, tag, call, on_finish, user_data);
  ev->base.data.write_accepted = error;
  end_op_locked(cc, GRPC_WRITE_ACCEPTED);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_op_complete(grpc_completion_queue *cc, void *tag,
                             grpc_call *call, grpc_event_finish_func on_finish,
                             void *user_data, grpc_op_error error) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_OP_COMPLETE, tag, call, on_finish, user_data);
  ev->base.data.write_accepted = error;
  end_op_locked(cc, GRPC_OP_COMPLETE);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}
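
/* Note: here and in grpc_cq_end_op below, a GRPC_OP_COMPLETE result is
   stored in the write_accepted member of the grpc_event data union; the
   union appears to have no dedicated member for GRPC_OP_COMPLETE at this
   point, so an existing grpc_op_error-typed slot is reused. */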

void grpc_cq_end_op(grpc_completion_queue *cc, void *tag, grpc_call *call,
                    grpc_event_finish_func on_finish, void *user_data,
                    grpc_op_error error) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_OP_COMPLETE, tag, call, on_finish, user_data);
  ev->base.data.write_accepted = error;
  end_op_locked(cc, GRPC_OP_COMPLETE);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_finish_accepted(grpc_completion_queue *cc, void *tag,
                                 grpc_call *call,
                                 grpc_event_finish_func on_finish,
                                 void *user_data, grpc_op_error error) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_FINISH_ACCEPTED, tag, call, on_finish, user_data);
  ev->base.data.finish_accepted = error;
  end_op_locked(cc, GRPC_FINISH_ACCEPTED);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_client_metadata_read(grpc_completion_queue *cc, void *tag,
                                      grpc_call *call,
                                      grpc_event_finish_func on_finish,
                                      void *user_data, size_t count,
                                      grpc_metadata *elements) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_CLIENT_METADATA_READ, tag, call, on_finish,
                  user_data);
  ev->base.data.client_metadata_read.count = count;
  ev->base.data.client_metadata_read.elements = elements;
  end_op_locked(cc, GRPC_CLIENT_METADATA_READ);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_finished(grpc_completion_queue *cc, void *tag, grpc_call *call,
                          grpc_event_finish_func on_finish, void *user_data,
                          grpc_status_code status, const char *details,
                          grpc_metadata *metadata_elements,
                          size_t metadata_count) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_FINISHED, tag, call, on_finish, user_data);
  ev->base.data.finished.status = status;
  ev->base.data.finished.details = details;
  ev->base.data.finished.metadata_count = metadata_count;
  ev->base.data.finished.metadata_elements = metadata_elements;
  end_op_locked(cc, GRPC_FINISHED);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_new_rpc(grpc_completion_queue *cc, void *tag, grpc_call *call,
                         grpc_event_finish_func on_finish, void *user_data,
                         const char *method, const char *host,
                         gpr_timespec deadline, size_t metadata_count,
                         grpc_metadata *metadata_elements) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_SERVER_RPC_NEW, tag, call, on_finish, user_data);
  ev->base.data.server_rpc_new.method = method;
  ev->base.data.server_rpc_new.host = host;
  ev->base.data.server_rpc_new.deadline = deadline;
  ev->base.data.server_rpc_new.metadata_count = metadata_count;
  ev->base.data.server_rpc_new.metadata_elements = metadata_elements;
  end_op_locked(cc, GRPC_SERVER_RPC_NEW);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

/* Create a GRPC_QUEUE_SHUTDOWN event without queuing it anywhere */
static event *create_shutdown_event(void) {
  event *ev = gpr_malloc(sizeof(event));
  ev->base.type = GRPC_QUEUE_SHUTDOWN;
  ev->base.call = NULL;
  ev->base.tag = NULL;
  ev->on_finish = null_on_finish;
  return ev;
}
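
/* The shutdown event is deliberately never linked into the queue or a
   bucket: it is manufactured on demand by next()/pluck() once cc->shutdown
   is set, and grpc_event_finish frees it exactly like a queued event (its
   list pointers are never read on that path). */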

grpc_event *grpc_completion_queue_next(grpc_completion_queue *cc,
                                       gpr_timespec deadline) {
  event *ev = NULL;

  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if (cc->queue != NULL) {
      gpr_uintptr bucket;
      ev = cc->queue;
      bucket = ((gpr_uintptr)ev->base.tag) % NUM_TAG_BUCKETS;
      cc->queue = ev->queue_next;
      ev->queue_next->queue_prev = ev->queue_prev;
      ev->queue_prev->queue_next = ev->queue_next;
      ev->bucket_next->bucket_prev = ev->bucket_prev;
      ev->bucket_prev->bucket_next = ev->bucket_next;
      if (ev == cc->buckets[bucket]) {
        cc->buckets[bucket] = ev->bucket_next;
        if (ev == cc->buckets[bucket]) {
          cc->buckets[bucket] = NULL;
        }
      }
      if (cc->queue == ev) {
        cc->queue = NULL;
      }
      break;
    }
    if (cc->shutdown) {
      ev = create_shutdown_event();
      break;
    }
    if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) {
      continue;
    }
    if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
                    GRPC_POLLSET_MU(&cc->pollset), deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      return NULL;
    }
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
  return &ev->base;
}
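
/* The wait loop above tries, in order: dequeue the oldest event; synthesize
   the shutdown event once cc->shutdown is set; do useful i/o work via
   grpc_pollset_work when polling is allowed; otherwise block on the pollset
   condition variable. A non-zero return from gpr_cv_wait means the deadline
   expired, which surfaces to the caller as a NULL (timeout) result. */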

static event *pluck_event(grpc_completion_queue *cc, void *tag) {
  gpr_uintptr bucket = ((gpr_uintptr)tag) % NUM_TAG_BUCKETS;
  event *ev = cc->buckets[bucket];
  if (ev == NULL) return NULL;
  do {
    if (ev->base.tag == tag) {
      ev->queue_next->queue_prev = ev->queue_prev;
      ev->queue_prev->queue_next = ev->queue_next;
      ev->bucket_next->bucket_prev = ev->bucket_prev;
      ev->bucket_prev->bucket_next = ev->bucket_next;
      if (ev == cc->buckets[bucket]) {
        cc->buckets[bucket] = ev->bucket_next;
        if (ev == cc->buckets[bucket]) {
          cc->buckets[bucket] = NULL;
        }
      }
      if (cc->queue == ev) {
        cc->queue = ev->queue_next;
        if (cc->queue == ev) {
          cc->queue = NULL;
        }
      }
      return ev;
    }
    ev = ev->bucket_next;
  } while (ev != cc->buckets[bucket]);
  return NULL;
}
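
/* pluck_event makes a single pass around one circular bucket chain, so its
   cost is bounded by the number of pending events whose tags collide modulo
   NUM_TAG_BUCKETS, not by the total queue length. The matched event is
   unlinked from both the bucket and the FIFO queue before being returned. */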

grpc_event *grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                        gpr_timespec deadline) {
  event *ev = NULL;

  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if ((ev = pluck_event(cc, tag))) {
      break;
    }
    if (cc->shutdown) {
      ev = create_shutdown_event();
      break;
    }
    if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) {
      continue;
    }
    if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
                    GRPC_POLLSET_MU(&cc->pollset), deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      return NULL;
    }
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
  return &ev->base;
}

/* Shutdown simply drops a ref that we reserved at creation time; if we drop
   to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
  if (gpr_unref(&cc->refs)) {
    gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
    GPR_ASSERT(!cc->shutdown);
    cc->shutdown = 1;
    gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  }
}

static void on_pollset_destroy_done(void *arg) {
  grpc_completion_queue *cc = arg;
  grpc_pollset_destroy(&cc->pollset);
  gpr_free(cc);
}

void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
  GPR_ASSERT(cc->queue == NULL);
  grpc_pollset_shutdown(&cc->pollset, on_pollset_destroy_done, cc);
}
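
/* A minimal teardown sketch (illustrative only; assumes only the functions
   defined in this file, as declared in the c.2015 surface headers). The
   assert in grpc_completion_queue_destroy requires the queue to be fully
   drained, so a consumer must keep calling next() until the shutdown event
   arrives:

     grpc_completion_queue_shutdown(cq);
     for (;;) {
       grpc_event *ev = grpc_completion_queue_next(cq, deadline);
       int is_shutdown;
       if (ev == NULL) continue;            (deadline expired; retry)
       is_shutdown = ev->type == GRPC_QUEUE_SHUTDOWN;
       grpc_event_finish(ev);               (runs on_finish, frees ev)
       if (is_shutdown) break;
     }
     grpc_completion_queue_destroy(cq);
*/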

void grpc_event_finish(grpc_event *base) {
  event *ev = (event *)base;
  ev->on_finish(ev->on_finish_user_data, GRPC_OP_OK);
  if (ev->base.call) {
    grpc_call_internal_unref(ev->base.call, 1);
  }
  gpr_free(ev);
}

void grpc_cq_dump_pending_ops(grpc_completion_queue *cc) {
#ifndef NDEBUG
  char tmp[GRPC_COMPLETION_DO_NOT_USE * (1 + GPR_LTOA_MIN_BUFSIZE)];
  char *p = tmp;
  int i;

  for (i = 0; i < GRPC_COMPLETION_DO_NOT_USE; i++) {
    *p++ = ' ';
    p += gpr_ltoa(cc->pending_op_count[i], p);
  }

  gpr_log(GPR_INFO, "pending ops:%s", tmp);
#endif
}

grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
  return &cc->pollset;
}