/*
 *
 * Copyright 2014, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/surface/completion_queue.h"

#include <stdio.h>
#include <string.h>

#include "src/core/iomgr/pollset.h"
#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/surface/event_string.h"
#include "src/core/surface/surface_trace.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>

#define NUM_TAG_BUCKETS 31

/* A single event: extends grpc_event to form a linked list with a destruction
   function (on_finish) that is hidden from outside this module */
typedef struct event {
  grpc_event base;
  grpc_event_finish_func on_finish;
  void *on_finish_user_data;
  struct event *queue_next;
  struct event *queue_prev;
  struct event *bucket_next;
  struct event *bucket_prev;
} event;

/* Completion queue structure */
struct grpc_completion_queue {
  /* TODO(ctiller): see if this can be removed */
  int allow_polling;

  /* When refs drops to zero, we are in shutdown mode, and will be destroyable
     once all queued events are drained */
  gpr_refcount refs;
  /* the set of low level i/o things that concern this cq */
  grpc_pollset pollset;
  /* 0 initially, 1 once we've begun shutting down */
  int shutdown;
  /* Head of a linked list of queued events (prev points to the last element) */
  event *queue;
  /* Fixed size chained hash table of events for pluck() */
  event *buckets[NUM_TAG_BUCKETS];

#ifndef NDEBUG
  /* Debug support: track which operations are in flight at any given time */
  gpr_atm pending_op_count[GRPC_COMPLETION_DO_NOT_USE];
#endif
};

/* Default do-nothing on_finish function */
static void null_on_finish(void *user_data, grpc_op_error error) {}

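/* Allocate a completion queue. The queue starts with a single reference
   (dropped by grpc_completion_queue_shutdown) and with polling enabled. */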
grpc_completion_queue *grpc_completion_queue_create(void) {
  grpc_completion_queue *cc = gpr_malloc(sizeof(grpc_completion_queue));
  memset(cc, 0, sizeof(*cc));
  /* Initial ref is dropped by grpc_completion_queue_shutdown */
  gpr_ref_init(&cc->refs, 1);
  grpc_pollset_init(&cc->pollset);
  cc->allow_polling = 1;
  return cc;
}

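/* Test-only hook: disable polling, so threads blocked in next()/pluck() wait
   on the pollset's condition variable instead of calling grpc_pollset_work. */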
void grpc_completion_queue_dont_poll_test_only(grpc_completion_queue *cc) {
  cc->allow_polling = 0;
}

/* Create and append an event to the queue. Returns the event so that its data
   members can be filled in.
   Requires GRPC_POLLSET_MU(&cc->pollset) locked. */
static event *add_locked(grpc_completion_queue *cc, grpc_completion_type type,
                         void *tag, grpc_call *call,
                         grpc_event_finish_func on_finish, void *user_data) {
  event *ev = gpr_malloc(sizeof(event));
  gpr_uintptr bucket = ((gpr_uintptr)tag) % NUM_TAG_BUCKETS;
  GPR_ASSERT(!cc->shutdown);
  ev->base.type = type;
  ev->base.tag = tag;
  ev->base.call = call;
  ev->on_finish = on_finish ? on_finish : null_on_finish;
  ev->on_finish_user_data = user_data;
  if (cc->queue == NULL) {
    cc->queue = ev->queue_next = ev->queue_prev = ev;
  } else {
    ev->queue_next = cc->queue;
    ev->queue_prev = cc->queue->queue_prev;
    ev->queue_next->queue_prev = ev->queue_prev->queue_next = ev;
  }
  if (cc->buckets[bucket] == NULL) {
    cc->buckets[bucket] = ev->bucket_next = ev->bucket_prev = ev;
  } else {
    ev->bucket_next = cc->buckets[bucket];
    ev->bucket_prev = cc->buckets[bucket]->bucket_prev;
    ev->bucket_next->bucket_prev = ev->bucket_prev->bucket_next = ev;
  }
  gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
  grpc_pollset_kick(&cc->pollset);
  return ev;
}

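/* Mark the start of an operation that will eventually complete on this queue:
   takes a queue ref (paired with the unref in end_op_locked) and, if the
   event is tied to a call, a call ref. */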
void grpc_cq_begin_op(grpc_completion_queue *cc, grpc_call *call,
                      grpc_completion_type type) {
  gpr_ref(&cc->refs);
  if (call) grpc_call_internal_ref(call);
#ifndef NDEBUG
  gpr_atm_no_barrier_fetch_add(&cc->pending_op_count[type], 1);
#endif
}

/* Signal the end of an operation - if this is the last waiting-to-be-queued
   event, then enter shutdown mode */
static void end_op_locked(grpc_completion_queue *cc,
                          grpc_completion_type type) {
#ifndef NDEBUG
  GPR_ASSERT(gpr_atm_full_fetch_add(&cc->pending_op_count[type], -1) > 0);
#endif
  if (gpr_unref(&cc->refs)) {
    GPR_ASSERT(!cc->shutdown);
    cc->shutdown = 1;
    gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
  }
}

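/* Each grpc_cq_end_* function below queues a completion event of the
   corresponding type, fills in its type-specific payload, and drops the
   reference taken by grpc_cq_begin_op. */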
void grpc_cq_end_server_shutdown(grpc_completion_queue *cc, void *tag) {
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  add_locked(cc, GRPC_SERVER_SHUTDOWN, tag, NULL, NULL, NULL);
  end_op_locked(cc, GRPC_SERVER_SHUTDOWN);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_read(grpc_completion_queue *cc, void *tag, grpc_call *call,
                      grpc_event_finish_func on_finish, void *user_data,
                      grpc_byte_buffer *read) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_READ, tag, call, on_finish, user_data);
  ev->base.data.read = read;
  end_op_locked(cc, GRPC_READ);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_write_accepted(grpc_completion_queue *cc, void *tag,
                                grpc_call *call,
                                grpc_event_finish_func on_finish,
                                void *user_data, grpc_op_error error) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_WRITE_ACCEPTED, tag, call, on_finish, user_data);
  ev->base.data.write_accepted = error;
  end_op_locked(cc, GRPC_WRITE_ACCEPTED);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_ioreq(grpc_completion_queue *cc, void *tag, grpc_call *call,
                       grpc_event_finish_func on_finish, void *user_data,
                       grpc_op_error error) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_IOREQ, tag, call, on_finish, user_data);
  /* GRPC_IOREQ events deliver their error through the write_accepted slot
     of the payload union */
  ev->base.data.write_accepted = error;
  end_op_locked(cc, GRPC_IOREQ);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_finish_accepted(grpc_completion_queue *cc, void *tag,
                                 grpc_call *call,
                                 grpc_event_finish_func on_finish,
                                 void *user_data, grpc_op_error error) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_FINISH_ACCEPTED, tag, call, on_finish, user_data);
  ev->base.data.finish_accepted = error;
  end_op_locked(cc, GRPC_FINISH_ACCEPTED);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_client_metadata_read(grpc_completion_queue *cc, void *tag,
                                      grpc_call *call,
                                      grpc_event_finish_func on_finish,
                                      void *user_data, size_t count,
                                      grpc_metadata *elements) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_CLIENT_METADATA_READ, tag, call, on_finish,
                  user_data);
  ev->base.data.client_metadata_read.count = count;
  ev->base.data.client_metadata_read.elements = elements;
  end_op_locked(cc, GRPC_CLIENT_METADATA_READ);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_finished(grpc_completion_queue *cc, void *tag, grpc_call *call,
                          grpc_event_finish_func on_finish, void *user_data,
                          grpc_status_code status, const char *details,
                          grpc_metadata *metadata_elements,
                          size_t metadata_count) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_FINISHED, tag, call, on_finish, user_data);
  ev->base.data.finished.status = status;
  ev->base.data.finished.details = details;
  ev->base.data.finished.metadata_count = metadata_count;
  ev->base.data.finished.metadata_elements = metadata_elements;
  end_op_locked(cc, GRPC_FINISHED);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

void grpc_cq_end_new_rpc(grpc_completion_queue *cc, void *tag, grpc_call *call,
                         grpc_event_finish_func on_finish, void *user_data,
                         const char *method, const char *host,
                         gpr_timespec deadline, size_t metadata_count,
                         grpc_metadata *metadata_elements) {
  event *ev;
  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  ev = add_locked(cc, GRPC_SERVER_RPC_NEW, tag, call, on_finish, user_data);
  ev->base.data.server_rpc_new.method = method;
  ev->base.data.server_rpc_new.host = host;
  ev->base.data.server_rpc_new.deadline = deadline;
  ev->base.data.server_rpc_new.metadata_count = metadata_count;
  ev->base.data.server_rpc_new.metadata_elements = metadata_elements;
  end_op_locked(cc, GRPC_SERVER_RPC_NEW);
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
}

/* Create a GRPC_QUEUE_SHUTDOWN event without queuing it anywhere */
static event *create_shutdown_event(void) {
  event *ev = gpr_malloc(sizeof(event));
  ev->base.type = GRPC_QUEUE_SHUTDOWN;
  ev->base.call = NULL;
  ev->base.tag = NULL;
  ev->on_finish = null_on_finish;
  return ev;
}

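/* Dequeue the next completion event, blocking until one is available, the
   deadline expires (returns NULL), or the queue shuts down (returns a
   GRPC_QUEUE_SHUTDOWN event). While waiting, the calling thread polls for
   I/O on the queue's pollset when polling is allowed.

   Illustrative caller loop (a sketch only, not part of this module):

     for (;;) {
       grpc_event *ev = grpc_completion_queue_next(cq, deadline);
       if (ev == NULL) break;                      // deadline expired
       int done = ev->type == GRPC_QUEUE_SHUTDOWN;
       // ... handle the event ...
       grpc_event_finish(ev);                      // runs on_finish, frees ev
       if (done) break;
     }
*/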
grpc_event *grpc_completion_queue_next(grpc_completion_queue *cc,
                                       gpr_timespec deadline) {
  event *ev = NULL;

  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if (cc->queue != NULL) {
      gpr_uintptr bucket;
      ev = cc->queue;
      bucket = ((gpr_uintptr)ev->base.tag) % NUM_TAG_BUCKETS;
      /* Unlink ev from both circular lists (queue and tag bucket) */
      cc->queue = ev->queue_next;
      ev->queue_next->queue_prev = ev->queue_prev;
      ev->queue_prev->queue_next = ev->queue_next;
      ev->bucket_next->bucket_prev = ev->bucket_prev;
      ev->bucket_prev->bucket_next = ev->bucket_next;
      if (ev == cc->buckets[bucket]) {
        cc->buckets[bucket] = ev->bucket_next;
        if (ev == cc->buckets[bucket]) {
          /* ev was the only element in its bucket */
          cc->buckets[bucket] = NULL;
        }
      }
      if (cc->queue == ev) {
        /* ev was the only element in the queue */
        cc->queue = NULL;
      }
      break;
    }
    if (cc->shutdown) {
      ev = create_shutdown_event();
      break;
    }
    if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) {
      continue;
    }
    if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
                    GRPC_POLLSET_MU(&cc->pollset), deadline)) {
      /* Deadline expired with no event available */
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      return NULL;
    }
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
  return &ev->base;
}

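/* Find and unlink the first queued event whose tag matches, searching only
   the tag's hash bucket. Returns NULL if no queued event carries this tag.
   Requires GRPC_POLLSET_MU(&cc->pollset) locked. */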
static event *pluck_event(grpc_completion_queue *cc, void *tag) {
  gpr_uintptr bucket = ((gpr_uintptr)tag) % NUM_TAG_BUCKETS;
  event *ev = cc->buckets[bucket];
  if (ev == NULL) return NULL;
  do {
    if (ev->base.tag == tag) {
      ev->queue_next->queue_prev = ev->queue_prev;
      ev->queue_prev->queue_next = ev->queue_next;
      ev->bucket_next->bucket_prev = ev->bucket_prev;
      ev->bucket_prev->bucket_next = ev->bucket_next;
      if (ev == cc->buckets[bucket]) {
        cc->buckets[bucket] = ev->bucket_next;
        if (ev == cc->buckets[bucket]) {
          cc->buckets[bucket] = NULL;
        }
      }
      if (cc->queue == ev) {
        cc->queue = ev->queue_next;
        if (cc->queue == ev) {
          cc->queue = NULL;
        }
      }
      return ev;
    }
    ev = ev->bucket_next;
  } while (ev != cc->buckets[bucket]);
  return NULL;
}

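/* Like grpc_completion_queue_next, but waits for (and returns) only an event
   carrying the given tag. */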
grpc_event *grpc_completion_queue_pluck(grpc_completion_queue *cc, void *tag,
                                        gpr_timespec deadline) {
  event *ev = NULL;

  gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
  for (;;) {
    if ((ev = pluck_event(cc, tag))) {
      break;
    }
    if (cc->shutdown) {
      ev = create_shutdown_event();
      break;
    }
    if (cc->allow_polling && grpc_pollset_work(&cc->pollset, deadline)) {
      continue;
    }
    if (gpr_cv_wait(GRPC_POLLSET_CV(&cc->pollset),
                    GRPC_POLLSET_MU(&cc->pollset), deadline)) {
      gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
      return NULL;
    }
  }
  gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  GRPC_SURFACE_TRACE_RETURNED_EVENT(cc, &ev->base);
  return &ev->base;
}

/* Shutdown simply drops a ref that we reserved at creation time; if we drop
   to zero here, then enter shutdown mode and wake up any waiters */
void grpc_completion_queue_shutdown(grpc_completion_queue *cc) {
  if (gpr_unref(&cc->refs)) {
    gpr_mu_lock(GRPC_POLLSET_MU(&cc->pollset));
    GPR_ASSERT(!cc->shutdown);
    cc->shutdown = 1;
    gpr_cv_broadcast(GRPC_POLLSET_CV(&cc->pollset));
    gpr_mu_unlock(GRPC_POLLSET_MU(&cc->pollset));
  }
}

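/* Release the queue's memory. Callers must have drained the queue first:
   destroying with events still queued is a bug, caught by the assert. */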
void grpc_completion_queue_destroy(grpc_completion_queue *cc) {
  GPR_ASSERT(cc->queue == NULL);
  grpc_pollset_destroy(&cc->pollset);
  gpr_free(cc);
}

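/* Return an event obtained from next()/pluck() to the queue: runs the
   event's on_finish callback, drops the associated call ref (if any), and
   frees the event. */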
void grpc_event_finish(grpc_event *base) {
  event *ev = (event *)base;
  ev->on_finish(ev->on_finish_user_data, GRPC_OP_OK);
  if (ev->base.call) {
    grpc_call_internal_unref(ev->base.call);
  }
  gpr_free(ev);
}

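/* Debug helper: log the per-type counts of operations still in flight
   (a no-op in NDEBUG builds). */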
void grpc_cq_dump_pending_ops(grpc_completion_queue *cc) {
#ifndef NDEBUG
  char tmp[GRPC_COMPLETION_DO_NOT_USE * (1 + GPR_LTOA_MIN_BUFSIZE)];
  char *p = tmp;
  int i;

  for (i = 0; i < GRPC_COMPLETION_DO_NOT_USE; i++) {
    *p++ = ' ';
    p += gpr_ltoa(cc->pending_op_count[i], p);
  }

  gpr_log(GPR_INFO, "pending ops:%s", tmp);
#endif
}

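/* Accessor for the queue's underlying pollset, so other modules can
   register I/O against it. */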
grpc_pollset *grpc_cq_pollset(grpc_completion_queue *cc) {
  return &cc->pollset;
}