/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/surface/completion_queue.h"

#include <stdio.h>
#include <string.h>

#include "src/core/iomgr/pollset.h"
#include "src/core/support/string.h"
#include "src/core/surface/call.h"
#include "src/core/surface/event_string.h"
#include "src/core/surface/surface_trace.h"
#include <grpc/support/alloc.h>
#include <grpc/support/atm.h>
#include <grpc/support/log.h>

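/* A (worker, tag) pair recorded while a caller is blocked in
   grpc_completion_queue_pluck, so that grpc_cq_end_op can kick the specific
   poller that is waiting for that tag. */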
typedef struct
{
  grpc_pollset_worker *worker;
  void *tag;
} plucker;

/* Completion queue structure */
struct grpc_completion_queue
{
  /** completed events */
  grpc_cq_completion completed_head;
  grpc_cq_completion *completed_tail;
  /** Number of pending events (+1 if we're not shutdown) */
  gpr_refcount pending_events;
  /** Once owning_refs drops to zero, we will destroy the cq */
  gpr_refcount owning_refs;
  /** the set of low level i/o things that concern this cq */
  grpc_pollset pollset;
  /** 0 initially, 1 once we've begun shutting down */
  int shutdown;
  int shutdown_called;
  int is_server_cq;
  int num_pluckers;
  plucker pluckers[GRPC_MAX_COMPLETION_QUEUE_PLUCKERS];
  grpc_closure pollset_destroy_done;
};

static void on_pollset_destroy_done (grpc_exec_ctx * exec_ctx, void *cc, int success);

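/* Rough caller-side usage sketch (illustrative only; the variable names and
   the surrounding application code are assumptions, not part of this file):

     grpc_completion_queue *cq = grpc_completion_queue_create (NULL);
     ... associate cq with calls and start operations ...
     grpc_completion_queue_shutdown (cq);
     while (grpc_completion_queue_next (cq, gpr_inf_future (GPR_CLOCK_REALTIME),
                                        NULL).type != GRPC_QUEUE_SHUTDOWN)
       ;
     grpc_completion_queue_destroy (cq);
 */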
grpc_completion_queue *
grpc_completion_queue_create (void *reserved)
{
  grpc_completion_queue *cc = gpr_malloc (sizeof (grpc_completion_queue));
  GPR_ASSERT (!reserved);
  memset (cc, 0, sizeof (*cc));
  /* Initial ref is dropped by grpc_completion_queue_shutdown */
  gpr_ref_init (&cc->pending_events, 1);
  /* One for destroy(), one for pollset_shutdown */
  gpr_ref_init (&cc->owning_refs, 2);
  grpc_pollset_init (&cc->pollset);
  cc->completed_tail = &cc->completed_head;
  cc->completed_head.next = (gpr_uintptr) cc->completed_tail;
  grpc_closure_init (&cc->pollset_destroy_done, on_pollset_destroy_done, cc);
  return cc;
}

#ifdef GRPC_CQ_REF_COUNT_DEBUG
void
grpc_cq_internal_ref (grpc_completion_queue * cc, const char *reason, const char *file, int line)
{
  gpr_log (file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p ref %d -> %d %s", cc, (int) cc->owning_refs.count, (int) cc->owning_refs.count + 1, reason);
#else
void
grpc_cq_internal_ref (grpc_completion_queue * cc)
{
#endif
  gpr_ref (&cc->owning_refs);
}

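/* Closure callback run once the pollset has finished shutting down: it drops
   the owning ref that grpc_completion_queue_create reserved for pollset
   shutdown. */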
static void
on_pollset_destroy_done (grpc_exec_ctx * exec_ctx, void *arg, int success)
{
  grpc_completion_queue *cc = arg;
  GRPC_CQ_INTERNAL_UNREF (cc, "pollset_destroy");
}

#ifdef GRPC_CQ_REF_COUNT_DEBUG
void
grpc_cq_internal_unref (grpc_completion_queue * cc, const char *reason, const char *file, int line)
{
  gpr_log (file, line, GPR_LOG_SEVERITY_DEBUG, "CQ:%p unref %d -> %d %s", cc, (int) cc->owning_refs.count, (int) cc->owning_refs.count - 1, reason);
#else
void
grpc_cq_internal_unref (grpc_completion_queue * cc)
{
#endif
  if (gpr_unref (&cc->owning_refs))
    {
      GPR_ASSERT (cc->completed_head.next == (gpr_uintptr) & cc->completed_head);
      grpc_pollset_destroy (&cc->pollset);
      gpr_free (cc);
    }
}

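/* Mark the start of an operation that will eventually post a completion to
   cc: takes a ref on pending_events that grpc_cq_end_op later releases. */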
void
grpc_cq_begin_op (grpc_completion_queue * cc)
{
#ifndef NDEBUG
  gpr_mu_lock (GRPC_POLLSET_MU (&cc->pollset));
  GPR_ASSERT (!cc->shutdown_called);
  gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
#endif
  gpr_ref (&cc->pending_events);
}

/* Signal the end of an operation - if this is the last waiting-to-be-queued
   event, then enter shutdown mode */
/* Queue a GRPC_OP_COMPLETED operation */
void
grpc_cq_end_op (grpc_completion_queue * cc, void *tag, int success, void (*done) (void *done_arg, grpc_cq_completion * storage, grpc_closure_list * closure_list), void *done_arg, grpc_cq_completion * storage, grpc_closure_list * closure_list)
{
  int shutdown;
  int i;
  grpc_pollset_worker *pluck_worker;

  storage->tag = tag;
  storage->done = done;
  storage->done_arg = done_arg;
  /* the low bit of next carries the success flag; the rest is the pointer */
  storage->next = ((gpr_uintptr) & cc->completed_head) | ((gpr_uintptr) (success != 0));

  gpr_mu_lock (GRPC_POLLSET_MU (&cc->pollset));
  shutdown = gpr_unref (&cc->pending_events);
  if (!shutdown)
    {
      cc->completed_tail->next = ((gpr_uintptr) storage) | (1u & (gpr_uintptr) cc->completed_tail->next);
      cc->completed_tail = storage;
      pluck_worker = NULL;
      for (i = 0; i < cc->num_pluckers; i++)
        {
          if (cc->pluckers[i].tag == tag)
            {
              pluck_worker = cc->pluckers[i].worker;
              break;
            }
        }
      grpc_pollset_kick (&cc->pollset, pluck_worker);
      gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
    }
  else
    {
      cc->completed_tail->next = ((gpr_uintptr) storage) | (1u & (gpr_uintptr) cc->completed_tail->next);
      cc->completed_tail = storage;
      GPR_ASSERT (!cc->shutdown);
      GPR_ASSERT (cc->shutdown_called);
      cc->shutdown = 1;
      gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
      grpc_pollset_shutdown (&cc->pollset, &cc->pollset_destroy_done, closure_list);
    }
}

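/* Block until an event is available on cc, the deadline expires, or the
   queue is shut down. Completed events are consumed from the head of the
   list; while the list is empty the calling thread polls for i/o work on
   the cq's pollset. */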
grpc_event
grpc_completion_queue_next (grpc_completion_queue * cc, gpr_timespec deadline, void *reserved)
{
  grpc_event ret;
  grpc_pollset_worker worker;
  int first_loop = 1;
  gpr_timespec now;
  grpc_closure_list closure_list = GRPC_CLOSURE_LIST_INIT;

  GPR_ASSERT (!reserved);

  deadline = gpr_convert_clock_type (deadline, GPR_CLOCK_MONOTONIC);

  GRPC_CQ_INTERNAL_REF (cc, "next");
  gpr_mu_lock (GRPC_POLLSET_MU (&cc->pollset));
  for (;;)
    {
      if (cc->completed_tail != &cc->completed_head)
        {
          grpc_cq_completion *c = (grpc_cq_completion *) cc->completed_head.next;
          cc->completed_head.next = c->next & ~(gpr_uintptr) 1;
          if (c == cc->completed_tail)
            {
              cc->completed_tail = &cc->completed_head;
            }
          gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
          ret.type = GRPC_OP_COMPLETE;
          ret.success = c->next & 1u;
          ret.tag = c->tag;
          c->done (c->done_arg, c, &closure_list);
          break;
        }
      if (cc->shutdown)
        {
          gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
          memset (&ret, 0, sizeof (ret));
          ret.type = GRPC_QUEUE_SHUTDOWN;
          break;
        }
      now = gpr_now (GPR_CLOCK_MONOTONIC);
      if (!first_loop && gpr_time_cmp (now, deadline) >= 0)
        {
          gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
          memset (&ret, 0, sizeof (ret));
          ret.type = GRPC_QUEUE_TIMEOUT;
          break;
        }
      first_loop = 0;
      grpc_pollset_work (&cc->pollset, &worker, now, deadline, &closure_list);
    }
  GRPC_SURFACE_TRACE_RETURNED_EVENT (cc, &ret);
  GRPC_CQ_INTERNAL_UNREF (cc, "next");
  grpc_closure_list_run (&closure_list);
  return ret;
}

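/* Record that a worker is plucking for tag so that grpc_cq_end_op can kick
   it directly. Returns 0 once GRPC_MAX_COMPLETION_QUEUE_PLUCKERS concurrent
   pluckers are already registered. */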
static int
add_plucker (grpc_completion_queue * cc, void *tag, grpc_pollset_worker * worker)
{
  if (cc->num_pluckers == GRPC_MAX_COMPLETION_QUEUE_PLUCKERS)
    {
      return 0;
    }
  cc->pluckers[cc->num_pluckers].tag = tag;
  cc->pluckers[cc->num_pluckers].worker = worker;
  cc->num_pluckers++;
  return 1;
}

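/* Remove a (tag, worker) registration added by add_plucker; aborts if the
   pair is not found. */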
static void
del_plucker (grpc_completion_queue * cc, void *tag, grpc_pollset_worker * worker)
{
  int i;
  for (i = 0; i < cc->num_pluckers; i++)
    {
      if (cc->pluckers[i].tag == tag && cc->pluckers[i].worker == worker)
        {
          cc->num_pluckers--;
          GPR_SWAP (plucker, cc->pluckers[i], cc->pluckers[cc->num_pluckers]);
          return;
        }
    }
  gpr_log (GPR_ERROR, "should never reach here");
  abort ();
}

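/* Block until a completion for the given tag is available, the deadline
   expires, or the queue is shut down. Completions for other tags are left on
   the queue; while waiting, the calling thread registers itself as a plucker
   and polls the cq's pollset. */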
grpc_event
grpc_completion_queue_pluck (grpc_completion_queue * cc, void *tag, gpr_timespec deadline, void *reserved)
{
  grpc_event ret;
  grpc_cq_completion *c;
  grpc_cq_completion *prev;
  grpc_pollset_worker worker;
  gpr_timespec now;
  int first_loop = 1;
  grpc_closure_list closure_list = GRPC_CLOSURE_LIST_INIT;

  GPR_ASSERT (!reserved);

  deadline = gpr_convert_clock_type (deadline, GPR_CLOCK_MONOTONIC);

  GRPC_CQ_INTERNAL_REF (cc, "pluck");
  gpr_mu_lock (GRPC_POLLSET_MU (&cc->pollset));
  for (;;)
    {
      prev = &cc->completed_head;
      while ((c = (grpc_cq_completion *) (prev->next & ~(gpr_uintptr) 1)) != &cc->completed_head)
        {
          if (c->tag == tag)
            {
              prev->next = (prev->next & (gpr_uintptr) 1) | (c->next & ~(gpr_uintptr) 1);
              if (c == cc->completed_tail)
                {
                  cc->completed_tail = prev;
                }
              gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
              ret.type = GRPC_OP_COMPLETE;
              ret.success = c->next & 1u;
              ret.tag = c->tag;
              c->done (c->done_arg, c, &closure_list);
              goto done;
            }
          prev = c;
        }
      if (cc->shutdown)
        {
          gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
          memset (&ret, 0, sizeof (ret));
          ret.type = GRPC_QUEUE_SHUTDOWN;
          break;
        }
      if (!add_plucker (cc, tag, &worker))
        {
          gpr_log (GPR_DEBUG, "Too many outstanding grpc_completion_queue_pluck calls: maximum is %d", GRPC_MAX_COMPLETION_QUEUE_PLUCKERS);
          gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
          memset (&ret, 0, sizeof (ret));
          /* TODO(ctiller): should we use a different result here */
          ret.type = GRPC_QUEUE_TIMEOUT;
          break;
        }
      now = gpr_now (GPR_CLOCK_MONOTONIC);
      if (!first_loop && gpr_time_cmp (now, deadline) >= 0)
        {
          del_plucker (cc, tag, &worker);
          gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
          memset (&ret, 0, sizeof (ret));
          ret.type = GRPC_QUEUE_TIMEOUT;
          break;
        }
      first_loop = 0;
      grpc_pollset_work (&cc->pollset, &worker, now, deadline, &closure_list);
      del_plucker (cc, tag, &worker);
    }
done:
  GRPC_SURFACE_TRACE_RETURNED_EVENT (cc, &ret);
  GRPC_CQ_INTERNAL_UNREF (cc, "pluck");
  grpc_closure_list_run (&closure_list);
  return ret;
}

/* Shutdown simply drops a ref that we reserved at creation time; if we drop
   to zero here, then enter shutdown mode and wake up any waiters */
void
grpc_completion_queue_shutdown (grpc_completion_queue * cc)
{
  grpc_closure_list closure_list = GRPC_CLOSURE_LIST_INIT;
  gpr_mu_lock (GRPC_POLLSET_MU (&cc->pollset));
  if (cc->shutdown_called)
    {
      gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
      return;
    }
  cc->shutdown_called = 1;
  gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));

  if (gpr_unref (&cc->pending_events))
    {
      gpr_mu_lock (GRPC_POLLSET_MU (&cc->pollset));
      GPR_ASSERT (!cc->shutdown);
      cc->shutdown = 1;
      gpr_mu_unlock (GRPC_POLLSET_MU (&cc->pollset));
      grpc_pollset_shutdown (&cc->pollset, &cc->pollset_destroy_done, &closure_list);
    }
  grpc_closure_list_run (&closure_list);
}

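/* Destroy the queue: shuts it down if that has not happened yet, then drops
   the owning ref reserved for destroy(); the memory is freed once all owning
   refs are gone. */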
void
grpc_completion_queue_destroy (grpc_completion_queue * cc)
{
  grpc_completion_queue_shutdown (cc);
  GRPC_CQ_INTERNAL_UNREF (cc, "destroy");
}

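/* Return the pollset embedded in this completion queue. */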
grpc_pollset *
grpc_cq_pollset (grpc_completion_queue * cc)
{
  return &cc->pollset;
}

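/* Flag this completion queue as being used for server request notification. */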
void
grpc_cq_mark_server_cq (grpc_completion_queue * cc)
{
  cc->is_server_cq = 1;
}

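/* Return nonzero if grpc_cq_mark_server_cq has been called on cc. */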
int
grpc_cq_is_server_cq (grpc_completion_queue * cc)
{
  return cc->is_server_cq;
}