blob: 19ee6650f006c25336d729964ae211b7820c372c [file] [log] [blame]
ctiller58393c22015-01-07 14:03:30 -08001/*
2 *
Craig Tillera93a25f2016-01-28 13:55:49 -08003 * Copyright 2015-2016, Google Inc.
ctiller58393c22015-01-07 14:03:30 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Craig Tillerd14a1a52015-01-21 15:26:29 -080034#include <grpc/support/port_platform.h>
35
36#ifdef GPR_POSIX_SOCKET
37
ctiller58393c22015-01-07 14:03:30 -080038#include "src/core/iomgr/pollset_posix.h"
39
40#include <errno.h>
ctiller58393c22015-01-07 14:03:30 -080041#include <stdlib.h>
42#include <string.h>
43#include <unistd.h>
44
David Garcia Quintasf747bbc2015-10-04 23:09:47 -070045#include "src/core/iomgr/timer_internal.h"
ctiller58393c22015-01-07 14:03:30 -080046#include "src/core/iomgr/fd_posix.h"
47#include "src/core/iomgr/iomgr_internal.h"
48#include "src/core/iomgr/socket_utils_posix.h"
Craig Tiller1be373c2015-04-27 07:58:16 -070049#include "src/core/profiling/timers.h"
vjpai9839d282015-09-24 17:55:18 -070050#include "src/core/support/block_annotate.h"
ctiller58393c22015-01-07 14:03:30 -080051#include <grpc/support/alloc.h>
52#include <grpc/support/log.h>
53#include <grpc/support/thd.h>
Craig Tiller1be373c2015-04-27 07:58:16 -070054#include <grpc/support/tls.h>
ctiller58393c22015-01-07 14:03:30 -080055#include <grpc/support/useful.h>
56
/* Thread-local markers used to detect self-kicks: each records which
   poller/worker (if any) the current thread is running as. */
GPR_TLS_DECL(g_current_thread_poller);
GPR_TLS_DECL(g_current_thread_worker);

/** Default poll() function - a pointer so that it can be overridden by some
 * tests */
grpc_poll_function_type grpc_poll_function = poll;

/** The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch').
 * This wakeup_fd gives us something to alert on when such a case occurs. */
grpc_wakeup_fd grpc_global_wakeup_fd;
Craig Tillera82950e2015-09-22 12:33:20 -070070static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -070071 worker->prev->next = worker->next;
72 worker->next->prev = worker->prev;
73}
74
Craig Tillera82950e2015-09-22 12:33:20 -070075int grpc_pollset_has_workers(grpc_pollset *p) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -070076 return p->root_worker.next != &p->root_worker;
77}
78
/* Detach and return the first worker in the ring, or NULL when the ring is
   empty. Caller must hold pollset->mu. */
static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
  grpc_pollset_worker *front;
  if (!grpc_pollset_has_workers(p)) {
    return NULL;
  }
  front = p->root_worker.next;
  remove_worker(p, front);
  return front;
}
88
Craig Tillera82950e2015-09-22 12:33:20 -070089static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -070090 worker->next = &p->root_worker;
91 worker->prev = worker->next->prev;
92 worker->prev->next = worker->next->prev = worker;
Craig Tiller7d413212015-02-09 08:00:02 -080093}
ctiller58393c22015-01-07 14:03:30 -080094
Craig Tillera82950e2015-09-22 12:33:20 -070095static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -070096 worker->prev = &p->root_worker;
97 worker->next = worker->prev->next;
98 worker->prev->next = worker->next->prev = worker;
99}
100
/* Wake up one, all, or a specific worker of a pollset.
   - specific_worker == GRPC_POLLSET_KICK_BROADCAST: wake every worker.
   - specific_worker != NULL: wake that worker (self-kick only if
     GRPC_POLLSET_CAN_KICK_SELF is set in flags).
   - specific_worker == NULL: wake 'some' worker that is not the calling
     thread, rotating the ring to spread wakeups.
   Caller must hold pollset->mu. */
void grpc_pollset_kick_ext(grpc_pollset *p,
                           grpc_pollset_worker *specific_worker,
                           uint32_t flags) {
  GPR_TIMER_BEGIN("grpc_pollset_kick_ext", 0);

  /* pollset->mu already held */
  if (specific_worker != NULL) {
    if (specific_worker == GRPC_POLLSET_KICK_BROADCAST) {
      GPR_TIMER_BEGIN("grpc_pollset_kick_ext.broadcast", 0);
      /* re-evaluation only makes sense for a single targeted worker */
      GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
      for (specific_worker = p->root_worker.next;
           specific_worker != &p->root_worker;
           specific_worker = specific_worker->next) {
        grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
      }
      /* remember the kick in case no poller was actually waiting */
      p->kicked_without_pollers = 1;
      GPR_TIMER_END("grpc_pollset_kick_ext.broadcast", 0);
    } else if (gpr_tls_get(&g_current_thread_worker) !=
               (intptr_t)specific_worker) {
      /* target is some other thread's worker: always safe to wake it */
      GPR_TIMER_MARK("different_thread_worker", 0);
      if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
        specific_worker->reevaluate_polling_on_wakeup = 1;
      }
      specific_worker->kicked_specifically = 1;
      grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
    } else if ((flags & GRPC_POLLSET_CAN_KICK_SELF) != 0) {
      /* target is this thread's own worker: only wake if explicitly allowed */
      GPR_TIMER_MARK("kick_yoself", 0);
      if ((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) != 0) {
        specific_worker->reevaluate_polling_on_wakeup = 1;
      }
      specific_worker->kicked_specifically = 1;
      grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
    }
  } else if (gpr_tls_get(&g_current_thread_poller) != (intptr_t)p) {
    /* anonymous kick: pick some worker, preferring one that is not the
       calling thread */
    GPR_ASSERT((flags & GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) == 0);
    GPR_TIMER_MARK("kick_anonymous", 0);
    specific_worker = pop_front_worker(p);
    if (specific_worker != NULL) {
      if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
        /* front of the ring is ourselves: rotate and try the next one */
        GPR_TIMER_MARK("kick_anonymous_not_self", 0);
        push_back_worker(p, specific_worker);
        specific_worker = pop_front_worker(p);
        if ((flags & GRPC_POLLSET_CAN_KICK_SELF) == 0 &&
            gpr_tls_get(&g_current_thread_worker) ==
                (intptr_t)specific_worker) {
          /* still ourselves (only worker) and self-kick not allowed */
          push_back_worker(p, specific_worker);
          specific_worker = NULL;
        }
      }
      if (specific_worker != NULL) {
        GPR_TIMER_MARK("finally_kick", 0);
        push_back_worker(p, specific_worker);
        grpc_wakeup_fd_wakeup(&specific_worker->wakeup_fd->fd);
      }
    } else {
      /* nobody polling: record the kick so the next worker skips poll once */
      GPR_TIMER_MARK("kicked_no_pollers", 0);
      p->kicked_without_pollers = 1;
    }
  }

  GPR_TIMER_END("grpc_pollset_kick_ext", 0);
}
163
/* Convenience wrapper around grpc_pollset_kick_ext with no extra flags.
   Caller must hold p->mu. */
void grpc_pollset_kick(grpc_pollset *p, grpc_pollset_worker *specific_worker) {
  const uint32_t no_flags = 0;
  grpc_pollset_kick_ext(p, specific_worker, no_flags);
}
167
ctiller58393c22015-01-07 14:03:30 -0800168/* global state management */
169
Craig Tillera82950e2015-09-22 12:33:20 -0700170void grpc_pollset_global_init(void) {
171 gpr_tls_init(&g_current_thread_poller);
Craig Tiller926c0e92015-09-28 14:25:38 -0700172 gpr_tls_init(&g_current_thread_worker);
Craig Tillera82950e2015-09-22 12:33:20 -0700173 grpc_wakeup_fd_global_init();
Craig Tiller8afeec82015-09-28 17:03:34 -0700174 grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
David Klempnerb5056612015-02-24 14:22:50 -0800175}
176
Craig Tillera82950e2015-09-22 12:33:20 -0700177void grpc_pollset_global_shutdown(void) {
Craig Tiller8afeec82015-09-28 17:03:34 -0700178 grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
Craig Tillera82950e2015-09-22 12:33:20 -0700179 gpr_tls_destroy(&g_current_thread_poller);
Craig Tiller926c0e92015-09-28 14:25:38 -0700180 gpr_tls_destroy(&g_current_thread_worker);
Craig Tillere8b5f622015-11-02 14:15:03 -0800181 grpc_wakeup_fd_global_destroy();
Craig Tiller8afeec82015-09-28 17:03:34 -0700182}
183
Craig Tiller71a0f9d2015-09-28 17:22:01 -0700184void grpc_kick_poller(void) { grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd); }
ctiller58393c22015-01-07 14:03:30 -0800185
186/* main interface */
187
Craig Tillera82950e2015-09-22 12:33:20 -0700188static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null);
ctiller58393c22015-01-07 14:03:30 -0800189
/* Initialize a pollset to the empty 'basic' (zero-or-one fd) state.
   Fix: the original assigned kicked_without_pollers = 0 twice; the
   redundant second assignment is removed. */
void grpc_pollset_init(grpc_pollset *pollset) {
  gpr_mu_init(&pollset->mu);
  /* empty worker ring: sentinel points at itself */
  pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
  pollset->in_flight_cbs = 0;
  pollset->shutting_down = 0;
  pollset->called_shutdown = 0;
  pollset->kicked_without_pollers = 0;
  pollset->idle_jobs.head = pollset->idle_jobs.tail = NULL;
  pollset->local_wakeup_cache = NULL;
  become_basic_pollset(pollset, NULL);
}
202
/* Destroy a pollset: must have no in-flight callbacks, no workers, and no
   pending idle jobs. Frees the cached per-worker wakeup fds. */
void grpc_pollset_destroy(grpc_pollset *pollset) {
  grpc_cached_wakeup_fd *cached;
  GPR_ASSERT(pollset->in_flight_cbs == 0);
  GPR_ASSERT(!grpc_pollset_has_workers(pollset));
  GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
  pollset->vtable->destroy(pollset);
  gpr_mu_destroy(&pollset->mu);
  /* drain the local wakeup-fd cache */
  for (cached = pollset->local_wakeup_cache; cached != NULL;) {
    grpc_cached_wakeup_fd *next = cached->next;
    grpc_wakeup_fd_destroy(&cached->fd);
    gpr_free(cached);
    cached = next;
  }
  pollset->local_wakeup_cache = NULL;
}
216
217void grpc_pollset_reset(grpc_pollset *pollset) {
218 GPR_ASSERT(pollset->shutting_down);
219 GPR_ASSERT(pollset->in_flight_cbs == 0);
220 GPR_ASSERT(!grpc_pollset_has_workers(pollset));
221 GPR_ASSERT(pollset->idle_jobs.head == pollset->idle_jobs.tail);
222 pollset->vtable->destroy(pollset);
223 pollset->shutting_down = 0;
224 pollset->called_shutdown = 0;
225 pollset->kicked_without_pollers = 0;
Craig Tillera82950e2015-09-22 12:33:20 -0700226 become_basic_pollset(pollset, NULL);
ctiller58393c22015-01-07 14:03:30 -0800227}
228
Craig Tillera82950e2015-09-22 12:33:20 -0700229void grpc_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
230 grpc_fd *fd) {
231 gpr_mu_lock(&pollset->mu);
232 pollset->vtable->add_fd(exec_ctx, pollset, fd, 1);
Craig Tiller4c06b822015-08-06 08:41:31 -0700233/* the following (enabled only in debug) will reacquire and then release
Craig Tiller88a33ef2015-12-11 13:17:40 -0800234 our lock - meaning that if the unlocking flag passed to add_fd above is
Craig Tiller4c06b822015-08-06 08:41:31 -0700235 not respected, the code will deadlock (in a way that we have a chance of
236 debugging) */
Craig Tillerabfaf2a2015-07-16 17:23:40 -0700237#ifndef NDEBUG
Craig Tillera82950e2015-09-22 12:33:20 -0700238 gpr_mu_lock(&pollset->mu);
239 gpr_mu_unlock(&pollset->mu);
Craig Tillerb0c13ad2015-07-16 08:42:31 -0700240#endif
ctiller58393c22015-01-07 14:03:30 -0800241}
242
/* Complete pollset shutdown: let the concrete poller clean up, then
   schedule the user's shutdown_done closure on the exec_ctx. */
static void finish_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
  GPR_ASSERT(grpc_closure_list_empty(pollset->idle_jobs));
  pollset->vtable->finish_shutdown(pollset);
  grpc_exec_ctx_enqueue(exec_ctx, pollset->shutdown_done, true, NULL);
}
248
/* Run one iteration of polling work for this pollset, blocking until
   'deadline' at the latest. Must be called with pollset->mu held; the mutex
   may be dropped and reacquired internally but is held again on return.
   Note the 'done:' label sits INSIDE the while loop: early-exit paths jump
   into the loop body to share the relock/re-evaluation logic. */
void grpc_pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker, gpr_timespec now,
                       gpr_timespec deadline) {
  /* pollset->mu already held */
  int added_worker = 0;  /* is 'worker' linked into the worker ring? */
  int locked = 1;        /* do we currently hold pollset->mu? */
  int queued_work = 0;   /* did exec_ctx_flush actually run closures? */
  int keep_polling = 0;
  GPR_TIMER_BEGIN("grpc_pollset_work", 0);
  /* this must happen before we (potentially) drop pollset->mu */
  worker->next = worker->prev = NULL;
  worker->reevaluate_polling_on_wakeup = 0;
  /* take a wakeup fd from the pollset-local cache, or make a new one */
  if (pollset->local_wakeup_cache != NULL) {
    worker->wakeup_fd = pollset->local_wakeup_cache;
    pollset->local_wakeup_cache = worker->wakeup_fd->next;
  } else {
    worker->wakeup_fd = gpr_malloc(sizeof(*worker->wakeup_fd));
    grpc_wakeup_fd_init(&worker->wakeup_fd->fd);
  }
  worker->kicked_specifically = 0;
  /* If there's work waiting for the pollset to be idle, and the
     pollset is idle, then do that work */
  if (!grpc_pollset_has_workers(pollset) &&
      !grpc_closure_list_empty(pollset->idle_jobs)) {
    GPR_TIMER_MARK("grpc_pollset_work.idle_jobs", 0);
    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
    goto done;
  }
  /* Check alarms - these are a global resource so we just ping
     each time through on every pollset.
     May update deadline to ensure timely wakeups.
     TODO(ctiller): can this work be localized? */
  if (grpc_timer_check(exec_ctx, now, &deadline)) {
    GPR_TIMER_MARK("grpc_pollset_work.alarm_triggered", 0);
    gpr_mu_unlock(&pollset->mu);
    locked = 0;
    goto done;
  }
  /* If we're shutting down then we don't execute any extended work */
  if (pollset->shutting_down) {
    GPR_TIMER_MARK("grpc_pollset_work.shutting_down", 0);
    goto done;
  }
  /* Give do_promote priority so we don't starve it out */
  if (pollset->in_flight_cbs) {
    GPR_TIMER_MARK("grpc_pollset_work.in_flight_cbs", 0);
    gpr_mu_unlock(&pollset->mu);
    locked = 0;
    goto done;
  }
  /* Start polling, and keep doing so while we're being asked to
     re-evaluate our pollers (this allows poll() based pollers to
     ensure they don't miss wakeups) */
  keep_polling = 1;
  while (keep_polling) {
    keep_polling = 0;
    if (!pollset->kicked_without_pollers) {
      if (!added_worker) {
        push_front_worker(pollset, worker);
        added_worker = 1;
        gpr_tls_set(&g_current_thread_worker, (intptr_t)worker);
      }
      /* advertise ourselves as the active poller while inside poll() */
      gpr_tls_set(&g_current_thread_poller, (intptr_t)pollset);
      GPR_TIMER_BEGIN("maybe_work_and_unlock", 0);
      pollset->vtable->maybe_work_and_unlock(exec_ctx, pollset, worker,
                                             deadline, now);
      GPR_TIMER_END("maybe_work_and_unlock", 0);
      locked = 0; /* maybe_work_and_unlock released pollset->mu */
      gpr_tls_set(&g_current_thread_poller, 0);
    } else {
      /* a kick arrived while nobody was polling: consume it and skip poll */
      GPR_TIMER_MARK("grpc_pollset_work.kicked_without_pollers", 0);
      pollset->kicked_without_pollers = 0;
    }
  /* Finished execution - start cleaning up.
     Note that we may arrive here from outside the enclosing while() loop.
     In that case we won't loop though as we haven't added worker to the
     worker list, which means nobody could ask us to re-evaluate polling). */
  done:
    if (!locked) {
      queued_work |= grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
      locked = 1;
    }
    /* If we're forced to re-evaluate polling (via grpc_pollset_kick with
       GRPC_POLLSET_REEVALUATE_POLLING_ON_WAKEUP) then we land here and force
       a loop */
    if (worker->reevaluate_polling_on_wakeup) {
      worker->reevaluate_polling_on_wakeup = 0;
      pollset->kicked_without_pollers = 0;
      if (queued_work || worker->kicked_specifically) {
        /* If there's queued work on the list, then set the deadline to be
           immediate so we get back out of the polling loop quickly */
        deadline = gpr_inf_past(GPR_CLOCK_MONOTONIC);
      }
      keep_polling = 1;
    }
  }
  if (added_worker) {
    remove_worker(pollset, worker);
    gpr_tls_set(&g_current_thread_worker, 0);
  }
  /* release wakeup fd to the local pool */
  worker->wakeup_fd->next = pollset->local_wakeup_cache;
  pollset->local_wakeup_cache = worker->wakeup_fd;
  /* check shutdown conditions */
  if (pollset->shutting_down) {
    if (grpc_pollset_has_workers(pollset)) {
      /* other workers still polling: nudge them so they observe shutdown */
      grpc_pollset_kick(pollset, NULL);
    } else if (!pollset->called_shutdown && pollset->in_flight_cbs == 0) {
      pollset->called_shutdown = 1;
      gpr_mu_unlock(&pollset->mu);
      finish_shutdown(exec_ctx, pollset);
      grpc_exec_ctx_flush(exec_ctx);
      /* Continuing to access pollset here is safe -- it is the caller's
       * responsibility to not destroy when it has outstanding calls to
       * grpc_pollset_work.
       * TODO(dklempner): Can we refactor the shutdown logic to avoid this? */
      gpr_mu_lock(&pollset->mu);
    } else if (!grpc_closure_list_empty(pollset->idle_jobs)) {
      grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
      gpr_mu_unlock(&pollset->mu);
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
    }
  }
  GPR_TIMER_END("grpc_pollset_work", 0);
}
376
/* Begin shutting down a pollset. 'closure' is scheduled once shutdown
   completes (possibly synchronously, from inside this call, when no
   workers or in-flight callbacks remain). Caller must hold pollset->mu. */
void grpc_pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_closure *closure) {
  int idle;
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutting_down = 1;
  pollset->shutdown_done = closure;
  /* wake every worker so they notice the shutdown flag */
  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);
  idle = !grpc_pollset_has_workers(pollset);
  if (idle) {
    /* nobody will run the idle jobs for us: hand them to the exec_ctx */
    grpc_exec_ctx_enqueue_list(exec_ctx, &pollset->idle_jobs, NULL);
  }
  if (!pollset->called_shutdown && pollset->in_flight_cbs == 0 &&
      !grpc_pollset_has_workers(pollset)) {
    pollset->called_shutdown = 1;
    finish_shutdown(exec_ctx, pollset);
  }
}
392
/* Convert an absolute deadline into a poll()-style millisecond timeout:
   -1 for "block forever", 0 when the deadline is within the spin-polling
   window, otherwise the remaining time rounded UP to whole milliseconds
   (so we never wake early and spin). */
int grpc_poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                         gpr_timespec now) {
  static const int64_t max_spin_polling_us = 10;
  gpr_timespec remaining;
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1;
  }
  if (gpr_time_cmp(deadline,
                   gpr_time_add(now, gpr_time_from_micros(
                                         max_spin_polling_us,
                                         GPR_TIMESPAN))) <= 0) {
    return 0;
  }
  remaining = gpr_time_sub(deadline, now);
  /* add (1ms - 1ns) before truncation => round up */
  return gpr_time_to_millis(gpr_time_add(
      remaining, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
}
409
ctiller58393c22015-01-07 14:03:30 -0800410/*
Craig Tiller5ec3bfa2015-05-28 14:13:18 -0700411 * basic_pollset - a vtable that provides polling for zero or one file
Craig Tiller3f529a2c2015-05-28 14:00:47 -0700412 * descriptor via poll()
ctiller58393c22015-01-07 14:03:30 -0800413 */
414
/* Closure payload for promoting a 'basic' (zero-or-one fd) pollset into a
   multipoller once a second fd is added while workers are polling. */
typedef struct grpc_unary_promote_args {
  const grpc_pollset_vtable *original_vtable; /* vtable when promotion began */
  grpc_pollset *pollset;                      /* pollset being promoted */
  grpc_fd *fd;                                /* fd that triggered promotion */
  grpc_closure promotion_closure;             /* runs basic_do_promote */
} grpc_unary_promote_args;
421
/* Idle-job callback that completes the promotion scheduled by
   basic_pollset_add_fd. Runs only when no workers are polling. */
static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args,
                             bool success) {
  grpc_unary_promote_args *up_args = args;
  const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
  grpc_pollset *pollset = up_args->pollset;
  grpc_fd *fd = up_args->fd;

  /*
   * This is quite tricky. There are a number of cases to keep in mind here:
   * 1. fd may have been orphaned
   * 2. The pollset may no longer be a unary poller (and we can't let case #1
   *    leak to other pollset types!)
   * 3. pollset's fd (which may have changed) may have been orphaned
   * 4. The pollset may be shutting down.
   */

  gpr_mu_lock(&pollset->mu);
  /* First we need to ensure that nobody is polling concurrently */
  GPR_ASSERT(!grpc_pollset_has_workers(pollset));

  gpr_free(up_args);
  /* At this point the pollset may no longer be a unary poller. In that case
   * we should just call the right add function and be done. */
  /* TODO(klempner): If we're not careful this could cause infinite recursion.
   * That's not a problem for now because empty_pollset has a trivial poller
   * and we don't have any mechanism to unbecome multipoller. */
  pollset->in_flight_cbs--;
  if (pollset->shutting_down) {
    /* We don't care about this pollset anymore. */
    if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) {
      pollset->called_shutdown = 1;
      finish_shutdown(exec_ctx, pollset);
    }
  } else if (grpc_fd_is_orphaned(fd)) {
    /* Don't try to add it to anything, we'll drop our ref on it below */
  } else if (pollset->vtable != original_vtable) {
    /* someone else already promoted us: delegate to the new vtable
       (0 = do not unlock; we still hold pollset->mu) */
    pollset->vtable->add_fd(exec_ctx, pollset, fd, 0);
  } else if (fd != pollset->data.ptr) {
    grpc_fd *fds[2];
    fds[0] = pollset->data.ptr;
    fds[1] = fd;

    if (fds[0] && !grpc_fd_is_orphaned(fds[0])) {
      /* two live fds: become a real multipoller */
      grpc_platform_become_multipoller(exec_ctx, pollset, fds,
                                       GPR_ARRAY_SIZE(fds));
      GRPC_FD_UNREF(fds[0], "basicpoll");
    } else {
      /* old fd is orphaned and we haven't cleaned it up until now, so remain a
       * unary poller */
      /* Note that it is possible that fds[1] is also orphaned at this point.
       * That's okay, we'll correct it at the next add or poll. */
      if (fds[0]) GRPC_FD_UNREF(fds[0], "basicpoll");
      pollset->data.ptr = fd;
      GRPC_FD_REF(fd, "basicpoll");
    }
  }

  gpr_mu_unlock(&pollset->mu);

  /* Matching ref in basic_pollset_add_fd */
  GRPC_FD_UNREF(fd, "basicpoll_add");
}
484
/* add_fd implementation for the basic (zero-or-one fd) pollset.
   Fast path when no workers are polling; otherwise schedules an
   asynchronous promotion via basic_do_promote.
   Called with pollset->mu held; releases it iff and_unlock_pollset. */
static void basic_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 grpc_fd *fd, int and_unlock_pollset) {
  grpc_unary_promote_args *up_args;
  GPR_ASSERT(fd);
  if (fd == pollset->data.ptr) goto exit; /* already tracking this fd */

  if (!grpc_pollset_has_workers(pollset)) {
    /* Fast path -- no in flight cbs */
    /* TODO(klempner): Comment this out and fix any test failures or establish
     * they are due to timing issues */
    grpc_fd *fds[2];
    fds[0] = pollset->data.ptr;
    fds[1] = fd;

    if (fds[0] == NULL) {
      /* first fd: stay a unary poller */
      pollset->data.ptr = fd;
      GRPC_FD_REF(fd, "basicpoll");
    } else if (!grpc_fd_is_orphaned(fds[0])) {
      /* second live fd: promote to a multipoller immediately */
      grpc_platform_become_multipoller(exec_ctx, pollset, fds,
                                       GPR_ARRAY_SIZE(fds));
      GRPC_FD_UNREF(fds[0], "basicpoll");
    } else {
      /* old fd is orphaned and we haven't cleaned it up until now, so remain a
       * unary poller */
      GRPC_FD_UNREF(fds[0], "basicpoll");
      pollset->data.ptr = fd;
      GRPC_FD_REF(fd, "basicpoll");
    }
    goto exit;
  }

  /* Now we need to promote. This needs to happen when we're not polling. Since
   * this may be called from poll, the wait needs to happen asynchronously. */
  GRPC_FD_REF(fd, "basicpoll_add"); /* released in basic_do_promote */
  pollset->in_flight_cbs++;
  up_args = gpr_malloc(sizeof(*up_args));
  up_args->fd = fd;
  up_args->original_vtable = pollset->vtable;
  up_args->pollset = pollset;
  up_args->promotion_closure.cb = basic_do_promote;
  up_args->promotion_closure.cb_arg = up_args;

  grpc_closure_list_add(&pollset->idle_jobs, &up_args->promotion_closure, 1);
  /* wake all pollers so the pollset can go idle and run the promotion */
  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);

exit:
  if (and_unlock_pollset) {
    gpr_mu_unlock(&pollset->mu);
  }
}
535
/* Poll the (at most one) fd tracked by a basic pollset, together with the
   process-wide wakeup fd and this worker's wakeup fd.  Called with
   pollset->mu held; the mutex is released before blocking in poll() and is
   NOT re-acquired here (hence "and_unlock" in the name — re-locking is the
   caller's responsibility). */
static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
                                                grpc_pollset *pollset,
                                                grpc_pollset_worker *worker,
                                                gpr_timespec deadline,
                                                gpr_timespec now) {
/* Treat hangup/error as both readable and writable so the fd's callbacks get
   a chance to observe the failure. */
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)

  struct pollfd pfd[3];
  grpc_fd *fd;
  grpc_fd_watcher fd_watcher;
  int timeout;
  int r;
  nfds_t nfds;

  fd = pollset->data.ptr;
  /* If the tracked fd was orphaned since it was added, drop the pollset's
     reference and stop tracking it. */
  if (fd && grpc_fd_is_orphaned(fd)) {
    GRPC_FD_UNREF(fd, "basicpoll");
    fd = pollset->data.ptr = NULL;
  }
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  /* pfd[0]: global wakeup fd; pfd[1]: per-worker wakeup fd ("kickers"). */
  pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
  pfd[0].events = POLLIN;
  pfd[0].revents = 0;
  pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
  pfd[1].events = POLLIN;
  pfd[1].revents = 0;
  nfds = 2;
  if (fd) {
    pfd[2].fd = fd->fd;
    pfd[2].revents = 0;
    /* Hold an extra ref so the fd stays alive across the unlocked
       poll() region below; released at the end of this function. */
    GRPC_FD_REF(fd, "basicpoll_begin");
    gpr_mu_unlock(&pollset->mu);
    /* begin_poll returns the event mask to watch; 0 means the fd should not
       be polled right now, so we leave it out of the pollfd set entirely. */
    pfd[2].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
                                              POLLOUT, &fd_watcher);
    if (pfd[2].events != 0) {
      nfds++;
    }
  } else {
    gpr_mu_unlock(&pollset->mu);
  }

  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
     even going into the blocking annotation if possible */
  /* poll fd count (argument 2) is shortened by one if we have no events
     to poll on - such that it only includes the kicker */
  GPR_TIMER_BEGIN("poll", 0);
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  r = grpc_poll_function(pfd, nfds, timeout);
  GRPC_SCHEDULING_END_BLOCKING_REGION;
  GPR_TIMER_END("poll", 0);

  if (r < 0) {
    /* EINTR just means a signal interrupted poll(); it is not an error
       worth logging. */
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
    /* Every grpc_fd_begin_poll must be balanced by grpc_fd_end_poll, even
       on the error and timeout paths — with no readiness reported. */
    if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  } else if (r == 0) {
    /* Timed out: nothing became ready. */
    if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  } else {
    /* Drain whichever wakeup fds fired so subsequent polls can block. */
    if (pfd[0].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
    }
    if (pfd[1].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
    }
    /* nfds > 2 iff the tracked fd was actually included in the poll set. */
    if (nfds > 2) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
                       pfd[2].revents & POLLOUT_CHECK);
    } else if (fd) {
      /* fd existed but begin_poll asked for no events; still must pair. */
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  }

  /* Release the keep-alive ref taken before unlocking. */
  if (fd) {
    GRPC_FD_UNREF(fd, "basicpoll_begin");
  }
}
618
Craig Tillera82950e2015-09-22 12:33:20 -0700619static void basic_pollset_destroy(grpc_pollset *pollset) {
620 if (pollset->data.ptr != NULL) {
621 GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
622 pollset->data.ptr = NULL;
623 }
ctiller58393c22015-01-07 14:03:30 -0800624}
625
/* Vtable for the zero-or-one-fd ("basic") pollset mode.
   NOTE(review): basic_pollset_destroy fills the last two slots — presumably
   one is a shutdown/finish hook and one the destructor, with identical
   cleanup in this mode; confirm against the grpc_pollset_vtable decl. */
static const grpc_pollset_vtable basic_pollset = {
    basic_pollset_add_fd, basic_pollset_maybe_work_and_unlock,
    basic_pollset_destroy, basic_pollset_destroy};
ctiller58393c22015-01-07 14:03:30 -0800629
Craig Tillera82950e2015-09-22 12:33:20 -0700630static void become_basic_pollset(grpc_pollset *pollset, grpc_fd *fd_or_null) {
Craig Tiller3f529a2c2015-05-28 14:00:47 -0700631 pollset->vtable = &basic_pollset;
Craig Tiller3f529a2c2015-05-28 14:00:47 -0700632 pollset->data.ptr = fd_or_null;
Craig Tillera82950e2015-09-22 12:33:20 -0700633 if (fd_or_null != NULL) {
634 GRPC_FD_REF(fd_or_null, "basicpoll");
635 }
ctiller58393c22015-01-07 14:03:30 -0800636}
Craig Tillerd14a1a52015-01-21 15:26:29 -0800637
Craig Tiller190d3602015-02-18 09:23:38 -0800638#endif /* GPR_POSIX_POLLSET */