/*
 *
 * Copyright 2017, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/lib/iomgr/port.h"

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL

#include "src/core/lib/iomgr/ev_epoll1_linux.h"

#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"

/* TODO: sreek: Right now, this wakes up all pollers. In the future we should
 * make sure to wake up one polling thread (which can wake up other threads if
 * needed) */
static grpc_wakeup_fd global_wakeup_fd;
/* The single global epoll set: every fd and the global wakeup fd live here. */
static int g_epfd;
/* Set by kick_poller(); tells the woken poller to consume a timer kick. */
static gpr_atm g_timer_kick;

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;

  gpr_atm read_closure;
  gpr_atm write_closure;

  struct grpc_fd *freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

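/* A worker moves through these states: UNKICKED while it waits, KICKED once
 * another thread (or a shutdown/timeout path) has told it to return, and
 * DESIGNATED_POLLER when it has been chosen as the single worker that calls
 * epoll_wait on the global epoll set. */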
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;

struct grpc_pollset_worker {
  kick_state kick_state;
  bool initialized_cv;
  grpc_pollset_worker *next;
  grpc_pollset_worker *prev;
  gpr_cv cv;
};

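/* Pollsets are sharded into 'neighbourhoods', one per CPU core (see
 * pollset_global_init). Each neighbourhood owns a circular list of its
 * currently active pollsets, protected by its own mutex; the pad keeps
 * neighbourhoods on separate cache lines to avoid false sharing. */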
typedef struct pollset_neighbourhood {
  gpr_mu mu;
  grpc_pollset *active_root;
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighbourhood;

struct grpc_pollset {
  gpr_mu mu;
  pollset_neighbourhood *neighbourhood;
  grpc_pollset_worker *root_worker;
  bool kicked_without_poller;
  bool seen_inactive;
  bool shutting_down;          /* Is the pollset shutting down? */
  bool finish_shutdown_called; /* Has 'finish_shutdown_locked()' been called? */
  grpc_closure *shutdown_closure; /* Called after shutdown is complete */

  grpc_pollset *next;
  grpc_pollset *prev;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error **composite, grpc_error *error,
                         const char *desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns about malloc
 * performance but instead so that implementations with multiple threads in
 * (for example) epoll_wait can deal with the race between pollset removal and
 * incoming poll notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at
 * least without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wake up 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

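/* Take a grpc_fd off the freelist (or allocate a fresh one) and register it
 * with the global epoll set in edge-triggered mode (EPOLLIN | EPOLLOUT |
 * EPOLLET). */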
static grpc_fd *fd_create(int fd, const char *name) {
  grpc_fd *new_fd = NULL;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == NULL) {
    new_fd = gpr_malloc(sizeof(grpc_fd));
  }

  new_fd->fd = fd;
  grpc_lfev_init(&new_fd->read_closure);
  grpc_lfev_init(&new_fd->write_closure);
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = NULL;

  char *fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifdef GRPC_FD_REF_COUNT_DEBUG
  gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
#endif
  gpr_free(fd_name);

  struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
                           .data.ptr = new_fd};
  if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}

static int fd_wrapped_fd(grpc_fd *fd) { return fd->fd; }

/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
  if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
                             GRPC_ERROR_REF(why))) {
    shutdown(fd->fd, SHUT_RDWR);
    grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                      grpc_closure *on_done, int *release_fd,
                      const char *reason) {
  grpc_error *error = GRPC_ERROR_NONE;

  if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
    fd_shutdown(exec_ctx, fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason));
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != NULL) {
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
  }

  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  grpc_lfev_destroy(&fd->read_closure);
  grpc_lfev_destroy(&fd->write_closure);

  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
                                                  grpc_fd *fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset *)notifier;
}

static bool fd_is_shutdown(grpc_fd *fd) {
  return grpc_lfev_is_shutdown(&fd->read_closure);
}

static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
}

static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}

static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
  return NULL; /* TODO(ctiller): add a global workqueue */
}

static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  grpc_lfev_set_ready(exec_ctx, &fd->read_closure);

  /* Note, it is possible that fd_become_readable might be called twice with
     different 'notifier's when an fd becomes readable and it is in two epoll
     sets (this can happen briefly during polling island merges). In such cases
     it does not really matter which notifier is set as the
     read_notifier_pollset (they would both point to the same polling island
     anyway). */
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
}

/*******************************************************************************
 * Pollset Definitions
 */

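/* g_active_poller holds the (grpc_pollset_worker *) that is currently the
 * designated poller, or 0 if there is none. g_neighbourhoods is the array of
 * g_num_neighbourhoods per-CPU shards, and g_neighbour_scan_state is scratch
 * space used by end_worker to remember which neighbourhoods were successfully
 * trylocked on its first scan. */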
GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);
static gpr_atm g_active_poller;
static pollset_neighbourhood *g_neighbourhoods;
static bool *g_neighbour_scan_state;
static size_t g_num_neighbourhoods;

/* Return true if first in list */
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
  if (pollset->root_worker == NULL) {
    pollset->root_worker = worker;
    worker->next = worker->prev = worker;
    return true;
  } else {
    worker->next = pollset->root_worker;
    worker->prev = worker->next->prev;
    worker->next->prev = worker;
    worker->prev->next = worker;
    return false;
  }
}

/* Result of removing a worker from a pollset's worker list */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset *pollset,
                                          grpc_pollset_worker *worker) {
  if (worker == pollset->root_worker) {
    if (worker == worker->next) {
      pollset->root_worker = NULL;
      return EMPTIED;
    } else {
      pollset->root_worker = worker->next;
      worker->prev->next = worker->next;
      worker->next->prev = worker->prev;
      return NEW_ROOT;
    }
  } else {
    worker->prev->next = worker->next;
    worker->next->prev = worker->prev;
    return REMOVED;
  }
}

static grpc_error *pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  global_wakeup_fd.read_fd = -1;
  grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
                           .data.ptr = &global_wakeup_fd};
  if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd, &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  g_num_neighbourhoods = GPR_MAX(1, gpr_cpu_num_cores());
  g_neighbourhoods =
      gpr_zalloc(sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
  g_neighbour_scan_state =
      gpr_malloc(sizeof(*g_neighbour_scan_state) * g_num_neighbourhoods);
  for (size_t i = 0; i < g_num_neighbourhoods; i++) {
    gpr_mu_init(&g_neighbourhoods[i].mu);
  }
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
  for (size_t i = 0; i < g_num_neighbourhoods; i++) {
    gpr_mu_destroy(&g_neighbourhoods[i].mu);
  }
  gpr_free(g_neighbourhoods);
  gpr_free(g_neighbour_scan_state);
}

static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighbourhood = &g_neighbourhoods[gpr_cpu_current_cpu()];
  pollset->seen_inactive = true;
  pollset->next = pollset->prev = pollset;
}

static void pollset_destroy(grpc_pollset *pollset) {
  gpr_mu_lock(&pollset->neighbourhood->mu);
  pollset->prev->next = pollset->next;
  pollset->next->prev = pollset->prev;
  if (pollset == pollset->neighbourhood->active_root) {
    pollset->neighbourhood->active_root =
        pollset->next == pollset ? NULL : pollset->next;
  }
  gpr_mu_unlock(&pollset->neighbourhood->mu);
  gpr_mu_destroy(&pollset->mu);
}

static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
  grpc_error *error = GRPC_ERROR_NONE;
  if (pollset->root_worker != NULL) {
    grpc_pollset_worker *worker = pollset->root_worker;
    do {
      if (worker->initialized_cv) {
        worker->kick_state = KICKED;
        gpr_cv_signal(&worker->cv);
      } else {
        worker->kick_state = KICKED;
        append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                     "pollset_shutdown");
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  return error;
}

static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
                                          grpc_pollset *pollset) {
  if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL) {
    grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = NULL;
  }
}

static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                             grpc_closure *closure) {
  GPR_ASSERT(pollset->shutdown_closure == NULL);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
}

#define MAX_EPOLL_EVENTS 10

static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                           gpr_timespec now) {
  gpr_timespec timeout;
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1;
  }

  if (gpr_time_cmp(deadline, now) <= 0) {
    return 0;
  }

  static const gpr_timespec round_up = {
      .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
  timeout = gpr_time_sub(deadline, now);
  int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
  return millis >= 1 ? millis : 1;
}

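/* Do the actual epoll_wait on the global epoll set and dispatch readiness to
 * the affected fds. An event carrying &global_wakeup_fd as its data pointer is
 * a kick: consume the wakeup fd, and additionally consume a timer kick if
 * g_timer_kick was set by kick_poller(). */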
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 gpr_timespec now, gpr_timespec deadline) {
  struct epoll_event events[MAX_EPOLL_EVENTS];
  static const char *err_desc = "pollset_poll";

  int timeout = poll_deadline_to_millis_timeout(deadline, now);

  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    r = epoll_wait(g_epfd, events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  grpc_error *error = GRPC_ERROR_NONE;
  for (int i = 0; i < r; i++) {
    void *data_ptr = events[i].data.ptr;
    if (data_ptr == &global_wakeup_fd) {
      if (gpr_atm_no_barrier_cas(&g_timer_kick, 1, 0)) {
        grpc_timer_consume_kick();
      }
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd *fd = (grpc_fd *)(data_ptr);
      bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (events[i].events & EPOLLOUT) != 0;
      if (read_ev || cancel) {
        fd_become_readable(exec_ctx, fd, pollset);
      }
      if (write_ev || cancel) {
        fd_become_writable(exec_ctx, fd);
      }
    }
  }

  return error;
}

#if 0
static void verify_all_entries_in_neighbourhood_list(
    grpc_pollset *root, bool should_be_seen_inactive) {
  if (root == NULL) return;
  grpc_pollset *p = root;
  do {
    GPR_ASSERT(p->seen_inactive == should_be_seen_inactive);
    p = p->next;
  } while (p != root);
}

static void verify_neighbourhood_lists(pollset_neighbourhood *neighbourhood) {
  // assumes neighbourhood->mu locked
  verify_all_entries_in_neighbourhood_list(neighbourhood->active_root, false);
  verify_all_entries_in_neighbourhood_list(neighbourhood->inactive_root, true);
}
#endif

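/* Prepare 'worker' to poll on behalf of 'pollset': if the pollset had been
 * marked seen_inactive, relink it into its (possibly new) neighbourhood's
 * active list first. The worker is then inserted into the pollset's worker
 * list and, unless it immediately became the designated poller, sleeps on its
 * condition variable until it is kicked or the deadline passes. Returns true
 * iff the caller should go on to call pollset_epoll (i.e. this worker is the
 * designated poller and the pollset is not shutting down). */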
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
                         grpc_pollset_worker **worker_hdl, gpr_timespec *now,
                         gpr_timespec deadline) {
  if (worker_hdl != NULL) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kick_state = UNKICKED;

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move it back to
    // the active list
    pollset_neighbourhood *neighbourhood = pollset->neighbourhood =
        &g_neighbourhoods[gpr_cpu_current_cpu()];
    gpr_mu_unlock(&pollset->mu);
    // pollset unlocked: state may change (even worker->kick_state)
  retry_lock_neighbourhood:
    gpr_mu_lock(&neighbourhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (pollset->seen_inactive) {
      if (neighbourhood != pollset->neighbourhood) {
        gpr_mu_unlock(&neighbourhood->mu);
        neighbourhood = pollset->neighbourhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighbourhood;
      }
      pollset->seen_inactive = false;
      if (neighbourhood->active_root == NULL) {
        neighbourhood->active_root = pollset->next = pollset->prev = pollset;
        if (gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
          worker->kick_state = DESIGNATED_POLLER;
        }
      } else {
        pollset->next = neighbourhood->active_root;
        pollset->prev = pollset->next->prev;
        pollset->next->prev = pollset->prev->next = pollset;
      }
    }
    gpr_mu_unlock(&neighbourhood->mu);
  }
  worker_insert(pollset, worker);
  if (worker->kick_state == UNKICKED) {
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    do {
      if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
          worker->kick_state == UNKICKED) {
        worker->kick_state = KICKED;
      }
    } while (worker->kick_state == UNKICKED);
    *now = gpr_now(now->clock_type);
  }

  return worker->kick_state == DESIGNATED_POLLER &&
         pollset->shutdown_closure == NULL;
}

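/* Walk the neighbourhood's active pollsets looking for an UNKICKED worker
 * that can be promoted to designated poller. Pollsets that turn out to have
 * no usable worker are marked seen_inactive and unlinked from the active
 * list. Returns true if a worker was found (even if the CAS on
 * g_active_poller was lost to another thread). */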
static bool check_neighbourhood_for_available_poller(
    pollset_neighbourhood *neighbourhood) {
  bool found_worker = false;
  do {
    grpc_pollset *inspect = neighbourhood->active_root;
    if (inspect == NULL) {
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker *inspect_worker = inspect->root_worker;
    if (inspect_worker != NULL) {
      do {
        if (inspect_worker->kick_state == UNKICKED) {
          if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                     (gpr_atm)inspect_worker)) {
            inspect_worker->kick_state = DESIGNATED_POLLER;
            if (inspect_worker->initialized_cv) {
              gpr_cv_signal(&inspect_worker->cv);
            }
          }
          // even if we didn't win the CAS, there's a worker; we can stop
          found_worker = true;
          break;
        }
        inspect_worker = inspect_worker->next;
      } while (inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      inspect->seen_inactive = true;
      if (inspect == neighbourhood->active_root) {
        if (inspect->next == neighbourhood->active_root) {
          neighbourhood->active_root = NULL;
        } else {
          neighbourhood->active_root = inspect->next;
        }
      }
      inspect->next->prev = inspect->prev;
      inspect->prev->next = inspect->next;
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  return found_worker;
}

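/* Tear down 'worker'. If it was the designated poller, hand the role to the
 * next UNKICKED worker on this pollset when there is one; otherwise clear
 * g_active_poller and scan the neighbourhoods for a replacement, first with
 * trylock (recording the result in g_neighbour_scan_state) and then with a
 * blocking lock for the ones that were skipped. */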
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  if (worker_hdl != NULL) *worker_hdl = NULL;
  worker->kick_state = KICKED;
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    GPR_ASSERT(!pollset->seen_inactive);
    if (worker->next != worker && worker->next->kick_state == UNKICKED) {
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      worker->next->kick_state = DESIGNATED_POLLER;
      gpr_cv_signal(&worker->next->cv);
      if (grpc_exec_ctx_has_work(exec_ctx)) {
        gpr_mu_unlock(&pollset->mu);
        grpc_exec_ctx_flush(exec_ctx);
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      gpr_mu_unlock(&pollset->mu);
      size_t poller_neighbourhood_idx =
          (size_t)(pollset->neighbourhood - g_neighbourhoods);
      bool found_worker = false;
      for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
        pollset_neighbourhood *neighbourhood =
            &g_neighbourhoods[(poller_neighbourhood_idx + i) %
                              g_num_neighbourhoods];
        if (gpr_mu_trylock(&neighbourhood->mu)) {
          found_worker =
              check_neighbourhood_for_available_poller(neighbourhood);
          gpr_mu_unlock(&neighbourhood->mu);
          g_neighbour_scan_state[i] = true;
        } else {
          g_neighbour_scan_state[i] = false;
        }
      }
      for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
        if (g_neighbour_scan_state[i]) continue;
        pollset_neighbourhood *neighbourhood =
            &g_neighbourhoods[(poller_neighbourhood_idx + i) %
                              g_num_neighbourhoods];
        gpr_mu_lock(&neighbourhood->mu);
        found_worker =
            check_neighbourhood_for_available_poller(neighbourhood);
        gpr_mu_unlock(&neighbourhood->mu);
      }
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
    }
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
}

/* pollset->mu must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock
   and ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker **worker_hdl,
                                gpr_timespec now, gpr_timespec deadline) {
  grpc_pollset_worker worker;
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollset_work";
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }
  gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
  if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!pollset->shutdown_closure);
    GPR_ASSERT(!pollset->seen_inactive);
    gpr_mu_unlock(&pollset->mu);
    append_error(&error, pollset_epoll(exec_ctx, pollset, now, deadline),
                 err_desc);
    gpr_mu_lock(&pollset->mu);
    gpr_tls_set(&g_current_thread_worker, 0);
  }
  end_worker(exec_ctx, pollset, &worker, worker_hdl);
  gpr_tls_set(&g_current_thread_pollset, 0);
  return error;
}

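/* Kick semantics: with no specific worker, wake whichever worker is most
 * appropriate for this pollset (via the global wakeup fd if it is the
 * designated poller, via its condition variable otherwise), or record
 * kicked_without_poller when nobody is polling. Kicking a specific worker
 * marks it KICKED and wakes it the same way; already-KICKED workers and the
 * current thread's own worker need no wakeup. */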
static grpc_error *pollset_kick(grpc_pollset *pollset,
                                grpc_pollset_worker *specific_worker) {
  if (specific_worker == NULL) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      grpc_pollset_worker *root_worker = pollset->root_worker;
      if (root_worker == NULL) {
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      }
      grpc_pollset_worker *next_worker = root_worker->next;
      if (root_worker == next_worker &&
          root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
                             &g_active_poller)) {
        root_worker->kick_state = KICKED;
        return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
      } else if (next_worker->kick_state == UNKICKED) {
        GPR_ASSERT(next_worker->initialized_cv);
        next_worker->kick_state = KICKED;
        gpr_cv_signal(&next_worker->cv);
        return GRPC_ERROR_NONE;
      } else {
        return GRPC_ERROR_NONE;
      }
    } else {
      return GRPC_ERROR_NONE;
    }
  } else if (specific_worker->kick_state == KICKED) {
    return GRPC_ERROR_NONE;
  } else if (gpr_tls_get(&g_current_thread_worker) ==
             (intptr_t)specific_worker) {
    specific_worker->kick_state = KICKED;
    return GRPC_ERROR_NONE;
  } else if (specific_worker ==
             (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
    specific_worker->kick_state = KICKED;
    return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
  } else if (specific_worker->initialized_cv) {
    specific_worker->kick_state = KICKED;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  } else {
    specific_worker->kick_state = KICKED;
    return GRPC_ERROR_NONE;
  }
}

static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {}

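/* Wake up some poller: record the kick in g_timer_kick so the woken poller
 * knows to call grpc_timer_consume_kick() (see pollset_epoll), then signal
 * the global wakeup fd. */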
static grpc_error *kick_poller(void) {
  gpr_atm_no_barrier_store(&g_timer_kick, 1);
  return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}

/*******************************************************************************
 * Workqueue Definitions
 */

#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
                                     const char *file, int line,
                                     const char *reason) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
                            const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx,
                            grpc_workqueue *workqueue) {}
#endif

static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
  return grpc_schedule_on_exec_ctx;
}

/*******************************************************************************
 * Pollset-set Definitions
 */

static grpc_pollset_set *pollset_set_create(void) {
  return (grpc_pollset_set *)((intptr_t)0xdeafbeef);
}

static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_pollset_set *pss) {}

static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

/*******************************************************************************
 * Event engine binding
 */

static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
}

static const grpc_event_engine_vtable vtable = {
    .pollset_size = sizeof(grpc_pollset),

    .fd_create = fd_create,
    .fd_wrapped_fd = fd_wrapped_fd,
    .fd_orphan = fd_orphan,
    .fd_shutdown = fd_shutdown,
    .fd_is_shutdown = fd_is_shutdown,
    .fd_notify_on_read = fd_notify_on_read,
    .fd_notify_on_write = fd_notify_on_write,
    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
    .fd_get_workqueue = fd_get_workqueue,

    .pollset_init = pollset_init,
    .pollset_shutdown = pollset_shutdown,
    .pollset_destroy = pollset_destroy,
    .pollset_work = pollset_work,
    .pollset_kick = pollset_kick,
    .pollset_add_fd = pollset_add_fd,

    .pollset_set_create = pollset_set_create,
    .pollset_set_destroy = pollset_set_destroy,
    .pollset_set_add_pollset = pollset_set_add_pollset,
    .pollset_set_del_pollset = pollset_set_del_pollset,
    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
    .pollset_set_add_fd = pollset_set_add_fd,
    .pollset_set_del_fd = pollset_set_del_fd,

    .kick_poller = kick_poller,

    .workqueue_ref = workqueue_ref,
    .workqueue_unref = workqueue_unref,
    .workqueue_scheduler = workqueue_scheduler,

    .shutdown_engine = shutdown_engine,
};

/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
 * Create the global epoll fd up front so we can tell whether epoll support is
 * actually available. */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
  if (!grpc_has_wakeup_fd()) {
    return NULL;
  }

  g_epfd = epoll_create1(EPOLL_CLOEXEC);
  if (g_epfd < 0) {
    gpr_log(GPR_ERROR, "epoll unavailable");
    return NULL;
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    close(g_epfd);
    fd_global_shutdown();
    return NULL;
  }

  return &vtable;
}

#else /* defined(GRPC_LINUX_EPOLL) */
#if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_posix.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
 * NULL */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
  return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
#endif /* !defined(GRPC_LINUX_EPOLL) */