/*
 *
 * Copyright 2017, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/lib/iomgr/port.h"

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL

#include "src/core/lib/iomgr/ev_epoll1_linux.h"

#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"

/* TODO: sreek: Right now, this wakes up all pollers. In the future we should
 * make sure to wake up only one polling thread (which can wake up other
 * threads if needed). */
static grpc_wakeup_fd global_wakeup_fd;
static int g_epfd;
static gpr_atm g_timer_kick;

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;

  gpr_atm read_closure;
  gpr_atm write_closure;

  struct grpc_fd *freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored here is (grpc_pollset *). */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

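/* Each worker tracks its own kick state:
 *   UNKICKED          - waiting (either on its condition variable or inside
 *                       epoll_wait) and eligible to be woken by a kick.
 *   KICKED            - a kick has been delivered (or the worker is leaving);
 *                       the worker should return to its caller promptly.
 *   DESIGNATED_POLLER - this worker currently owns the single shared epoll set
 *                       and is the one thread calling epoll_wait. */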
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;

struct grpc_pollset_worker {
  kick_state kick_state;
  bool initialized_cv;
  grpc_pollset_worker *next;
  grpc_pollset_worker *prev;
  gpr_cv cv;
};

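/* Pollsets are sharded into 'neighbourhoods' (one per CPU core, see
 * pollset_global_init), so that handing off the designated-poller role locks a
 * per-neighbourhood mutex rather than a single global one. Each neighbourhood
 * keeps two circular lists of pollsets: an 'active' list of pollsets that have
 * recently had workers, and an 'inactive' list for the rest. */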
typedef struct pollset_neighbourhood {
  gpr_mu mu;
  grpc_pollset *active_root;
  grpc_pollset *inactive_root;
  bool seen_inactive;
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighbourhood;

struct grpc_pollset {
  gpr_mu mu;
  pollset_neighbourhood *neighbourhood;
  grpc_pollset_worker *root_worker;
  bool kicked_without_poller;
  bool seen_inactive;
  bool shutting_down;          /* Is the pollset shutting down? */
  bool finish_shutdown_called; /* Has 'finish_shutdown_locked()' been called? */
  grpc_closure *shutdown_closure; /* Called after shutdown is complete */

  grpc_pollset *next;
  grpc_pollset *prev;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error **composite, grpc_error *error,
                         const char *desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wake up 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd *fd_create(int fd, const char *name) {
  grpc_fd *new_fd = NULL;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == NULL) {
    new_fd = gpr_malloc(sizeof(grpc_fd));
  }

  new_fd->fd = fd;
  grpc_lfev_init(&new_fd->read_closure);
  grpc_lfev_init(&new_fd->write_closure);
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = NULL;

  char *fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifdef GRPC_FD_REF_COUNT_DEBUG
  gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
#endif
  gpr_free(fd_name);

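  /* Register the fd with the single global epoll set for both read and write
   * interest, in edge-triggered mode (EPOLLET). The registration lasts for the
   * lifetime of the fd; readiness is fanned out to interested closures through
   * the lockfree_event fields above. */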
  struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
                           .data.ptr = new_fd};
  if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}

static int fd_wrapped_fd(grpc_fd *fd) { return fd->fd; }

/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
  if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
                             GRPC_ERROR_REF(why))) {
    shutdown(fd->fd, SHUT_RDWR);
    grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                      grpc_closure *on_done, int *release_fd,
                      const char *reason) {
  grpc_error *error = GRPC_ERROR_NONE;

  if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
    fd_shutdown(exec_ctx, fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason));
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != NULL) {
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
  }

  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  grpc_lfev_destroy(&fd->read_closure);
  grpc_lfev_destroy(&fd->write_closure);

  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
                                                  grpc_fd *fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset *)notifier;
}

static bool fd_is_shutdown(grpc_fd *fd) {
  return grpc_lfev_is_shutdown(&fd->read_closure);
}

static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
}

static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}

static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
  return NULL; /* TODO(ctiller): add a global workqueue */
}

static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  grpc_lfev_set_ready(exec_ctx, &fd->read_closure);

  /* Note, it is possible that fd_become_readable might be called twice with
     different 'notifier's when an fd becomes readable and it is in two epoll
     sets (this can happen briefly during polling island merges). In such cases
     it does not really matter which notifier is set as the
     read_notifier_pollset (they would both point to the same polling island
     anyway). */
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
}

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);
static gpr_atm g_active_poller;
static pollset_neighbourhood *g_neighbourhoods;
static bool *g_neighbour_scan_state;
static size_t g_num_neighbourhoods;

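/* Workers on a pollset form a circular doubly-linked list rooted at
 * pollset->root_worker. worker_insert appends at the tail; worker_remove
 * reports whether the list became empty, gained a new root, or merely lost an
 * interior element. */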
/* Return true if first in list */
static bool worker_insert(grpc_pollset *pollset, grpc_pollset_worker *worker) {
  if (pollset->root_worker == NULL) {
    pollset->root_worker = worker;
    worker->next = worker->prev = worker;
    return true;
  } else {
    worker->next = pollset->root_worker;
    worker->prev = worker->next->prev;
    worker->next->prev = worker;
    worker->prev->next = worker;
    return false;
  }
}

/* Return EMPTIED if the removed worker was the last one in the list */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset *pollset,
                                          grpc_pollset_worker *worker) {
  if (worker == pollset->root_worker) {
    if (worker == worker->next) {
      pollset->root_worker = NULL;
      return EMPTIED;
    } else {
      pollset->root_worker = worker->next;
      worker->prev->next = worker->next;
      worker->next->prev = worker->prev;
      return NEW_ROOT;
    }
  } else {
    worker->prev->next = worker->next;
    worker->next->prev = worker->prev;
    return REMOVED;
  }
}

static grpc_error *pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  global_wakeup_fd.read_fd = -1;
  grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
                           .data.ptr = &global_wakeup_fd};
  if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd, &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  g_num_neighbourhoods = GPR_MAX(1, gpr_cpu_num_cores());
  g_neighbourhoods =
      gpr_zalloc(sizeof(*g_neighbourhoods) * g_num_neighbourhoods);
  g_neighbour_scan_state =
      gpr_malloc(sizeof(*g_neighbour_scan_state) * g_num_neighbourhoods);
  for (size_t i = 0; i < g_num_neighbourhoods; i++) {
    gpr_mu_init(&g_neighbourhoods[i].mu);
    g_neighbourhoods[i].seen_inactive = true;
  }
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
  for (size_t i = 0; i < g_num_neighbourhoods; i++) {
    gpr_mu_destroy(&g_neighbourhoods[i].mu);
  }
  gpr_free(g_neighbourhoods);
  gpr_free(g_neighbour_scan_state);
}

static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighbourhood = &g_neighbourhoods[gpr_cpu_current_cpu()];
  pollset->seen_inactive = true;
  pollset->next = pollset->prev = pollset;
}

static void pollset_destroy(grpc_pollset *pollset) {
  gpr_mu_lock(&pollset->neighbourhood->mu);
  pollset->prev->next = pollset->next;
  pollset->next->prev = pollset->prev;
  if (pollset == pollset->neighbourhood->active_root) {
    pollset->neighbourhood->active_root =
        pollset->next == pollset ? NULL : pollset->next;
  } else if (pollset == pollset->neighbourhood->inactive_root) {
    pollset->neighbourhood->inactive_root =
        pollset->next == pollset ? NULL : pollset->next;
  }
  gpr_mu_unlock(&pollset->neighbourhood->mu);
  gpr_mu_destroy(&pollset->mu);
}

static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
  grpc_error *error = GRPC_ERROR_NONE;
  if (pollset->root_worker != NULL) {
    grpc_pollset_worker *worker = pollset->root_worker;
    do {
      if (worker->initialized_cv) {
        worker->kick_state = KICKED;
        gpr_cv_signal(&worker->cv);
      } else {
        worker->kick_state = KICKED;
        append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                     "pollset_shutdown");
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  return error;
}

static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
                                          grpc_pollset *pollset) {
  if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL) {
    grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = NULL;
  }
}

static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                             grpc_closure *closure) {
  GPR_ASSERT(pollset->shutdown_closure == NULL);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
}

#define MAX_EPOLL_EVENTS 10

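/* Convert an absolute deadline into an epoll_wait timeout in milliseconds:
 * -1 for 'wait forever', 0 for an already-expired deadline, and otherwise the
 * remaining time rounded up (and clamped to at least 1ms) so that we never
 * wake up early and spin. */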
static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                           gpr_timespec now) {
  gpr_timespec timeout;
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1;
  }

  if (gpr_time_cmp(deadline, now) <= 0) {
    return 0;
  }

  static const gpr_timespec round_up = {
      .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
  timeout = gpr_time_sub(deadline, now);
  int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
  return millis >= 1 ? millis : 1;
}

static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 gpr_timespec now, gpr_timespec deadline) {
  struct epoll_event events[MAX_EPOLL_EVENTS];
  static const char *err_desc = "pollset_poll";

  int timeout = poll_deadline_to_millis_timeout(deadline, now);

  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    r = epoll_wait(g_epfd, events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  grpc_error *error = GRPC_ERROR_NONE;
  for (int i = 0; i < r; i++) {
    void *data_ptr = events[i].data.ptr;
    if (data_ptr == &global_wakeup_fd) {
      if (gpr_atm_no_barrier_cas(&g_timer_kick, 1, 0)) {
        grpc_timer_consume_kick();
      }
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd *fd = (grpc_fd *)(data_ptr);
      bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (events[i].events & EPOLLOUT) != 0;
      if (read_ev || cancel) {
        fd_become_readable(exec_ctx, fd, pollset);
      }
      if (write_ev || cancel) {
        fd_become_writable(exec_ctx, fd);
      }
    }
  }

  return error;
}

#if 0
static void verify_all_entries_in_neighbourhood_list(
    grpc_pollset *root, bool should_be_seen_inactive) {
  if (root == NULL) return;
  grpc_pollset *p = root;
  do {
    GPR_ASSERT(p->seen_inactive == should_be_seen_inactive);
    p = p->next;
  } while (p != root);
}

static void verify_neighbourhood_lists(pollset_neighbourhood *neighbourhood) {
  // assumes neighbourhood->mu locked
  verify_all_entries_in_neighbourhood_list(neighbourhood->active_root, false);
  verify_all_entries_in_neighbourhood_list(neighbourhood->inactive_root, true);
}
#endif

static void move_pollset_to_neighbourhood_list(grpc_pollset *pollset,
                                               grpc_pollset **from_root,
                                               grpc_pollset **to_root) {
  // remove from old list
  pollset->prev->next = pollset->next;
  pollset->next->prev = pollset->prev;
  if (*from_root == pollset) {
    *from_root = pollset->next == pollset ? NULL : pollset->next;
  }
  // add to new list
  if (*to_root == NULL) {
    *to_root = pollset->next = pollset->prev = pollset;
  } else {
    pollset->next = *to_root;
    pollset->prev = pollset->next->prev;
    pollset->next->prev = pollset->prev->next = pollset;
  }
}

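/* Prepare 'worker' for polling on 'pollset'. A pollset that had gone inactive
 * is moved back to its neighbourhood's active list, and if the whole
 * neighbourhood had gone inactive this worker also tries to claim the global
 * designated-poller slot (g_active_poller). A worker that is neither kicked
 * nor designated blocks on its condition variable until it is kicked or the
 * deadline expires. Returns true iff this worker should call epoll_wait, i.e.
 * it is the designated poller and the pollset is not shutting down. */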
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
                         grpc_pollset_worker **worker_hdl, gpr_timespec *now,
                         gpr_timespec deadline) {
  if (worker_hdl != NULL) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kick_state = UNKICKED;

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move back to the
    // active list
    pollset_neighbourhood *neighbourhood = pollset->neighbourhood;
    gpr_mu_unlock(&pollset->mu);
    // pollset unlocked: state may change (even worker->kick_state)
    gpr_mu_lock(&neighbourhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (pollset->seen_inactive) {
      pollset->seen_inactive = false;
      move_pollset_to_neighbourhood_list(pollset, &neighbourhood->inactive_root,
                                         &neighbourhood->active_root);
      if (neighbourhood->seen_inactive) {
        neighbourhood->seen_inactive = false;
        if (gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
          worker->kick_state = DESIGNATED_POLLER;
        }
      }
    }
    gpr_mu_unlock(&neighbourhood->mu);
  }
  worker_insert(pollset, worker);
  if (worker->kick_state == UNKICKED) {
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    do {
      if (gpr_cv_wait(&worker->cv, &pollset->mu, deadline) &&
          worker->kick_state == UNKICKED) {
        worker->kick_state = KICKED;
      }
    } while (worker->kick_state == UNKICKED);
    *now = gpr_now(now->clock_type);
  }

  return worker->kick_state == DESIGNATED_POLLER &&
         pollset->shutdown_closure == NULL;
}

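/* With the neighbourhood lock held, walk its active pollsets looking for an
 * UNKICKED worker that can be promoted to designated poller. Pollsets that
 * turn out to have no promotable worker are marked seen_inactive and moved to
 * the inactive list as a side effect. Returns true if a worker was found. */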
static bool check_neighbourhood_for_available_poller(
    pollset_neighbourhood *neighbourhood) {
  bool found_worker = false;
  do {
    grpc_pollset *inspect = neighbourhood->active_root;
    if (inspect == NULL) {
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker *inspect_worker = inspect->root_worker;
    if (inspect_worker != NULL) {
      do {
        if (inspect_worker->kick_state == UNKICKED) {
          if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                     (gpr_atm)inspect_worker)) {
            inspect_worker->kick_state = DESIGNATED_POLLER;
            if (inspect_worker->initialized_cv) {
              gpr_cv_signal(&inspect_worker->cv);
            }
          }
          // even if we didn't win the cas, there's a worker, we can stop
          found_worker = true;
          break;
        }
        inspect_worker = inspect_worker->next;
      } while (inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      inspect->seen_inactive = true;
      move_pollset_to_neighbourhood_list(inspect, &neighbourhood->active_root,
                                         &neighbourhood->inactive_root);
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  if (!found_worker) {
    neighbourhood->seen_inactive = true;
  }
  return found_worker;
}

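/* Tear down 'worker' after polling. If this worker was the designated poller
 * it must hand that role off before leaving: preferably to the next UNKICKED
 * worker on the same pollset, otherwise by scanning the neighbourhoods
 * (starting with its own) for any pollset with a promotable worker. Pending
 * closures in the exec_ctx are flushed while the pollset lock is dropped. */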
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  if (worker_hdl != NULL) *worker_hdl = NULL;
  worker->kick_state = KICKED;
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    GPR_ASSERT(!pollset->seen_inactive);
    if (worker->next != worker && worker->next->kick_state == UNKICKED) {
      assert(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      worker->next->kick_state = DESIGNATED_POLLER;
      gpr_cv_signal(&worker->next->cv);
      if (grpc_exec_ctx_has_work(exec_ctx)) {
        gpr_mu_unlock(&pollset->mu);
        grpc_exec_ctx_flush(exec_ctx);
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      gpr_mu_unlock(&pollset->mu);
      size_t poller_neighbourhood_idx =
          (size_t)(pollset->neighbourhood - g_neighbourhoods);
      bool found_worker = false;
      for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
        pollset_neighbourhood *neighbourhood =
            &g_neighbourhoods[(poller_neighbourhood_idx + i) %
                              g_num_neighbourhoods];
        if (gpr_mu_trylock(&neighbourhood->mu)) {
          found_worker =
              check_neighbourhood_for_available_poller(neighbourhood);
          gpr_mu_unlock(&neighbourhood->mu);
          g_neighbour_scan_state[i] = true;
        } else {
          g_neighbour_scan_state[i] = false;
        }
      }
      if (!found_worker) {
        for (size_t i = 0; !found_worker && i < g_num_neighbourhoods; i++) {
          if (g_neighbour_scan_state[i]) continue;
          pollset_neighbourhood *neighbourhood =
              &g_neighbourhoods[(poller_neighbourhood_idx + i) %
                                g_num_neighbourhoods];
          gpr_mu_lock(&neighbourhood->mu);
          found_worker =
              check_neighbourhood_for_available_poller(neighbourhood);
          gpr_mu_unlock(&neighbourhood->mu);
        }
      }
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
    }
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
}

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock
   and ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker **worker_hdl,
                                gpr_timespec now, gpr_timespec deadline) {
  grpc_pollset_worker worker;
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollset_work";
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }
  gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
  if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!pollset->shutdown_closure);
    gpr_mu_unlock(&pollset->mu);
    append_error(&error, pollset_epoll(exec_ctx, pollset, now, deadline),
                 err_desc);
    gpr_mu_lock(&pollset->mu);
    gpr_tls_set(&g_current_thread_worker, 0);
  }
  end_worker(exec_ctx, pollset, &worker, worker_hdl);
  gpr_tls_set(&g_current_thread_pollset, 0);
  return error;
}

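/* Wake up a worker on 'pollset'. With specific_worker == NULL: if no worker is
 * present, remember the kick (kicked_without_poller) so the next pollset_work
 * call returns immediately; if the only worker is the designated poller, poke
 * the global wakeup fd so epoll_wait returns; otherwise signal an unkicked
 * waiter's condition variable (kicks from the pollset's own thread are
 * no-ops). With a specific worker: mark it KICKED and wake it through
 * whichever of these mechanisms applies. */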
static grpc_error *pollset_kick(grpc_pollset *pollset,
                                grpc_pollset_worker *specific_worker) {
  if (specific_worker == NULL) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      grpc_pollset_worker *root_worker = pollset->root_worker;
      if (root_worker == NULL) {
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      }
      grpc_pollset_worker *next_worker = root_worker->next;
      if (root_worker == next_worker &&
          root_worker == (grpc_pollset_worker *)gpr_atm_no_barrier_load(
                             &g_active_poller)) {
        root_worker->kick_state = KICKED;
        return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
      } else if (next_worker->kick_state == UNKICKED) {
        GPR_ASSERT(next_worker->initialized_cv);
        next_worker->kick_state = KICKED;
        gpr_cv_signal(&next_worker->cv);
        return GRPC_ERROR_NONE;
      } else {
        return GRPC_ERROR_NONE;
      }
    } else {
      return GRPC_ERROR_NONE;
    }
  } else if (specific_worker->kick_state == KICKED) {
    return GRPC_ERROR_NONE;
  } else if (gpr_tls_get(&g_current_thread_worker) ==
             (intptr_t)specific_worker) {
    specific_worker->kick_state = KICKED;
    return GRPC_ERROR_NONE;
  } else if (specific_worker ==
             (grpc_pollset_worker *)gpr_atm_no_barrier_load(&g_active_poller)) {
    specific_worker->kick_state = KICKED;
    return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
  } else if (specific_worker->initialized_cv) {
    specific_worker->kick_state = KICKED;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  } else {
    specific_worker->kick_state = KICKED;
    return GRPC_ERROR_NONE;
  }
}

static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {}

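/* Called by the timer system to wake 'some poller'. The g_timer_kick flag
 * tells whichever thread next drains the global wakeup fd (in pollset_epoll)
 * to also call grpc_timer_consume_kick(). */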
static grpc_error *kick_poller(void) {
  gpr_atm_no_barrier_store(&g_timer_kick, 1);
  return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}

/*******************************************************************************
 * Workqueue Definitions
 */

#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
                                     const char *file, int line,
                                     const char *reason) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
                            const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx,
                            grpc_workqueue *workqueue) {}
#endif

static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
  return grpc_schedule_on_exec_ctx;
}

/*******************************************************************************
 * Pollset-set Definitions
 */

static grpc_pollset_set *pollset_set_create(void) {
  return (grpc_pollset_set *)((intptr_t)0xdeafbeef);
}

static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_pollset_set *pss) {}

static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

/*******************************************************************************
 * Event engine binding
 */

static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
}

static const grpc_event_engine_vtable vtable = {
    .pollset_size = sizeof(grpc_pollset),

    .fd_create = fd_create,
    .fd_wrapped_fd = fd_wrapped_fd,
    .fd_orphan = fd_orphan,
    .fd_shutdown = fd_shutdown,
    .fd_is_shutdown = fd_is_shutdown,
    .fd_notify_on_read = fd_notify_on_read,
    .fd_notify_on_write = fd_notify_on_write,
    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
    .fd_get_workqueue = fd_get_workqueue,

    .pollset_init = pollset_init,
    .pollset_shutdown = pollset_shutdown,
    .pollset_destroy = pollset_destroy,
    .pollset_work = pollset_work,
    .pollset_kick = pollset_kick,
    .pollset_add_fd = pollset_add_fd,

    .pollset_set_create = pollset_set_create,
    .pollset_set_destroy = pollset_set_destroy,
    .pollset_set_add_pollset = pollset_set_add_pollset,
    .pollset_set_del_pollset = pollset_set_del_pollset,
    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
    .pollset_set_add_fd = pollset_set_add_fd,
    .pollset_set_del_fd = pollset_set_del_fd,

    .kick_poller = kick_poller,

    .workqueue_ref = workqueue_ref,
    .workqueue_unref = workqueue_unref,
    .workqueue_scheduler = workqueue_scheduler,

    .shutdown_engine = shutdown_engine,
};

/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
 * Create a dummy epoll_fd to make sure epoll support is available */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
  if (!grpc_has_wakeup_fd()) {
    return NULL;
  }

  g_epfd = epoll_create1(EPOLL_CLOEXEC);
  if (g_epfd < 0) {
    gpr_log(GPR_ERROR, "epoll unavailable");
    return NULL;
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    close(g_epfd);
    fd_global_shutdown();
    return NULL;
  }

  return &vtable;
}

#else /* defined(GRPC_LINUX_EPOLL) */
#if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_posix.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
 * NULL */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
  return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */
#endif /* !defined(GRPC_LINUX_EPOLL) */