blob: fdd6384c8675df7d916bc1361ea3b353e437a7c4 [file] [log] [blame]
Craig Tillerc67cc992017-04-27 10:15:51 -07001/*
2 *
Craig Tillerd4838a92017-04-27 12:08:18 -07003 * Copyright 2017, Google Inc.
Craig Tillerc67cc992017-04-27 10:15:51 -07004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34#include "src/core/lib/iomgr/port.h"
35
36/* This polling engine is only relevant on linux kernels supporting epoll() */
37#ifdef GRPC_LINUX_EPOLL
38
Craig Tiller4509c472017-04-27 19:05:13 +000039#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070040
41#include <assert.h>
42#include <errno.h>
43#include <poll.h>
44#include <pthread.h>
45#include <string.h>
46#include <sys/epoll.h>
47#include <sys/socket.h>
48#include <unistd.h>
49
50#include <grpc/support/alloc.h>
Craig Tiller6de05932017-04-28 09:17:38 -070051#include <grpc/support/cpu.h>
Craig Tillerc67cc992017-04-27 10:15:51 -070052#include <grpc/support/log.h>
53#include <grpc/support/string_util.h>
54#include <grpc/support/tls.h>
55#include <grpc/support/useful.h>
56
57#include "src/core/lib/iomgr/ev_posix.h"
58#include "src/core/lib/iomgr/iomgr_internal.h"
59#include "src/core/lib/iomgr/lockfree_event.h"
60#include "src/core/lib/iomgr/timer.h"
61#include "src/core/lib/iomgr/wakeup_fd_posix.h"
62#include "src/core/lib/iomgr/workqueue.h"
63#include "src/core/lib/profiling/timers.h"
64#include "src/core/lib/support/block_annotate.h"
65
66/* TODO: sreek: Right now, this wakes up all pollers. In future we should make
67 * sure to wake up one polling thread (which can wake up other threads if
68 * needed) */
69static grpc_wakeup_fd global_wakeup_fd;
70static int g_epfd;
Craig Tiller375eb252017-04-27 23:29:12 +000071static bool g_timer_kick = false;
Craig Tillerc67cc992017-04-27 10:15:51 -070072
73/*******************************************************************************
74 * Fd Declarations
75 */
76
/* A wrapped file descriptor. Registered once with the single global epoll
   set (g_epfd) at creation and kept there for its entire lifetime.
   Instances are freelisted rather than freed -- see the comment above
   fd_freelist for why. */
struct grpc_fd {
  int fd;

  /* Lockfree-event state words for read/write readiness; manipulated via
     the grpc_lfev_* helpers. */
  gpr_atm read_closure;
  gpr_atm write_closure;

  /* Intrusive link for the fd freelist (fd_freelist). */
  struct grpc_fd *freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};
91
92static void fd_global_init(void);
93static void fd_global_shutdown(void);
94
95/*******************************************************************************
96 * Pollset Declarations
97 */
98
/* Doubly-linked-list links embedded in each worker: one pair per list the
   worker can simultaneously belong to. */
typedef struct pollset_worker_link {
  grpc_pollset_worker *next;
  grpc_pollset_worker *prev;
} pollset_worker_link;

/* The lists a worker participates in: its owning pollset's worker ring
   (PWL_POLLSET) and the global ring of pollable workers (PWL_POLLABLE). */
typedef enum {
  PWL_POLLSET,
  PWL_POLLABLE,
  POLLSET_WORKER_LINK_COUNT
} pollset_worker_links;

struct grpc_pollset_worker {
  bool kicked;         /* has this worker been kicked (should stop waiting)? */
  bool initialized_cv; /* was |cv| initialized (worker had to cv-wait)? */
  pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
  gpr_cv cv;
};

/* A group of pollsets sharing one mutex; pollsets are hashed into a
   neighbourhood by CPU in pollset_init. Padded out to a cache line to avoid
   false sharing between adjacent neighbourhoods. */
typedef struct pollset_neighbourhood {
  gpr_mu mu;
  grpc_pollset *active_root;
  grpc_pollset *inactive_root;
  bool seen_inactive;
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighbourhood;

struct grpc_pollset {
  gpr_mu mu;
  pollset_neighbourhood *neighbourhood;
  grpc_pollset_worker *root_worker; /* ring of workers in this pollset */
  bool kicked_without_poller;       /* kick arrived while nobody was polling */
  bool seen_inactive;
  bool shutting_down;          /* Is the pollset shutting down ? */
  bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */
  grpc_closure *shutdown_closure; /* Called after shutdown is complete */
  /* Intrusive links in the neighbourhood's pollset ring. */
  grpc_pollset *next;
  grpc_pollset *prev;
};
138
139/*******************************************************************************
140 * Pollset-set Declarations
141 */
Craig Tiller6de05932017-04-28 09:17:38 -0700142
Craig Tillerc67cc992017-04-27 10:15:51 -0700143struct grpc_pollset_set {};
144
145/*******************************************************************************
146 * Common helpers
147 */
148
149static bool append_error(grpc_error **composite, grpc_error *error,
150 const char *desc) {
151 if (error == GRPC_ERROR_NONE) return true;
152 if (*composite == GRPC_ERROR_NONE) {
153 *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
154 }
155 *composite = grpc_error_add_child(*composite, error);
156 return false;
157}
158
159/*******************************************************************************
160 * Fd Definitions
161 */
162
163/* We need to keep a freelist not because of any concerns of malloc performance
164 * but instead so that implementations with multiple threads in (for example)
165 * epoll_wait deal with the race between pollset removal and incoming poll
166 * notifications.
167 *
168 * The problem is that the poller ultimately holds a reference to this
169 * object, so it is very difficult to know when is safe to free it, at least
170 * without some expensive synchronization.
171 *
172 * If we keep the object freelisted, in the worst case losing this race just
173 * becomes a spurious read notification on a reused fd.
174 */
175
176/* The alarm system needs to be able to wakeup 'some poller' sometimes
177 * (specifically when a new alarm needs to be triggered earlier than the next
178 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
179 * case occurs. */
180
181static grpc_fd *fd_freelist = NULL;
182static gpr_mu fd_freelist_mu;
183
Craig Tillerc67cc992017-04-27 10:15:51 -0700184static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
185
186static void fd_global_shutdown(void) {
187 gpr_mu_lock(&fd_freelist_mu);
188 gpr_mu_unlock(&fd_freelist_mu);
189 while (fd_freelist != NULL) {
190 grpc_fd *fd = fd_freelist;
191 fd_freelist = fd_freelist->freelist_next;
Craig Tillerc67cc992017-04-27 10:15:51 -0700192 gpr_free(fd);
193 }
194 gpr_mu_destroy(&fd_freelist_mu);
195}
196
197static grpc_fd *fd_create(int fd, const char *name) {
198 grpc_fd *new_fd = NULL;
199
200 gpr_mu_lock(&fd_freelist_mu);
201 if (fd_freelist != NULL) {
202 new_fd = fd_freelist;
203 fd_freelist = fd_freelist->freelist_next;
204 }
205 gpr_mu_unlock(&fd_freelist_mu);
206
207 if (new_fd == NULL) {
208 new_fd = gpr_malloc(sizeof(grpc_fd));
Craig Tillerc67cc992017-04-27 10:15:51 -0700209 }
210
Craig Tillerc67cc992017-04-27 10:15:51 -0700211 new_fd->fd = fd;
Craig Tillerc67cc992017-04-27 10:15:51 -0700212 grpc_lfev_init(&new_fd->read_closure);
213 grpc_lfev_init(&new_fd->write_closure);
214 gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);
215
216 new_fd->freelist_next = NULL;
Craig Tillerc67cc992017-04-27 10:15:51 -0700217
218 char *fd_name;
219 gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
220 grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
221#ifdef GRPC_FD_REF_COUNT_DEBUG
222 gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
223#endif
224 gpr_free(fd_name);
Craig Tiller9ddb3152017-04-27 21:32:56 +0000225
226 struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
227 .data.ptr = new_fd};
228 if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
229 gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
230 }
231
Craig Tillerc67cc992017-04-27 10:15:51 -0700232 return new_fd;
233}
234
Craig Tiller4509c472017-04-27 19:05:13 +0000235static int fd_wrapped_fd(grpc_fd *fd) { return fd->fd; }
Craig Tillerc67cc992017-04-27 10:15:51 -0700236
Craig Tiller9ddb3152017-04-27 21:32:56 +0000237/* Might be called multiple times */
238static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
239 if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
240 GRPC_ERROR_REF(why))) {
241 shutdown(fd->fd, SHUT_RDWR);
242 grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
243 }
244 GRPC_ERROR_UNREF(why);
245}
246
Craig Tillerc67cc992017-04-27 10:15:51 -0700247static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
248 grpc_closure *on_done, int *release_fd,
249 const char *reason) {
Craig Tillerc67cc992017-04-27 10:15:51 -0700250 grpc_error *error = GRPC_ERROR_NONE;
Craig Tillerc67cc992017-04-27 10:15:51 -0700251
Craig Tiller9ddb3152017-04-27 21:32:56 +0000252 if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
253 fd_shutdown(exec_ctx, fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason));
254 }
255
Craig Tillerc67cc992017-04-27 10:15:51 -0700256 /* If release_fd is not NULL, we should be relinquishing control of the file
257 descriptor fd->fd (but we still own the grpc_fd structure). */
258 if (release_fd != NULL) {
259 *release_fd = fd->fd;
260 } else {
261 close(fd->fd);
Craig Tillerc67cc992017-04-27 10:15:51 -0700262 }
263
Craig Tiller4509c472017-04-27 19:05:13 +0000264 grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_REF(error));
Craig Tillerc67cc992017-04-27 10:15:51 -0700265
Craig Tiller4509c472017-04-27 19:05:13 +0000266 grpc_iomgr_unregister_object(&fd->iomgr_object);
267 grpc_lfev_destroy(&fd->read_closure);
268 grpc_lfev_destroy(&fd->write_closure);
Craig Tillerc67cc992017-04-27 10:15:51 -0700269
Craig Tiller4509c472017-04-27 19:05:13 +0000270 gpr_mu_lock(&fd_freelist_mu);
271 fd->freelist_next = fd_freelist;
272 fd_freelist = fd;
273 gpr_mu_unlock(&fd_freelist_mu);
Craig Tillerc67cc992017-04-27 10:15:51 -0700274}
275
276static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
277 grpc_fd *fd) {
278 gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
279 return (grpc_pollset *)notifier;
280}
281
/* True once fd_shutdown has taken effect (read side tracks shutdown state). */
static bool fd_is_shutdown(grpc_fd *fd) {
  return grpc_lfev_is_shutdown(&fd->read_closure);
}
285
/* Arrange for |closure| to run when the fd becomes readable (or immediately
   if readiness/shutdown was already recorded). */
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
}
290
/* Arrange for |closure| to run when the fd becomes writable (or immediately
   if readiness/shutdown was already recorded). */
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}
295
/* This engine has no per-fd workqueues. */
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
  return NULL; /* TODO(ctiller): add a global workqueue */
}
299
/* Mark |fd| readable, firing any pending notify_on_read closure, and record
   which pollset noticed. */
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  grpc_lfev_set_ready(exec_ctx, &fd->read_closure);

  /* Note, it is possible that fd_become_readable might be called twice with
     different 'notifier's when an fd becomes readable and it is in two epoll
     sets (This can happen briefly during polling island merges). In such cases
     it does not really matter which notifier is set as the
     read_notifier_pollset (They would both point to the same polling island
     anyway). */
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
312
/* Mark |fd| writable, firing any pending notify_on_write closure. */
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
}
316
317/*******************************************************************************
318 * Pollset Definitions
319 */
320
Craig Tiller6de05932017-04-28 09:17:38 -0700321GPR_TLS_DECL(g_current_thread_pollset);
322GPR_TLS_DECL(g_current_thread_worker);
323static gpr_atm g_active_poller;
324static pollset_neighbourhood *g_neighbourhoods;
325
Craig Tillerc67cc992017-04-27 10:15:51 -0700326/* Return true if first in list */
327static bool worker_insert(grpc_pollset_worker **root, pollset_worker_links link,
328 grpc_pollset_worker *worker) {
329 if (*root == NULL) {
330 *root = worker;
331 worker->links[link].next = worker->links[link].prev = worker;
332 return true;
333 } else {
334 worker->links[link].next = *root;
335 worker->links[link].prev = worker->links[link].next->links[link].prev;
336 worker->links[link].next->links[link].prev = worker;
337 worker->links[link].prev->links[link].next = worker;
338 return false;
339 }
340}
341
342/* Return true if last in list */
343typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;
344
345static worker_remove_result worker_remove(grpc_pollset_worker **root,
346 pollset_worker_links link,
347 grpc_pollset_worker *worker) {
348 if (worker == *root) {
349 if (worker == worker->links[link].next) {
350 *root = NULL;
351 return EMPTIED;
352 } else {
353 *root = worker->links[link].next;
354 worker->links[link].prev->links[link].next = worker->links[link].next;
355 worker->links[link].next->links[link].prev = worker->links[link].prev;
356 return NEW_ROOT;
357 }
358 } else {
359 worker->links[link].prev->links[link].next = worker->links[link].next;
360 worker->links[link].next->links[link].prev = worker->links[link].prev;
361 return REMOVED;
362 }
363}
364
Craig Tiller4509c472017-04-27 19:05:13 +0000365static grpc_error *pollset_global_init(void) {
Craig Tiller4509c472017-04-27 19:05:13 +0000366 gpr_tls_init(&g_current_thread_pollset);
367 gpr_tls_init(&g_current_thread_worker);
Craig Tiller6de05932017-04-28 09:17:38 -0700368 gpr_atm_no_barrier_store(&g_active_poller, 0);
Craig Tiller375eb252017-04-27 23:29:12 +0000369 global_wakeup_fd.read_fd = -1;
370 grpc_error *err = grpc_wakeup_fd_init(&global_wakeup_fd);
371 if (err != GRPC_ERROR_NONE) return err;
Craig Tiller4509c472017-04-27 19:05:13 +0000372 struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
373 .data.ptr = &global_wakeup_fd};
374 if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd, &ev) != 0) {
375 return GRPC_OS_ERROR(errno, "epoll_ctl");
376 }
377 return GRPC_ERROR_NONE;
378}
379
/* Tear down pollset globals; skip the wakeup fd if init never created it
   (read_fd stays -1 in that case). */
static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
}
385
386static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
Craig Tiller6de05932017-04-28 09:17:38 -0700387 gpr_mu_init(&pollset->mu);
388 *mu = &pollset->mu;
389 pollset->neighbourhood = &g_neighbourhoods[gpr_cpu_current_cpu()];
390 pollset->seen_inactive = true;
391 pollset->next = pollset->prev = pollset;
392}
393
394static void pollset_destroy(grpc_pollset *pollset) {
395 gpr_mu_destroy(&pollset->mu);
396 gpr_mu_lock(&pollset->neighbourhood->mu);
397 pollset->prev->next = pollset->next;
398 pollset->next->prev = pollset->prev;
399 if (pollset == pollset->neighbourhood->active_root) {
400 pollset->neighbourhood->active_root =
401 pollset->next == pollset ? NULL : pollset->next;
402 } else if (pollset == pollset->neighbourhood->inactive_root) {
403 pollset->neighbourhood->inactive_root =
404 pollset->next == pollset ? NULL : pollset->next;
405 }
406 gpr_mu_unlock(&pollset->neighbourhood->mu);
Craig Tiller4509c472017-04-27 19:05:13 +0000407}
408
409static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
410 grpc_error *error = GRPC_ERROR_NONE;
411 if (pollset->root_worker != NULL) {
412 grpc_pollset_worker *worker = pollset->root_worker;
413 do {
414 if (worker->initialized_cv) {
415 worker->kicked = true;
416 gpr_cv_signal(&worker->cv);
417 } else {
418 append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
419 "pollset_shutdown");
420 }
421
422 worker = worker->links[PWL_POLLSET].next;
423 } while (worker != pollset->root_worker);
424 }
425 return error;
426}
427
428static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
429 grpc_pollset *pollset) {
430 if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL) {
431 grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
432 pollset->shutdown_closure = NULL;
433 }
434}
435
/* Begin shutting down |pollset|: record |closure| to run once all workers
   have drained, kick every worker out of its wait, and complete immediately
   if no workers are present. Only one shutdown may be in flight at a time. */
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                             grpc_closure *closure) {
  GPR_ASSERT(pollset->shutdown_closure == NULL);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
443
Craig Tiller4509c472017-04-27 19:05:13 +0000444#define MAX_EPOLL_EVENTS 100
445
446static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
447 gpr_timespec now) {
448 gpr_timespec timeout;
449 if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
450 return -1;
451 }
452
453 if (gpr_time_cmp(deadline, now) <= 0) {
454 return 0;
455 }
456
457 static const gpr_timespec round_up = {
458 .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
459 timeout = gpr_time_sub(deadline, now);
460 int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
461 return millis >= 1 ? millis : 1;
462}
463
/* Block in epoll_wait on the single global epoll set until the deadline (or
   an event), then dispatch readiness to the affected fds. Called WITHOUT the
   pollset lock held (pollset_work drops it first). */
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 gpr_timespec now, gpr_timespec deadline) {
  struct epoll_event events[MAX_EPOLL_EVENTS];
  static const char *err_desc = "pollset_poll";

  int timeout = poll_deadline_to_millis_timeout(deadline, now);

  /* Only mark a blocking region if we may actually block (timeout != 0). */
  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    r = epoll_wait(g_epfd, events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR); /* retry on signal interruption */
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  grpc_error *error = GRPC_ERROR_NONE;
  for (int i = 0; i < r; i++) {
    void *data_ptr = events[i].data.ptr;
    if (data_ptr == &global_wakeup_fd) {
      /* Global wakeup: possibly a timer kick (kick_poller), always drain the
         wakeup fd so it can fire again.
         NOTE(review): g_timer_kick is read here without the lock that
         kick_poller writes it under -- confirm the intended synchronization
         against the full revision. */
      if (g_timer_kick) {
        g_timer_kick = false;
        grpc_timer_consume_kick();
      }
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd *fd = (grpc_fd *)(data_ptr);
      /* Error/hangup wakes both directions so pending closures observe the
         failure via the usual read/write paths. */
      bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (events[i].events & EPOLLOUT) != 0;
      if (read_ev || cancel) {
        fd_become_readable(exec_ctx, fd, pollset);
      }
      if (write_ev || cancel) {
        fd_become_writable(exec_ctx, fd);
      }
    }
  }

  return error;
}
510
/* Register |worker| with |pollset| and with the global pollable ring, then
   wait (on a condvar) until this worker is promoted to the designated poller,
   is kicked, or the deadline passes.
   Returns true iff the caller should proceed to poll: the worker neither
   timed out nor was kicked, and the pollset is not shutting down.
   NOTE(review): g_root_worker and g_pollset_mu are not declared in the
   visible portion of this file (only g_active_poller / g_neighbourhoods are)
   -- this blame view appears to mix revisions; confirm their declarations
   against the full file. */
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
                         grpc_pollset_worker **worker_hdl, gpr_timespec *now,
                         gpr_timespec deadline) {
  bool do_poll = true;
  if (worker_hdl != NULL) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;

  worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
  if (!worker_insert(&g_root_worker, PWL_POLLABLE, worker)) {
    /* Not the root of the pollable ring: wait until promoted or kicked.
       gpr_cv_wait returns non-zero on timeout. */
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (do_poll && g_root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &g_pollset_mu, deadline)) {
        do_poll = false;
      } else if (worker->kicked) {
        do_poll = false;
      }
    }
    /* We may have slept for a while: refresh the caller's clock. */
    *now = gpr_now(now->clock_type);
  }

  return do_poll && pollset->shutdown_closure == NULL;
}
535
/* Undo begin_worker: remove |worker| from the global pollable ring and the
   pollset's worker ring. If removal promoted a new global root, wake it so
   it starts polling; if the pollset's ring emptied, a pending shutdown can
   now complete.
   NOTE(review): g_root_worker is not declared in the visible portion of this
   file -- confirm against the full revision. */
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  if (NEW_ROOT == worker_remove(&g_root_worker, PWL_POLLABLE, worker)) {
    /* A successor became the designated poller: signal it awake. */
    gpr_cv_signal(&g_root_worker->cv);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
}
549
/* The pollset lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock during the
   course of its execution but it will always re-acquire the lock and ensure
   that it is held by the time the function returns.
   NOTE(review): the body locks/unlocks g_pollset_mu, which is not declared
   in the visible portion of this file (and the header comment mentions
   pollset->po.mu) -- confirm which mutex guards this path in the full
   revision. */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker **worker_hdl,
                                gpr_timespec now, gpr_timespec deadline) {
  grpc_pollset_worker worker;
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollset_work";
  if (pollset->kicked_without_poller) {
    /* A kick arrived while no worker was present: consume it and return
       immediately so the caller can re-examine its state. */
    pollset->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }
  if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!pollset->shutdown_closure);
    /* Drop the lock for the duration of the (possibly blocking) epoll wait
       and closure execution. */
    gpr_mu_unlock(&g_pollset_mu);
    append_error(&error, pollset_epoll(exec_ctx, pollset, now, deadline),
                 err_desc);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&g_pollset_mu);
    gpr_tls_set(&g_current_thread_pollset, 0);
    gpr_tls_set(&g_current_thread_worker, 0);
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
  end_worker(exec_ctx, pollset, &worker, worker_hdl);
  return error;
}
580
/* Wake a worker of |pollset| (any worker when |specific_worker| is NULL).
   The designated poller (blocked in epoll_wait) is woken via the global
   wakeup fd; cv-waiting workers are signalled; already-kicked or
   current-thread targets are short-circuited.
   NOTE(review): g_root_worker is not declared in the visible portion of this
   file -- confirm against the full revision. */
static grpc_error *pollset_kick(grpc_pollset *pollset,
                                grpc_pollset_worker *specific_worker) {
  if (specific_worker == NULL) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      grpc_pollset_worker *root_worker = pollset->root_worker;
      if (root_worker == NULL) {
        /* Nobody is polling: latch the kick so the next pollset_work call
           returns immediately. */
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      }
      grpc_pollset_worker *next_worker = root_worker->links[PWL_POLLSET].next;
      if (root_worker == next_worker && root_worker == g_root_worker) {
        /* Sole worker, and it is the global designated poller: break it out
           of epoll_wait via the wakeup fd. */
        root_worker->kicked = true;
        return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
      } else {
        /* Wake a cv-waiting worker instead. */
        next_worker->kicked = true;
        gpr_cv_signal(&next_worker->cv);
        return GRPC_ERROR_NONE;
      }
    } else {
      /* Kicking our own pollset from inside pollset_work: nothing to do. */
      return GRPC_ERROR_NONE;
    }
  } else if (specific_worker->kicked) {
    return GRPC_ERROR_NONE;
  } else if (gpr_tls_get(&g_current_thread_worker) ==
             (intptr_t)specific_worker) {
    /* Kicking ourselves: marking suffices. */
    specific_worker->kicked = true;
    return GRPC_ERROR_NONE;
  } else if (specific_worker == g_root_worker) {
    /* Target is the designated poller, blocked in epoll_wait. */
    specific_worker->kicked = true;
    return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
  } else {
    /* Target is waiting on its condvar. */
    specific_worker->kicked = true;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  }
}
617
/* No-op: every fd is registered with the single global epoll set (g_epfd)
   at fd_create time, so pollsets carry no per-fd state. */
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {}
620
/* Timer-system hook: flag a timer kick and wake the designated poller via
   the global wakeup fd so it re-evaluates timer deadlines.
   NOTE(review): g_pollset_mu is not declared in the visible portion of this
   file -- confirm against the full revision. */
static grpc_error *kick_poller(void) {
  gpr_mu_lock(&g_pollset_mu);
  g_timer_kick = true;
  gpr_mu_unlock(&g_pollset_mu);
  return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}
627
628/*******************************************************************************
629 * Workqueue Definitions
630 */
631
/* Workqueue ref/unref are no-ops in this engine (there are no real
   workqueues); the two variants keep the debug and release signatures in
   step with the ev_posix vtable. */
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
                                     const char *file, int line,
                                     const char *reason) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
                            const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx,
                            grpc_workqueue *workqueue) {}
#endif
649
/* With no real workqueues, closures are simply scheduled on the exec_ctx. */
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
  return grpc_schedule_on_exec_ctx;
}
Craig Tillerc67cc992017-04-27 10:15:51 -0700653
654/*******************************************************************************
655 * Pollset-set Definitions
656 */
657
/* Pollset-sets are no-ops in this engine; return a distinctive non-NULL
   sentinel so callers can treat the result as a valid handle. */
static grpc_pollset_set *pollset_set_create(void) {
  return (grpc_pollset_set *)((intptr_t)0xdeafbeef);
}
661
/* All pollset_set operations are no-ops: every fd joins the single global
   epoll set at creation, so there is no per-set membership to maintain. */
static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_pollset_set *pss) {}

static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}
684
685/*******************************************************************************
686 * Event engine binding
687 */
688
689static void shutdown_engine(void) {
690 fd_global_shutdown();
691 pollset_global_shutdown();
Craig Tillerc67cc992017-04-27 10:15:51 -0700692}
693
/* Event-engine vtable handed back to the iomgr; wires this file's static
   functions into the generic ev_posix interface. */
static const grpc_event_engine_vtable vtable = {
    .pollset_size = sizeof(grpc_pollset),

    .fd_create = fd_create,
    .fd_wrapped_fd = fd_wrapped_fd,
    .fd_orphan = fd_orphan,
    .fd_shutdown = fd_shutdown,
    .fd_is_shutdown = fd_is_shutdown,
    .fd_notify_on_read = fd_notify_on_read,
    .fd_notify_on_write = fd_notify_on_write,
    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
    .fd_get_workqueue = fd_get_workqueue,

    .pollset_init = pollset_init,
    .pollset_shutdown = pollset_shutdown,
    .pollset_destroy = pollset_destroy,
    .pollset_work = pollset_work,
    .pollset_kick = pollset_kick,
    .pollset_add_fd = pollset_add_fd,

    .pollset_set_create = pollset_set_create,
    .pollset_set_destroy = pollset_set_destroy,
    .pollset_set_add_pollset = pollset_set_add_pollset,
    .pollset_set_del_pollset = pollset_set_del_pollset,
    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
    .pollset_set_add_fd = pollset_set_add_fd,
    .pollset_set_del_fd = pollset_set_del_fd,

    .kick_poller = kick_poller,

    .workqueue_ref = workqueue_ref,
    .workqueue_unref = workqueue_unref,
    .workqueue_scheduler = workqueue_scheduler,

    .shutdown_engine = shutdown_engine,
};
731
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
 * Create a dummy epoll_fd to make sure epoll support is available.
 * Returns the engine vtable on success, NULL when epoll (or a wakeup fd
 * mechanism) is unavailable or global init fails.
 * |explicit_request| is currently unused by this engine. */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
  if (!grpc_has_wakeup_fd()) {
    return NULL;
  }

  /* EPOLL_CLOEXEC keeps the fd from leaking across exec. */
  g_epfd = epoll_create1(EPOLL_CLOEXEC);
  if (g_epfd < 0) {
    gpr_log(GPR_ERROR, "epoll unavailable");
    return NULL;
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    /* Roll back everything initialized so far. */
    close(g_epfd);
    fd_global_shutdown();
    return NULL;
  }

  return &vtable;
}
755
756#else /* defined(GRPC_LINUX_EPOLL) */
757#if defined(GRPC_POSIX_SOCKET)
758#include "src/core/lib/iomgr/ev_posix.h"
759/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
760 * NULL */
/* Fallback when GRPC_LINUX_EPOLL is not defined: this engine cannot be used
   on this platform, so report unavailability. */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
  return NULL;
}
Craig Tillerc67cc992017-04-27 10:15:51 -0700764#endif /* defined(GRPC_POSIX_SOCKET) */
765#endif /* !defined(GRPC_LINUX_EPOLL) */