/*
 *
 * Copyright 2016, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "src/core/lib/iomgr/port.h"

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL

#include "src/core/lib/iomgr/ev_epoll1_linux.h"

#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/iomgr/workqueue.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/block_annotate.h"
/* TODO: sreek: Right now, this wakes up all pollers. In the future we should
 * make sure to wake up only one polling thread (which can wake up other
 * threads if needed). */
static grpc_wakeup_fd global_wakeup_fd;
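/* NOTE: this engine uses a single, process-wide epoll set: every fd is added
 * to g_epfd exactly once in fd_create() and all pollset workers wait on the
 * same set in pollset_epoll(). */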
static int g_epfd;

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;

  gpr_atm read_closure;
  gpr_atm write_closure;

  struct grpc_fd *freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored here is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

typedef struct pollset_worker_link {
  grpc_pollset_worker *next;
  grpc_pollset_worker *prev;
} pollset_worker_link;

typedef enum {
  PWL_POLLSET,
  PWL_POLLABLE,
  POLLSET_WORKER_LINK_COUNT
} pollset_worker_links;

struct grpc_pollset_worker {
  bool kicked;
  bool initialized_cv;
  pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
  gpr_cv cv;
};

struct grpc_pollset {
  grpc_pollset_worker *root_worker;
  bool kicked_without_poller;

  bool shutting_down;             /* Is the pollset shutting down? */
  bool finish_shutdown_called;    /* Has finish_shutdown_locked() been called? */
  grpc_closure *shutdown_closure; /* Called after shutdown is complete */
};

/*******************************************************************************
 * Pollset-set Declarations
 */
struct grpc_pollset_set {};

/*******************************************************************************
 * Common helpers
 */

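/* Fold |error| into |*composite| (creating the composite from |desc| on first
 * use). Returns true if |error| was GRPC_ERROR_NONE, false otherwise. */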
static bool append_error(grpc_error **composite, grpc_error *error,
                         const char *desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wake up 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd *fd_create(int fd, const char *name) {
  grpc_fd *new_fd = NULL;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == NULL) {
    new_fd = gpr_malloc(sizeof(grpc_fd));
  }

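  /* Register the fd with the single global epoll set exactly once, with
     edge-triggered read and write interest; readiness is then delivered to
     the lockfree_event closures initialized below. */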
  struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
                           .data.ptr = new_fd};
  if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  new_fd->fd = fd;
  grpc_lfev_init(&new_fd->read_closure);
  grpc_lfev_init(&new_fd->write_closure);
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = NULL;

  char *fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifdef GRPC_FD_REF_COUNT_DEBUG
  gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
#endif
  gpr_free(fd_name);
  return new_fd;
}

static int fd_wrapped_fd(grpc_fd *fd) { return fd->fd; }

static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                      grpc_closure *on_done, int *release_fd,
                      const char *reason) {
  grpc_error *error = GRPC_ERROR_NONE;

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != NULL) {
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
  }

  grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  grpc_lfev_destroy(&fd->read_closure);
  grpc_lfev_destroy(&fd->write_closure);

  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
                                                  grpc_fd *fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset *)notifier;
}

static bool fd_is_shutdown(grpc_fd *fd) {
  return grpc_lfev_is_shutdown(&fd->read_closure);
}

/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
  if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
                             GRPC_ERROR_REF(why))) {
    shutdown(fd->fd, SHUT_RDWR);
    grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
}

static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
}

static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
  return NULL; /* TODO(ctiller): add a global workqueue */
}

static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  grpc_lfev_set_ready(exec_ctx, &fd->read_closure);

  /* Note, it is possible that fd_become_readable might be called twice with
     different 'notifier's when an fd becomes readable and it is in two epoll
     sets (This can happen briefly during polling island merges). In such cases
     it does not really matter which notifier is set as the
     read_notifier_pollset (They would both point to the same polling island
     anyway) */
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
}

/*******************************************************************************
 * Pollset Definitions
 */

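/* Workers are linked onto two intrusive circular doubly-linked lists: the
 * per-pollset list (PWL_POLLSET) and the global list of pollable workers
 * (PWL_POLLABLE), whose root, g_root_worker, is the worker allowed to call
 * epoll_wait(). */
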
/* Return true if first in list */
static bool worker_insert(grpc_pollset_worker **root, pollset_worker_links link,
                          grpc_pollset_worker *worker) {
  if (*root == NULL) {
    *root = worker;
    worker->links[link].next = worker->links[link].prev = worker;
    return true;
  } else {
    worker->links[link].next = *root;
    worker->links[link].prev = worker->links[link].next->links[link].prev;
    worker->links[link].next->links[link].prev = worker;
    worker->links[link].prev->links[link].next = worker;
    return false;
  }
}

/* Result of removing a worker from a list: the list became empty, a new root
   was chosen, or a non-root worker was removed */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset_worker **root,
                                          pollset_worker_links link,
                                          grpc_pollset_worker *worker) {
  if (worker == *root) {
    if (worker == worker->links[link].next) {
      *root = NULL;
      return EMPTIED;
    } else {
      *root = worker->links[link].next;
      worker->links[link].prev->links[link].next = worker->links[link].next;
      worker->links[link].next->links[link].prev = worker->links[link].prev;
      return NEW_ROOT;
    }
  } else {
    worker->links[link].prev->links[link].next = worker->links[link].next;
    worker->links[link].next->links[link].prev = worker->links[link].prev;
    return REMOVED;
  }
}

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);
static gpr_mu g_pollset_mu;
static grpc_pollset_worker *g_root_worker;

static grpc_error *pollset_global_init(void) {
  gpr_mu_init(&g_pollset_mu);
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
                           .data.ptr = &global_wakeup_fd};
  if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd, &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  gpr_mu_destroy(&g_pollset_mu);
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
}

static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
  *mu = &g_pollset_mu;
}

static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
  grpc_error *error = GRPC_ERROR_NONE;
  if (pollset->root_worker != NULL) {
    grpc_pollset_worker *worker = pollset->root_worker;
    do {
      if (worker->initialized_cv) {
        worker->kicked = true;
        gpr_cv_signal(&worker->cv);
      } else {
        append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                     "pollset_shutdown");
      }

      worker = worker->links[PWL_POLLSET].next;
    } while (worker != pollset->root_worker);
  }
  return error;
}

static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
                                          grpc_pollset *pollset) {
  if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL) {
    grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = NULL;
  }
}

static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                             grpc_closure *closure) {
  GPR_ASSERT(pollset->shutdown_closure == NULL);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
}

static void pollset_destroy(grpc_pollset *pollset) {}

#define MAX_EPOLL_EVENTS 100

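/* Convert a deadline into a relative millisecond timeout for epoll_wait():
   -1 to block indefinitely, 0 if the deadline has already passed, otherwise
   the remaining time rounded up so that we never wake up early. */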
static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                           gpr_timespec now) {
  gpr_timespec timeout;
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1;
  }

  if (gpr_time_cmp(deadline, now) <= 0) {
    return 0;
  }

  static const gpr_timespec round_up = {
      .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
  timeout = gpr_time_sub(deadline, now);
  int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
  return millis >= 1 ? millis : 1;
}

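/* Run one epoll_wait() pass over the global epoll set and mark the affected
   fds readable/writable. Called from pollset_work() with g_pollset_mu
   released. */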
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 gpr_timespec now, gpr_timespec deadline) {
  struct epoll_event events[MAX_EPOLL_EVENTS];
  static const char *err_desc = "pollset_poll";

  int timeout = poll_deadline_to_millis_timeout(deadline, now);

  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    r = epoll_wait(g_epfd, events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  grpc_error *error = GRPC_ERROR_NONE;
  for (int i = 0; i < r; i++) {
    void *data_ptr = events[i].data.ptr;
    if (data_ptr == &global_wakeup_fd) {
      grpc_timer_consume_kick();
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd *fd = (grpc_fd *)(data_ptr);
      bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (events[i].events & EPOLLOUT) != 0;
      if (read_ev || cancel) {
        fd_become_readable(exec_ctx, fd, pollset);
      }
      if (write_ev || cancel) {
        fd_become_writable(exec_ctx, fd);
      }
    }
  }

  return error;
}

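/* begin_worker/end_worker implement a 'turnstile': only the worker at the
   head of the global PWL_POLLABLE list (g_root_worker) calls epoll_wait();
   every other worker sleeps on its condition variable until it is promoted to
   root, kicked, or its deadline expires. */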
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
                         grpc_pollset_worker **worker_hdl, gpr_timespec *now,
                         gpr_timespec deadline) {
  bool do_poll = true;
  if (worker_hdl != NULL) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;

  worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
  if (!worker_insert(&g_root_worker, PWL_POLLABLE, worker)) {
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (do_poll && g_root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &g_pollset_mu, deadline)) {
        do_poll = false;
      } else if (worker->kicked) {
        do_poll = false;
      }
    }
    *now = gpr_now(now->clock_type);
  }

  return do_poll && pollset->shutdown_closure == NULL;
}

static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  if (NEW_ROOT == worker_remove(&g_root_worker, PWL_POLLABLE, worker)) {
    gpr_cv_signal(&g_root_worker->cv);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
}

/* The pollset mutex (g_pollset_mu, handed out by pollset_init()) must be held
   by the caller before calling this. pollset_work() may temporarily release
   the lock during the course of its execution but it will always re-acquire it
   and ensure that it is held by the time the function returns. */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker **worker_hdl,
                                gpr_timespec now, gpr_timespec deadline) {
  grpc_pollset_worker worker;
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollset_work";
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }
  if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!pollset->shutdown_closure);
    gpr_mu_unlock(&g_pollset_mu);
    append_error(&error, pollset_epoll(exec_ctx, pollset, now, deadline),
                 err_desc);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&g_pollset_mu);
    gpr_tls_set(&g_current_thread_pollset, 0);
    gpr_tls_set(&g_current_thread_worker, 0);
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
  end_worker(exec_ctx, pollset, &worker, worker_hdl);
  return error;
}

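/* Wake up a poller: with no specific worker, either record a kick for a
   pollset that currently has no poller, poke the global wakeup fd, or do
   nothing if this thread is already working on the pollset; with a specific
   worker, mark it kicked and signal its condition variable (or the wakeup fd
   if it is the current root poller). */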
static grpc_error *pollset_kick(grpc_pollset *pollset,
                                grpc_pollset_worker *specific_worker) {
  if (specific_worker == NULL) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      if (pollset->root_worker == NULL) {
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      } else {
        return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
      }
    } else {
      return GRPC_ERROR_NONE;
    }
  } else if (specific_worker->kicked) {
    return GRPC_ERROR_NONE;
  } else if (gpr_tls_get(&g_current_thread_worker) ==
             (intptr_t)specific_worker) {
    specific_worker->kicked = true;
    return GRPC_ERROR_NONE;
  } else if (specific_worker == g_root_worker) {
    specific_worker->kicked = true;
    return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
  } else {
    specific_worker->kicked = true;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  }
}

static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {}

static grpc_error *kick_poller(void) {
  return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}

/*******************************************************************************
 * Workqueue Definitions
 */

#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
                                     const char *file, int line,
                                     const char *reason) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
                            const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx,
                            grpc_workqueue *workqueue) {}
#endif

static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
  return grpc_schedule_on_exec_ctx;
}

/*******************************************************************************
 * Pollset-set Definitions
 */

static grpc_pollset_set *pollset_set_create(void) {
  return (grpc_pollset_set *)((intptr_t)0xdeafbeef);
}

static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_pollset_set *pss) {}

static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

/*******************************************************************************
 * Event engine binding
 */

static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
}

static const grpc_event_engine_vtable vtable = {
    .pollset_size = sizeof(grpc_pollset),

    .fd_create = fd_create,
    .fd_wrapped_fd = fd_wrapped_fd,
    .fd_orphan = fd_orphan,
    .fd_shutdown = fd_shutdown,
    .fd_is_shutdown = fd_is_shutdown,
    .fd_notify_on_read = fd_notify_on_read,
    .fd_notify_on_write = fd_notify_on_write,
    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
    .fd_get_workqueue = fd_get_workqueue,

    .pollset_init = pollset_init,
    .pollset_shutdown = pollset_shutdown,
    .pollset_destroy = pollset_destroy,
    .pollset_work = pollset_work,
    .pollset_kick = pollset_kick,
    .pollset_add_fd = pollset_add_fd,

    .pollset_set_create = pollset_set_create,
    .pollset_set_destroy = pollset_set_destroy,
    .pollset_set_add_pollset = pollset_set_add_pollset,
    .pollset_set_del_pollset = pollset_set_del_pollset,
    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
    .pollset_set_add_fd = pollset_set_add_fd,
    .pollset_set_del_fd = pollset_set_del_fd,

    .kick_poller = kick_poller,

    .workqueue_ref = workqueue_ref,
    .workqueue_unref = workqueue_unref,
    .workqueue_scheduler = workqueue_scheduler,

    .shutdown_engine = shutdown_engine,
};

/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
 * Create a dummy epoll_fd to make sure epoll support is available */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(void) {
  if (!grpc_has_wakeup_fd()) {
    return NULL;
  }

  g_epfd = epoll_create1(EPOLL_CLOEXEC);
  if (g_epfd < 0) {
    gpr_log(GPR_ERROR, "epoll unavailable");
    return NULL;
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    close(g_epfd);
    fd_global_shutdown();
    return NULL;
  }

  return &vtable;
}

#else /* defined(GRPC_LINUX_EPOLL) */
#if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_posix.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
 * NULL */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(void) { return NULL; }
#endif /* defined(GRPC_POSIX_SOCKET) */
#endif /* !defined(GRPC_LINUX_EPOLL) */