blob: d8d3cb6376633e893e18aba4d8730e9a8160c746 [file] [log] [blame]
Craig Tillerc67cc992017-04-27 10:15:51 -07001/*
2 *
Craig Tillerd4838a92017-04-27 12:08:18 -07003 * Copyright 2017, Google Inc.
Craig Tillerc67cc992017-04-27 10:15:51 -07004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
34#include "src/core/lib/iomgr/port.h"
35
36/* This polling engine is only relevant on linux kernels supporting epoll() */
37#ifdef GRPC_LINUX_EPOLL
38
Craig Tiller4509c472017-04-27 19:05:13 +000039#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070040
41#include <assert.h>
42#include <errno.h>
43#include <poll.h>
44#include <pthread.h>
45#include <string.h>
46#include <sys/epoll.h>
47#include <sys/socket.h>
48#include <unistd.h>
49
50#include <grpc/support/alloc.h>
51#include <grpc/support/log.h>
52#include <grpc/support/string_util.h>
53#include <grpc/support/tls.h>
54#include <grpc/support/useful.h>
55
56#include "src/core/lib/iomgr/ev_posix.h"
57#include "src/core/lib/iomgr/iomgr_internal.h"
58#include "src/core/lib/iomgr/lockfree_event.h"
59#include "src/core/lib/iomgr/timer.h"
60#include "src/core/lib/iomgr/wakeup_fd_posix.h"
61#include "src/core/lib/iomgr/workqueue.h"
62#include "src/core/lib/profiling/timers.h"
63#include "src/core/lib/support/block_annotate.h"
64
65/* TODO: sreek: Right now, this wakes up all pollers. In future we should make
66 * sure to wake up one polling thread (which can wake up other threads if
67 * needed) */
68static grpc_wakeup_fd global_wakeup_fd;
69static int g_epfd;
70
71/*******************************************************************************
72 * Fd Declarations
73 */
74
/* Per-file-descriptor state tracked by this polling engine. */
struct grpc_fd {
  int fd; /* the underlying OS file descriptor */

  /* Lock-free event state for read/write readiness; manipulated only through
     the grpc_lfev_* API (see lockfree_event.h). */
  gpr_atm read_closure;
  gpr_atm write_closure;

  /* Intrusive link for the fd freelist (see fd_freelist below). */
  struct grpc_fd *freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  /* Registration with iomgr for leak tracking/debug naming. */
  grpc_iomgr_object iomgr_object;
};
89
90static void fd_global_init(void);
91static void fd_global_shutdown(void);
92
93/*******************************************************************************
94 * Pollset Declarations
95 */
96
/* Doubly-linked-list node embedded in each worker; one per list the worker
   can be a member of. */
typedef struct pollset_worker_link {
  grpc_pollset_worker *next;
  grpc_pollset_worker *prev;
} pollset_worker_link;

/* The two lists a worker participates in simultaneously:
   - PWL_POLLSET: the per-pollset worker list (pollset->root_worker)
   - PWL_POLLABLE: the global "who may call epoll_wait next" list
     (g_root_worker) */
typedef enum {
  PWL_POLLSET,
  PWL_POLLABLE,
  POLLSET_WORKER_LINK_COUNT
} pollset_worker_links;

/* One worker per thread currently inside pollset_work(). */
struct grpc_pollset_worker {
  bool kicked;         /* set when the worker has been explicitly woken */
  bool initialized_cv; /* true iff 'cv' below was gpr_cv_init'ed */
  pollset_worker_link links[POLLSET_WORKER_LINK_COUNT];
  gpr_cv cv; /* non-root workers sleep on this, under g_pollset_mu */
};
114
/* Pollset state. All pollsets share the single global epoll set (g_epfd) and
   the single global mutex (g_pollset_mu); this struct only tracks the workers
   and shutdown state of one logical pollset. */
struct grpc_pollset {
  grpc_pollset_worker *root_worker; /* circular list head (PWL_POLLSET) */
  bool kicked_without_poller;       /* a kick arrived while no worker polled */

  bool shutting_down;          /* Is the pollset shutting down ? */
  bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */
  grpc_closure *shutdown_closure; /* Called after after shutdown is complete */
};
123
124/*******************************************************************************
125 * Pollset-set Declarations
126 */
127struct grpc_pollset_set {};
128
129/*******************************************************************************
130 * Common helpers
131 */
132
133static bool append_error(grpc_error **composite, grpc_error *error,
134 const char *desc) {
135 if (error == GRPC_ERROR_NONE) return true;
136 if (*composite == GRPC_ERROR_NONE) {
137 *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
138 }
139 *composite = grpc_error_add_child(*composite, error);
140 return false;
141}
142
143/*******************************************************************************
144 * Fd Definitions
145 */
146
147/* We need to keep a freelist not because of any concerns of malloc performance
148 * but instead so that implementations with multiple threads in (for example)
149 * epoll_wait deal with the race between pollset removal and incoming poll
150 * notifications.
151 *
152 * The problem is that the poller ultimately holds a reference to this
153 * object, so it is very difficult to know when is safe to free it, at least
154 * without some expensive synchronization.
155 *
156 * If we keep the object freelisted, in the worst case losing this race just
157 * becomes a spurious read notification on a reused fd.
158 */
159
160/* The alarm system needs to be able to wakeup 'some poller' sometimes
161 * (specifically when a new alarm needs to be triggered earlier than the next
162 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
163 * case occurs. */
164
165static grpc_fd *fd_freelist = NULL;
166static gpr_mu fd_freelist_mu;
167
Craig Tillerc67cc992017-04-27 10:15:51 -0700168static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
169
/* Tear down the fd freelist, freeing any grpc_fd objects still parked on it.
   Called once at engine shutdown; no other threads may touch fds by now. */
static void fd_global_shutdown(void) {
  /* Lock/unlock acts as a memory barrier so this thread observes the final
     freelist published by other threads before walking it unlocked. */
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}
180
/* Wrap OS descriptor |fd| in a grpc_fd (reusing a freelisted object when
   possible) and register it with the global epoll set in edge-triggered mode.
   |name| is used only for debug/tracing. Returns the new wrapper. */
static grpc_fd *fd_create(int fd, const char *name) {
  grpc_fd *new_fd = NULL;

  /* Try to reuse a freelisted grpc_fd (see the freelist rationale above). */
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == NULL) {
    new_fd = gpr_malloc(sizeof(grpc_fd));
  }

  new_fd->fd = fd;
  grpc_lfev_init(&new_fd->read_closure);
  grpc_lfev_init(&new_fd->write_closure);
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = NULL;

  char *fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifdef GRPC_FD_REF_COUNT_DEBUG
  gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
#endif
  gpr_free(fd_name);

  /* Register for read+write readiness, edge-triggered: with one shared epoll
     set, level-triggered would thunder every poller on every ready fd. */
  struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET),
                           .data.ptr = new_fd};
  if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    /* NOTE(review): failure is only logged — the fd is returned anyway and
       will simply never become ready; confirm this best-effort policy. */
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}
218
Craig Tiller4509c472017-04-27 19:05:13 +0000219static int fd_wrapped_fd(grpc_fd *fd) { return fd->fd; }
Craig Tillerc67cc992017-04-27 10:15:51 -0700220
Craig Tiller9ddb3152017-04-27 21:32:56 +0000221/* Might be called multiple times */
222static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
223 if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
224 GRPC_ERROR_REF(why))) {
225 shutdown(fd->fd, SHUT_RDWR);
226 grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
227 }
228 GRPC_ERROR_UNREF(why);
229}
230
Craig Tillerc67cc992017-04-27 10:15:51 -0700231static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
232 grpc_closure *on_done, int *release_fd,
233 const char *reason) {
Craig Tillerc67cc992017-04-27 10:15:51 -0700234 grpc_error *error = GRPC_ERROR_NONE;
Craig Tillerc67cc992017-04-27 10:15:51 -0700235
Craig Tiller9ddb3152017-04-27 21:32:56 +0000236 if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
237 fd_shutdown(exec_ctx, fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason));
238 }
239
Craig Tillerc67cc992017-04-27 10:15:51 -0700240 /* If release_fd is not NULL, we should be relinquishing control of the file
241 descriptor fd->fd (but we still own the grpc_fd structure). */
242 if (release_fd != NULL) {
243 *release_fd = fd->fd;
244 } else {
245 close(fd->fd);
Craig Tillerc67cc992017-04-27 10:15:51 -0700246 }
247
Craig Tiller4509c472017-04-27 19:05:13 +0000248 grpc_closure_sched(exec_ctx, on_done, GRPC_ERROR_REF(error));
Craig Tillerc67cc992017-04-27 10:15:51 -0700249
Craig Tiller4509c472017-04-27 19:05:13 +0000250 grpc_iomgr_unregister_object(&fd->iomgr_object);
251 grpc_lfev_destroy(&fd->read_closure);
252 grpc_lfev_destroy(&fd->write_closure);
Craig Tillerc67cc992017-04-27 10:15:51 -0700253
Craig Tiller4509c472017-04-27 19:05:13 +0000254 gpr_mu_lock(&fd_freelist_mu);
255 fd->freelist_next = fd_freelist;
256 fd_freelist = fd;
257 gpr_mu_unlock(&fd_freelist_mu);
Craig Tillerc67cc992017-04-27 10:15:51 -0700258}
259
260static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
261 grpc_fd *fd) {
262 gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
263 return (grpc_pollset *)notifier;
264}
265
/* True once fd_shutdown has run; the read closure carries the shutdown bit
   for the whole fd. */
static bool fd_is_shutdown(grpc_fd *fd) {
  return grpc_lfev_is_shutdown(&fd->read_closure);
}
269
Craig Tillerc67cc992017-04-27 10:15:51 -0700270static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
271 grpc_closure *closure) {
272 grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure);
273}
274
275static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
276 grpc_closure *closure) {
277 grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure);
278}
279
/* This engine has no per-fd workqueue; callers fall back to exec_ctx. */
static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
  return NULL; /* TODO(ctiller): add a global workqueue */
}
283
/* Mark |fd| readable (firing any pending notify_on_read closure) and record
   |notifier| as the pollset that observed the event. */
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  grpc_lfev_set_ready(exec_ctx, &fd->read_closure);

  /* Note, it is possible that fd_become_readable might be called twice with
     different 'notifier's when an fd becomes readable and it is in two epoll
     sets (This can happen briefly during polling island merges). In such cases
     it does not really matter which notifer is set as the read_notifier_pollset
     (They would both point to the same polling island anyway) */
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}
296
/* Mark |fd| writable, firing any pending notify_on_write closure. */
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  grpc_lfev_set_ready(exec_ctx, &fd->write_closure);
}
300
301/*******************************************************************************
302 * Pollset Definitions
303 */
304
305/* Return true if first in list */
306static bool worker_insert(grpc_pollset_worker **root, pollset_worker_links link,
307 grpc_pollset_worker *worker) {
308 if (*root == NULL) {
309 *root = worker;
310 worker->links[link].next = worker->links[link].prev = worker;
311 return true;
312 } else {
313 worker->links[link].next = *root;
314 worker->links[link].prev = worker->links[link].next->links[link].prev;
315 worker->links[link].next->links[link].prev = worker;
316 worker->links[link].prev->links[link].next = worker;
317 return false;
318 }
319}
320
/* Outcome of removing a worker from a circular list (the stale "return true
   if last in list" note predated this enum). */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

/* Unlink |worker| from the circular list (axis |link|) rooted at |*root|.
   Returns EMPTIED if the list is now empty, NEW_ROOT if |worker| was the
   root and a successor took over, REMOVED otherwise. Caller must hold
   g_pollset_mu. */
static worker_remove_result worker_remove(grpc_pollset_worker **root,
                                          pollset_worker_links link,
                                          grpc_pollset_worker *worker) {
  if (worker == *root) {
    if (worker == worker->links[link].next) {
      /* Sole element: list becomes empty. */
      *root = NULL;
      return EMPTIED;
    } else {
      *root = worker->links[link].next;
      worker->links[link].prev->links[link].next = worker->links[link].next;
      worker->links[link].next->links[link].prev = worker->links[link].prev;
      return NEW_ROOT;
    }
  } else {
    worker->links[link].prev->links[link].next = worker->links[link].next;
    worker->links[link].next->links[link].prev = worker->links[link].prev;
    return REMOVED;
  }
}
343
344GPR_TLS_DECL(g_current_thread_pollset);
345GPR_TLS_DECL(g_current_thread_worker);
Craig Tiller4509c472017-04-27 19:05:13 +0000346static gpr_mu g_pollset_mu;
347static grpc_pollset_worker *g_root_worker;
348
349static grpc_error *pollset_global_init(void) {
350 gpr_mu_init(&g_pollset_mu);
351 gpr_tls_init(&g_current_thread_pollset);
352 gpr_tls_init(&g_current_thread_worker);
353 struct epoll_event ev = {.events = (uint32_t)(EPOLLIN | EPOLLET),
354 .data.ptr = &global_wakeup_fd};
355 if (epoll_ctl(g_epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd, &ev) != 0) {
356 return GRPC_OS_ERROR(errno, "epoll_ctl");
357 }
358 return GRPC_ERROR_NONE;
359}
360
361static void pollset_global_shutdown(void) {
362 gpr_mu_destroy(&g_pollset_mu);
363 gpr_tls_destroy(&g_current_thread_pollset);
364 gpr_tls_destroy(&g_current_thread_worker);
365}
366
/* All pollsets share the one global mutex; hand it to the caller.
   NOTE(review): pollset's own fields are never initialized here — this
   appears to rely on callers zero-allocating the pollset memory; confirm. */
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
  *mu = &g_pollset_mu;
}
370
/* Wake every worker attached to |pollset|: cv-sleepers are signalled
   directly; the worker without a cv is the one inside epoll_wait, so it is
   woken via the global wakeup fd. Caller must hold g_pollset_mu. */
static grpc_error *pollset_kick_all(grpc_pollset *pollset) {
  grpc_error *error = GRPC_ERROR_NONE;
  if (pollset->root_worker != NULL) {
    grpc_pollset_worker *worker = pollset->root_worker;
    do {
      if (worker->initialized_cv) {
        worker->kicked = true;
        gpr_cv_signal(&worker->cv);
      } else {
        /* No cv means this worker is (or will be) blocked in epoll_wait. */
        append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                     "pollset_shutdown");
      }

      worker = worker->links[PWL_POLLSET].next;
    } while (worker != pollset->root_worker);
  }
  return error;
}
389
390static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
391 grpc_pollset *pollset) {
392 if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL) {
393 grpc_closure_sched(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
394 pollset->shutdown_closure = NULL;
395 }
396}
397
/* Begin pollset shutdown: record |closure|, kick all workers out, and finish
   immediately if no workers are active. Must not be called twice (asserted).
   Caller must hold g_pollset_mu. */
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                             grpc_closure *closure) {
  GPR_ASSERT(pollset->shutdown_closure == NULL);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
}
405
406static void pollset_destroy(grpc_pollset *pollset) {}
407
408#define MAX_EPOLL_EVENTS 100
409
410static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
411 gpr_timespec now) {
412 gpr_timespec timeout;
413 if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
414 return -1;
415 }
416
417 if (gpr_time_cmp(deadline, now) <= 0) {
418 return 0;
419 }
420
421 static const gpr_timespec round_up = {
422 .clock_type = GPR_TIMESPAN, .tv_sec = 0, .tv_nsec = GPR_NS_PER_MS - 1};
423 timeout = gpr_time_sub(deadline, now);
424 int millis = gpr_time_to_millis(gpr_time_add(timeout, round_up));
425 return millis >= 1 ? millis : 1;
426}
427
/* Run one epoll_wait on the shared epoll set (g_epfd) and dispatch readiness
   to the affected fds. Called WITHOUT g_pollset_mu held. Returns an error
   only if epoll_wait itself fails; per-event wakeup-fd errors are folded into
   the returned composite. */
static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 gpr_timespec now, gpr_timespec deadline) {
  struct epoll_event events[MAX_EPOLL_EVENTS];
  static const char *err_desc = "pollset_poll";

  int timeout = poll_deadline_to_millis_timeout(deadline, now);

  /* Only mark a blocking region if we may actually block (timeout != 0). */
  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    r = epoll_wait(g_epfd, events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR); /* retry on signal interruption */
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  grpc_error *error = GRPC_ERROR_NONE;
  for (int i = 0; i < r; i++) {
    void *data_ptr = events[i].data.ptr;
    if (data_ptr == &global_wakeup_fd) {
      /* A kick: drain the wakeup fd and let the timer system know. */
      grpc_timer_consume_kick();
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd *fd = (grpc_fd *)(data_ptr);
      /* Error/hangup wakes both directions so pending closures see EOF. */
      bool cancel = (events[i].events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (events[i].events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (events[i].events & EPOLLOUT) != 0;
      if (read_ev || cancel) {
        fd_become_readable(exec_ctx, fd, pollset);
      }
      if (write_ev || cancel) {
        fd_become_writable(exec_ctx, fd);
      }
    }
  }

  return error;
}
471
/* Register |worker| with its pollset and with the global pollable list.
   Exactly one worker (the global root) may call epoll_wait at a time; all
   others sleep on their cv until promoted or kicked. Returns true iff this
   worker should go on to poll. Caller holds g_pollset_mu; gpr_cv_wait
   releases/reacquires it while sleeping. */
static bool begin_worker(grpc_pollset *pollset, grpc_pollset_worker *worker,
                         grpc_pollset_worker **worker_hdl, gpr_timespec *now,
                         gpr_timespec deadline) {
  bool do_poll = true;
  if (worker_hdl != NULL) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;

  worker_insert(&pollset->root_worker, PWL_POLLSET, worker);
  if (!worker_insert(&g_root_worker, PWL_POLLABLE, worker)) {
    /* Not the global root: wait until promoted to root, kicked, or timed
       out. The cv exists only for waiting workers. */
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (do_poll && g_root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &g_pollset_mu, deadline)) {
        do_poll = false; /* deadline expired */
      } else if (worker->kicked) {
        do_poll = false; /* explicitly woken */
      }
    }
    /* Time passed while sleeping; refresh the caller's notion of 'now'. */
    *now = gpr_now(now->clock_type);
  }

  return do_poll && pollset->shutdown_closure == NULL;
}
496
/* Undo begin_worker: leave the global pollable list (promoting and waking
   the successor root if any), release the cv, leave the pollset's list, and
   finish pollset shutdown if this was the last worker. Caller holds
   g_pollset_mu. */
static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  if (NEW_ROOT == worker_remove(&g_root_worker, PWL_POLLABLE, worker)) {
    /* Hand the polling baton to the new global root. */
    gpr_cv_signal(&g_root_worker->cv);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (EMPTIED == worker_remove(&pollset->root_worker, PWL_POLLSET, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
}
510
/* g_pollset_mu (the mutex handed out by pollset_init) must be held by the
   caller before calling this. pollset_work() may temporarily release the
   lock during the course of its execution but it will always re-acquire it
   and ensure that it is held by the time the function returns. */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker **worker_hdl,
                                gpr_timespec now, gpr_timespec deadline) {
  grpc_pollset_worker worker;
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollset_work";
  /* A kick delivered while nobody was polling is consumed here without
     doing any actual polling work. */
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }
  if (begin_worker(pollset, &worker, worker_hdl, &now, deadline)) {
    /* This worker is the designated poller; record identity in TLS so that
       pollset_kick can detect self-kicks. */
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!pollset->shutdown_closure);
    /* Drop the lock across the (potentially blocking) epoll call. */
    gpr_mu_unlock(&g_pollset_mu);
    append_error(&error, pollset_epoll(exec_ctx, pollset, now, deadline),
                 err_desc);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&g_pollset_mu);
    gpr_tls_set(&g_current_thread_pollset, 0);
    gpr_tls_set(&g_current_thread_worker, 0);
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
  end_worker(exec_ctx, pollset, &worker, worker_hdl);
  return error;
}
541
/* Wake a worker of |pollset|. With specific_worker == NULL: wake "anyone" —
   if no worker exists, latch kicked_without_poller for the next
   pollset_work; otherwise poke the global wakeup fd (a kick from within the
   pollset's own polling thread is a no-op). With a specific worker: mark it
   kicked, then wake it by the cheapest applicable means (self: nothing;
   global root: wakeup fd; otherwise: its cv). Caller holds g_pollset_mu. */
static grpc_error *pollset_kick(grpc_pollset *pollset,
                                grpc_pollset_worker *specific_worker) {
  if (specific_worker == NULL) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      if (pollset->root_worker == NULL) {
        /* Nobody polling: remember the kick for the next pollset_work. */
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      } else {
        return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
      }
    } else {
      /* Kicking our own pollset from its polling thread: nothing to do. */
      return GRPC_ERROR_NONE;
    }
  } else if (specific_worker->kicked) {
    return GRPC_ERROR_NONE; /* already kicked */
  } else if (gpr_tls_get(&g_current_thread_worker) ==
             (intptr_t)specific_worker) {
    specific_worker->kicked = true;
    return GRPC_ERROR_NONE;
  } else if (specific_worker == g_root_worker) {
    /* The root worker is in (or headed into) epoll_wait. */
    specific_worker->kicked = true;
    return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
  } else {
    /* A cv-sleeping worker. */
    specific_worker->kicked = true;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  }
}
570
/* No-op: every fd is added to the single global epoll set at fd_create time,
   so pollsets need no per-fd bookkeeping. */
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {}

/* Wake whichever worker is blocked in epoll_wait (used by the timer system). */
static grpc_error *kick_poller(void) {
  return grpc_wakeup_fd_wakeup(&global_wakeup_fd);
}
577
578/*******************************************************************************
579 * Workqueue Definitions
580 */
581
/* Workqueues are not implemented by this engine (fd_get_workqueue returns
   NULL), so ref/unref are no-ops; the two variants match the debug/non-debug
   signatures expected by the vtable macros. */
#ifdef GRPC_WORKQUEUE_REFCOUNT_DEBUG
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue,
                                     const char *file, int line,
                                     const char *reason) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue,
                            const char *file, int line, const char *reason) {}
#else
static grpc_workqueue *workqueue_ref(grpc_workqueue *workqueue) {
  return workqueue;
}

static void workqueue_unref(grpc_exec_ctx *exec_ctx,
                            grpc_workqueue *workqueue) {}
#endif

/* Without real workqueues, scheduling lands on the current exec_ctx. */
static grpc_closure_scheduler *workqueue_scheduler(grpc_workqueue *workqueue) {
  return grpc_schedule_on_exec_ctx;
}
Craig Tillerc67cc992017-04-27 10:15:51 -0700603
604/*******************************************************************************
605 * Pollset-set Definitions
606 */
607
/* Pollset-sets are pure no-ops in this engine: with a single global epoll
   set there is nothing to group. create() returns a distinctive non-NULL
   sentinel so callers' NULL-checks pass; nothing is ever dereferenced. */
static grpc_pollset_set *pollset_set_create(void) {
  return (grpc_pollset_set *)((intptr_t)0xdeafbeef);
}

static void pollset_set_destroy(grpc_exec_ctx *exec_ctx,
                                grpc_pollset_set *pss) {}

static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {}

static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {}

static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}
634
635/*******************************************************************************
636 * Event engine binding
637 */
638
/* Engine teardown: fd freelist first, then pollset globals.
   NOTE(review): g_epfd is not closed here and the global wakeup fd is not
   destroyed — verify whether that is intentional at process shutdown. */
static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
}
643
644static const grpc_event_engine_vtable vtable = {
645 .pollset_size = sizeof(grpc_pollset),
646
647 .fd_create = fd_create,
648 .fd_wrapped_fd = fd_wrapped_fd,
649 .fd_orphan = fd_orphan,
650 .fd_shutdown = fd_shutdown,
651 .fd_is_shutdown = fd_is_shutdown,
652 .fd_notify_on_read = fd_notify_on_read,
653 .fd_notify_on_write = fd_notify_on_write,
654 .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
655 .fd_get_workqueue = fd_get_workqueue,
656
657 .pollset_init = pollset_init,
658 .pollset_shutdown = pollset_shutdown,
659 .pollset_destroy = pollset_destroy,
660 .pollset_work = pollset_work,
661 .pollset_kick = pollset_kick,
662 .pollset_add_fd = pollset_add_fd,
663
664 .pollset_set_create = pollset_set_create,
665 .pollset_set_destroy = pollset_set_destroy,
666 .pollset_set_add_pollset = pollset_set_add_pollset,
667 .pollset_set_del_pollset = pollset_set_del_pollset,
668 .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
669 .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
670 .pollset_set_add_fd = pollset_set_add_fd,
671 .pollset_set_del_fd = pollset_set_del_fd,
672
673 .kick_poller = kick_poller,
674
675 .workqueue_ref = workqueue_ref,
676 .workqueue_unref = workqueue_unref,
677 .workqueue_scheduler = workqueue_scheduler,
678
679 .shutdown_engine = shutdown_engine,
680};
681
/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
 * Create a dummy epoll_fd to make sure epoll support is available */
/* Probe for the engine's prerequisites (wakeup fds + working epoll),
   initialize global state, and return the vtable — or NULL so iomgr falls
   back to another engine. |explicit_request| is currently unused here. */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
  if (!grpc_has_wakeup_fd()) {
    return NULL;
  }

  /* The single epoll set shared by every pollset in the process. */
  g_epfd = epoll_create1(EPOLL_CLOEXEC);
  if (g_epfd < 0) {
    gpr_log(GPR_ERROR, "epoll unavailable");
    return NULL;
  }

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    /* Roll back everything done so far before declining. */
    close(g_epfd);
    fd_global_shutdown();
    return NULL;
  }

  return &vtable;
}
705
706#else /* defined(GRPC_LINUX_EPOLL) */
707#if defined(GRPC_POSIX_SOCKET)
708#include "src/core/lib/iomgr/ev_posix.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
 * NULL so iomgr selects a different polling engine. */
const grpc_event_engine_vtable *grpc_init_epoll1_linux(bool explicit_request) {
  return NULL;
}
Craig Tillerc67cc992017-04-27 10:15:51 -0700714#endif /* defined(GRPC_POSIX_SOCKET) */
715#endif /* !defined(GRPC_LINUX_EPOLL) */