/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include "src/core/lib/iomgr/port.h"

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL

#include "src/core/lib/iomgr/ev_epollex_linux.h"

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/is_epollexclusive_available.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/sys_epoll_wrapper.h"
#include "src/core/lib/iomgr/timer.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/spinlock.h"

// debug aid: create workers on the heap (allows asan to spot
// use-after-destruction)
//#define GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP 1

#ifndef NDEBUG
grpc_tracer_flag grpc_trace_pollable_refcount =
    GRPC_TRACER_INITIALIZER(false, "pollable_refcount");
#endif

/*******************************************************************************
 * pollable Declarations
 */

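/* A pollable bundles an epoll set (epfd) with a wakeup fd used for kicks.
   There are three kinds:
     PO_EMPTY - a placeholder used while a pollset owns no fds
     PO_FD    - watches exactly one fd (its owner_fd); used to optimize
                pollsets that contain a single fd
     PO_MULTI - watches an arbitrary collection of fds */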
typedef enum { PO_MULTI, PO_FD, PO_EMPTY } pollable_type;

typedef struct pollable pollable;

struct pollable {
  pollable_type type;  // immutable
  gpr_refcount refs;

  int epfd;
  grpc_wakeup_fd wakeup;

  // only for type fd... one ref to the owner fd
  grpc_fd *owner_fd;

  grpc_pollset_set *pollset_set;
  pollable *next;
  pollable *prev;

  gpr_mu mu;
  grpc_pollset_worker *root_worker;
};

static const char *pollable_type_string(pollable_type t) {
  switch (t) {
    case PO_MULTI:
      return "pollset";
    case PO_FD:
      return "fd";
    case PO_EMPTY:
      return "empty";
  }
  return "<invalid>";
}

static char *pollable_desc(pollable *p) {
  char *out;
  gpr_asprintf(&out, "type=%s epfd=%d wakeup=%d", pollable_type_string(p->type),
               p->epfd, p->wakeup.read_fd);
  return out;
}

static pollable *g_empty_pollable;

static grpc_error *pollable_create(pollable_type type, pollable **p);
#ifdef NDEBUG
static pollable *pollable_ref(pollable *p);
static void pollable_unref(pollable *p);
#define POLLABLE_REF(p, r) pollable_ref(p)
#define POLLABLE_UNREF(p, r) pollable_unref(p)
#else
static pollable *pollable_ref(pollable *p, int line, const char *reason);
static void pollable_unref(pollable *p, int line, const char *reason);
#define POLLABLE_REF(p, r) pollable_ref((p), __LINE__, (r))
#define POLLABLE_UNREF(p, r) pollable_unref((p), __LINE__, (r))
#endif

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;
  /* refst format:
       bit 0    : 1=Active / 0=Orphaned
       bits 1-n : refcount
     Ref/Unref by two to avoid altering the orphaned bit */
  gpr_atm refst;

  gpr_mu orphan_mu;

  gpr_mu pollable_mu;
  pollable *pollable_obj;

  gpr_atm read_closure;
  gpr_atm write_closure;

  struct grpc_fd *freelist_next;
  grpc_closure *on_done_closure;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

typedef struct {
  grpc_pollset_worker *next;
  grpc_pollset_worker *prev;
} pwlink;

typedef enum { PWLINK_POLLABLE = 0, PWLINK_POLLSET, PWLINK_COUNT } pwlinks;

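/* A worker sits on two intrusive doubly-linked lists at once: the list rooted
   at its pollable (PWLINK_POLLABLE) and the list rooted at its pollset
   (PWLINK_POLLSET); links[] holds one pwlink per list. */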
struct grpc_pollset_worker {
  bool kicked;
  bool initialized_cv;
  pid_t originator;
  gpr_cv cv;
  grpc_pollset *pollset;
  pollable *pollable_obj;

  pwlink links[PWLINK_COUNT];
};

#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL 5

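/* epoll_wait results are buffered on the pollset (events / event_count below)
   and at most MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL of them are dispatched
   per pollset_work call (event_cursor tracks progress); epoll is re-entered
   only once the buffered events have been drained. */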
struct grpc_pollset {
  gpr_mu mu;
  pollable *active_pollable;
  bool kicked_without_poller;
  grpc_closure *shutdown_closure;
  grpc_pollset_worker *root_worker;
  int containing_pollset_set_count;

  int event_cursor;
  int event_count;
  struct epoll_event events[MAX_EPOLL_EVENTS];
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  gpr_refcount refs;
  gpr_mu mu;
  grpc_pollset_set *parent;

  size_t pollset_count;
  size_t pollset_capacity;
  pollable **pollsets;

  size_t fd_count;
  size_t fd_capacity;
  grpc_fd **fds;
};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error **composite, grpc_error *error,
                         const char *desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;

#ifndef NDEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(ec, fd, n, reason) \
  unref_by(ec, fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG,
            "FD %d %p ref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
  }
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(ec, fd, n, reason) unref_by(ec, fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}

static void fd_destroy(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  grpc_fd *fd = (grpc_fd *)arg;
  /* Add the fd to the freelist */
  grpc_iomgr_unregister_object(&fd->iomgr_object);
  POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
  gpr_mu_destroy(&fd->pollable_mu);
  gpr_mu_destroy(&fd->orphan_mu);
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;

  grpc_lfev_destroy(&fd->read_closure);
  grpc_lfev_destroy(&fd->write_closure);

  gpr_mu_unlock(&fd_freelist_mu);
}

#ifndef NDEBUG
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n,
                     const char *reason, const char *file, int line) {
  if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG,
            "FD %d %p unref %d %" PRIdPTR " -> %" PRIdPTR " [%s; %s:%d]",
            fd->fd, fd, n, gpr_atm_no_barrier_load(&fd->refst),
            gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
  }
#else
static void unref_by(grpc_exec_ctx *exec_ctx, grpc_fd *fd, int n) {
#endif
  gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    GRPC_CLOSURE_SCHED(exec_ctx, GRPC_CLOSURE_CREATE(fd_destroy, fd,
                                                     grpc_schedule_on_exec_ctx),
                       GRPC_ERROR_NONE);
  } else {
    GPR_ASSERT(old > n);
  }
}

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd *fd_create(int fd, const char *name) {
  grpc_fd *new_fd = NULL;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == NULL) {
    new_fd = (grpc_fd *)gpr_malloc(sizeof(grpc_fd));
  }

  gpr_mu_init(&new_fd->pollable_mu);
  gpr_mu_init(&new_fd->orphan_mu);
  new_fd->pollable_obj = NULL;
  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
  new_fd->fd = fd;
  grpc_lfev_init(&new_fd->read_closure);
  grpc_lfev_init(&new_fd->write_closure);
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = NULL;
  new_fd->on_done_closure = NULL;

  char *fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);
  return new_fd;
}

static int fd_wrapped_fd(grpc_fd *fd) {
  int ret_fd = fd->fd;
  return (gpr_atm_acq_load(&fd->refst) & 1) ? ret_fd : -1;
}

static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                      grpc_closure *on_done, int *release_fd,
                      bool already_closed, const char *reason) {
  bool is_fd_closed = already_closed;

  gpr_mu_lock(&fd->orphan_mu);

  fd->on_done_closure = on_done;

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != NULL) {
    *release_fd = fd->fd;
  } else if (!is_fd_closed) {
    close(fd->fd);
    is_fd_closed = true;
  }

  if (!is_fd_closed) {
    gpr_log(GPR_DEBUG, "TODO: handle fd removal?");
  }

  /* Remove the active status but keep referenced. We want this grpc_fd struct
     to be alive (and not added to freelist) until the end of this function */
  REF_BY(fd, 1, reason);

  GRPC_CLOSURE_SCHED(exec_ctx, fd->on_done_closure, GRPC_ERROR_NONE);

  gpr_mu_unlock(&fd->orphan_mu);

  UNREF_BY(exec_ctx, fd, 2, reason); /* Drop the reference */
}

static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
                                                  grpc_fd *fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset *)notifier;
}

static bool fd_is_shutdown(grpc_fd *fd) {
  return grpc_lfev_is_shutdown(&fd->read_closure);
}

/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd, grpc_error *why) {
  if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
                             GRPC_ERROR_REF(why))) {
    shutdown(fd->fd, SHUT_RDWR);
    grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}

static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}

/*******************************************************************************
 * Pollable Definitions
 */

static grpc_error *pollable_create(pollable_type type, pollable **p) {
  *p = NULL;

  int epfd = epoll_create1(EPOLL_CLOEXEC);
  if (epfd == -1) {
    return GRPC_OS_ERROR(errno, "epoll_create1");
  }
  *p = (pollable *)gpr_malloc(sizeof(**p));
  grpc_error *err = grpc_wakeup_fd_init(&(*p)->wakeup);
  if (err != GRPC_ERROR_NONE) {
    close(epfd);
    gpr_free(*p);
    *p = NULL;
    return err;
  }
  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLET);
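  /* Tag the wakeup fd's event pointer with its low bit set so that
     pollset_process_events can tell a kick apart from ordinary fd readiness
     (heap-allocated grpc_fd pointers always have a zero low bit). */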
  ev.data.ptr = (void *)(1 | (intptr_t) & (*p)->wakeup);
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, (*p)->wakeup.read_fd, &ev) != 0) {
    err = GRPC_OS_ERROR(errno, "epoll_ctl");
    close(epfd);
    grpc_wakeup_fd_destroy(&(*p)->wakeup);
    gpr_free(*p);
    *p = NULL;
    return err;
  }

  (*p)->type = type;
  gpr_ref_init(&(*p)->refs, 1);
  gpr_mu_init(&(*p)->mu);
  (*p)->epfd = epfd;
  (*p)->owner_fd = NULL;
  (*p)->pollset_set = NULL;
  (*p)->next = (*p)->prev = *p;
  (*p)->root_worker = NULL;
  return GRPC_ERROR_NONE;
}

#ifdef NDEBUG
static pollable *pollable_ref(pollable *p) {
#else
static pollable *pollable_ref(pollable *p, int line, const char *reason) {
  if (GRPC_TRACER_ON(grpc_trace_pollable_refcount)) {
    int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
    gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
            "POLLABLE:%p ref %d->%d %s", p, r, r + 1, reason);
  }
#endif
  gpr_ref(&p->refs);
  return p;
}

#ifdef NDEBUG
static void pollable_unref(pollable *p) {
#else
static void pollable_unref(pollable *p, int line, const char *reason) {
  if (p == NULL) return;
  if (GRPC_TRACER_ON(grpc_trace_pollable_refcount)) {
    int r = (int)gpr_atm_no_barrier_load(&p->refs.count);
    gpr_log(__FILE__, line, GPR_LOG_SEVERITY_DEBUG,
            "POLLABLE:%p unref %d->%d %s", p, r, r - 1, reason);
  }
#endif
  if (p != NULL && gpr_unref(&p->refs)) {
    close(p->epfd);
    grpc_wakeup_fd_destroy(&p->wakeup);
    gpr_free(p);
  }
}

static grpc_error *pollable_add_fd(pollable *p, grpc_fd *fd) {
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollable_add_fd";
  const int epfd = p->epfd;

  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "add fd %p (%d) to pollable %p", fd, fd->fd, p);
  }

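  /* Register the fd edge-triggered for both directions. EPOLLEXCLUSIVE asks
     the kernel to wake only one of the waiters sharing this fd per event,
     avoiding a thundering herd when many pollables watch the same fd;
     support for the flag is probed elsewhere (is_epollexclusive_available.h). */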
  struct epoll_event ev_fd;
  ev_fd.events = (uint32_t)(EPOLLET | EPOLLIN | EPOLLOUT | EPOLLEXCLUSIVE);
  ev_fd.data.ptr = fd;
  if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd->fd, &ev_fd) != 0) {
    switch (errno) {
      case EEXIST:
        break;
      default:
        append_error(&error, GRPC_OS_ERROR(errno, "epoll_ctl"), err_desc);
    }
  }

  return error;
}

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* Global state management */
static grpc_error *pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  return pollable_create(PO_EMPTY, &g_empty_pollable);
}

static void pollset_global_shutdown(void) {
  POLLABLE_UNREF(g_empty_pollable, "g_empty_pollable");
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
}

static void pollset_maybe_finish_shutdown(grpc_exec_ctx *exec_ctx,
                                          grpc_pollset *pollset) {
  if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
      pollset->containing_pollset_set_count == 0) {
    GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = NULL;
  }
}

/* pollset->mu must be held before calling this function,
 * pollset->active_pollable->mu & specific_worker->pollable_obj->mu must not be
 * held */
static grpc_error *pollset_kick_one(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset *pollset,
                                    grpc_pollset_worker *specific_worker) {
  pollable *p = specific_worker->pollable_obj;
  grpc_core::mu_guard lock(&p->mu);
  GPR_ASSERT(specific_worker != NULL);
  if (specific_worker->kicked) {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_already_kicked", p);
    }
    return GRPC_ERROR_NONE;
  }
  if (gpr_tls_get(&g_current_thread_worker) == (intptr_t)specific_worker) {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_DEBUG, "PS:%p kicked_specific_but_awake", p);
    }
    specific_worker->kicked = true;
    return GRPC_ERROR_NONE;
  }
  if (specific_worker == p->root_worker) {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_wakeup_fd", p);
    }
    specific_worker->kicked = true;
    grpc_error *error = grpc_wakeup_fd_wakeup(&p->wakeup);
    return error;
  }
  if (specific_worker->initialized_cv) {
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_DEBUG, "PS:%p kicked_specific_via_cv", p);
    }
    specific_worker->kicked = true;
    gpr_cv_signal(&specific_worker->cv);
    return GRPC_ERROR_NONE;
  }
  // we can get here during end_worker after removing specific_worker from the
  // pollable list but before removing it from the pollset list
  return GRPC_ERROR_NONE;
}

static grpc_error *pollset_kick(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker *specific_worker) {
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG,
            "PS:%p kick %p tls_pollset=%p tls_worker=%p pollset.root_worker=%p",
            pollset, specific_worker,
            (void *)gpr_tls_get(&g_current_thread_pollset),
            (void *)gpr_tls_get(&g_current_thread_worker),
            pollset->root_worker);
  }
  if (specific_worker == NULL) {
    if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
      if (pollset->root_worker == NULL) {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_DEBUG, "PS:%p kicked_any_without_poller", pollset);
        }
        pollset->kicked_without_poller = true;
        return GRPC_ERROR_NONE;
      } else {
        return pollset_kick_one(exec_ctx, pollset, pollset->root_worker);
      }
    } else {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG, "PS:%p kicked_any_but_awake", pollset);
      }
      return GRPC_ERROR_NONE;
    }
  } else {
    return pollset_kick_one(exec_ctx, pollset, specific_worker);
  }
}

static grpc_error *pollset_kick_all(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset *pollset) {
  grpc_error *error = GRPC_ERROR_NONE;
  const char *err_desc = "pollset_kick_all";
  grpc_pollset_worker *w = pollset->root_worker;
  if (w != NULL) {
    do {
      append_error(&error, pollset_kick_one(exec_ctx, pollset, w), err_desc);
      w = w->links[PWLINK_POLLSET].next;
    } while (w != pollset->root_worker);
  }
  return error;
}

static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
  gpr_mu_init(&pollset->mu);
  pollset->active_pollable = POLLABLE_REF(g_empty_pollable, "pollset");
  *mu = &pollset->mu;
}

static int poll_deadline_to_millis_timeout(grpc_exec_ctx *exec_ctx,
                                           grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
  if (delta > INT_MAX)
    return INT_MAX;
  else if (delta < 0)
    return 0;
  else
    return (int)delta;
}

static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");

  /* Note, it is possible that fd_become_readable might be called twice with
     different 'notifier's when an fd becomes readable and it is in two epoll
     sets (This can happen briefly during polling island merges). In such cases
     it does not really matter which notifier is set as the
     read_notifier_pollset (They would both point to the same polling island
     anyway) */
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}

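/* Lazily create (or reuse) the fd's own single-fd pollable and hand the caller
   a fresh ref to it; every pollset whose only fd is this one ends up sharing
   the same pollable, and hence the same epoll set. */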
static grpc_error *fd_become_pollable(grpc_fd *fd, pollable **p) {
  gpr_mu_lock(&fd->pollable_mu);
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "fd_become_pollable";
  if (fd->pollable_obj == NULL) {
    if (append_error(&error, pollable_create(PO_FD, &fd->pollable_obj),
                     err_desc)) {
      fd->pollable_obj->owner_fd = fd;
      if (!append_error(&error, pollable_add_fd(fd->pollable_obj, fd),
                        err_desc)) {
        POLLABLE_UNREF(fd->pollable_obj, "fd_pollable");
        fd->pollable_obj = NULL;
      }
    }
  }
  if (error == GRPC_ERROR_NONE) {
    GPR_ASSERT(fd->pollable_obj != NULL);
    *p = POLLABLE_REF(fd->pollable_obj, "pollset");
  } else {
    GPR_ASSERT(fd->pollable_obj == NULL);
    *p = NULL;
  }
  gpr_mu_unlock(&fd->pollable_mu);
  return error;
}

/* pollset->mu lock must be held by the caller before calling this */
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                             grpc_closure *closure) {
  GPR_ASSERT(pollset->shutdown_closure == NULL);
  pollset->shutdown_closure = closure;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
}

static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
                                          grpc_pollset *pollset, bool drain) {
  static const char *err_desc = "pollset_process_events";
  grpc_error *error = GRPC_ERROR_NONE;
  for (int i = 0; (drain || i < MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) &&
                  pollset->event_cursor != pollset->event_count;
       i++) {
    int n = pollset->event_cursor++;
    struct epoll_event *ev = &pollset->events[n];
    void *data_ptr = ev->data.ptr;
    if (1 & (intptr_t)data_ptr) {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
      }
      append_error(&error,
                   grpc_wakeup_fd_consume_wakeup(
                       (grpc_wakeup_fd *)((~(intptr_t)1) & (intptr_t)data_ptr)),
                   err_desc);
    } else {
      grpc_fd *fd = (grpc_fd *)data_ptr;
      bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG,
                "PS:%p got fd %p: cancel=%d read=%d "
                "write=%d",
                pollset, fd, cancel, read_ev, write_ev);
      }
      if (read_ev || cancel) {
        fd_become_readable(exec_ctx, fd, pollset);
      }
      if (write_ev || cancel) {
        fd_become_writable(exec_ctx, fd);
      }
    }
  }

  return error;
}

/* pollset_shutdown is guaranteed to be called before pollset_destroy. */
static void pollset_destroy(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset) {
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = NULL;
  GRPC_LOG_IF_ERROR("pollset_process_events",
                    pollset_process_events(exec_ctx, pollset, true));
}

static grpc_error *pollset_epoll(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 pollable *p, grpc_millis deadline) {
  int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);

  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    char *desc = pollable_desc(p);
    gpr_log(GPR_DEBUG, "PS:%p poll %p[%s] for %dms", pollset, p, desc, timeout);
    gpr_free(desc);
  }

  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  int r;
  do {
    GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
    r = epoll_wait(p->epfd, pollset->events, MAX_EPOLL_EVENTS, timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PS:%p poll %p got %d events", pollset, p, r);
  }

  pollset->event_cursor = 0;
  pollset->event_count = r;

  return GRPC_ERROR_NONE;
}

/* Return true if first in list */
static bool worker_insert(grpc_pollset_worker **root_worker,
                          grpc_pollset_worker *worker, pwlinks link) {
  if (*root_worker == NULL) {
    *root_worker = worker;
    worker->links[link].next = worker->links[link].prev = worker;
    return true;
  } else {
    worker->links[link].next = *root_worker;
    worker->links[link].prev = worker->links[link].next->links[link].prev;
    worker->links[link].next->links[link].prev = worker;
    worker->links[link].prev->links[link].next = worker;
    return false;
  }
}

/* returns the new root IFF the root changed */
typedef enum { WRR_NEW_ROOT, WRR_EMPTIED, WRR_REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset_worker **root_worker,
                                          grpc_pollset_worker *worker,
                                          pwlinks link) {
  if (worker == *root_worker) {
    if (worker == worker->links[link].next) {
      *root_worker = NULL;
      return WRR_EMPTIED;
    } else {
      *root_worker = worker->links[link].next;
      worker->links[link].prev->links[link].next = worker->links[link].next;
      worker->links[link].next->links[link].prev = worker->links[link].prev;
      return WRR_NEW_ROOT;
    }
  } else {
    worker->links[link].prev->links[link].next = worker->links[link].next;
    worker->links[link].next->links[link].prev = worker->links[link].prev;
    return WRR_REMOVED;
  }
}

/* Return true if this thread should poll */
static bool begin_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                         grpc_pollset_worker *worker,
                         grpc_pollset_worker **worker_hdl,
                         grpc_millis deadline) {
  bool do_poll = true;
  if (worker_hdl != NULL) *worker_hdl = worker;
  worker->initialized_cv = false;
  worker->kicked = false;
  worker->pollset = pollset;
  worker->pollable_obj =
      POLLABLE_REF(pollset->active_pollable, "pollset_worker");
  worker_insert(&pollset->root_worker, worker, PWLINK_POLLSET);
  gpr_mu_lock(&worker->pollable_obj->mu);
  if (!worker_insert(&worker->pollable_obj->root_worker, worker,
                     PWLINK_POLLABLE)) {
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    gpr_mu_unlock(&pollset->mu);
    if (GRPC_TRACER_ON(grpc_polling_trace) &&
        worker->pollable_obj->root_worker != worker) {
      gpr_log(GPR_DEBUG, "PS:%p wait %p w=%p for %dms", pollset,
              worker->pollable_obj, worker,
              poll_deadline_to_millis_timeout(exec_ctx, deadline));
    }
    while (do_poll && worker->pollable_obj->root_worker != worker) {
      if (gpr_cv_wait(&worker->cv, &worker->pollable_obj->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME))) {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_DEBUG, "PS:%p timeout_wait %p w=%p", pollset,
                  worker->pollable_obj, worker);
        }
        do_poll = false;
      } else if (worker->kicked) {
        if (GRPC_TRACER_ON(grpc_polling_trace)) {
          gpr_log(GPR_DEBUG, "PS:%p wakeup %p w=%p", pollset,
                  worker->pollable_obj, worker);
        }
        do_poll = false;
      } else if (GRPC_TRACER_ON(grpc_polling_trace) &&
                 worker->pollable_obj->root_worker != worker) {
        gpr_log(GPR_DEBUG, "PS:%p spurious_wakeup %p w=%p", pollset,
                worker->pollable_obj, worker);
      }
    }
    grpc_exec_ctx_invalidate_now(exec_ctx);
  } else {
    gpr_mu_unlock(&pollset->mu);
  }
  gpr_mu_unlock(&worker->pollable_obj->mu);

  return do_poll;
  // && pollset->shutdown_closure == NULL && pollset->active_pollable ==
  // worker->pollable_obj;
}

static void end_worker(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                       grpc_pollset_worker *worker,
                       grpc_pollset_worker **worker_hdl) {
  gpr_mu_lock(&worker->pollable_obj->mu);
  if (worker_remove(&worker->pollable_obj->root_worker, worker,
                    PWLINK_POLLABLE) == WRR_NEW_ROOT) {
    grpc_pollset_worker *new_root = worker->pollable_obj->root_worker;
    GPR_ASSERT(new_root->initialized_cv);
    gpr_cv_signal(&new_root->cv);
  }
  gpr_mu_unlock(&worker->pollable_obj->mu);
  POLLABLE_UNREF(worker->pollable_obj, "pollset_worker");
  gpr_mu_lock(&pollset->mu);
  if (worker_remove(&pollset->root_worker, worker, PWLINK_POLLSET) ==
      WRR_EMPTIED) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
}

static long gettid(void) { return syscall(__NR_gettid); }

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock
   and ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker **worker_hdl,
                                grpc_millis deadline) {
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
  grpc_pollset_worker *worker =
      (grpc_pollset_worker *)gpr_malloc(sizeof(*worker));
#define WORKER_PTR (worker)
#else
  grpc_pollset_worker worker;
#define WORKER_PTR (&worker)
#endif
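  // record the kernel thread id that started this pollset_work call; the
  // originator field is not read elsewhere in this file and appears to be
  // kept only as a debugging aid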
  WORKER_PTR->originator = gettid();
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PS:%p work hdl=%p worker=%p now=%" PRIdPTR
                       " deadline=%" PRIdPTR " kwp=%d pollable=%p",
            pollset, worker_hdl, WORKER_PTR, grpc_exec_ctx_now(exec_ctx),
            deadline, pollset->kicked_without_poller, pollset->active_pollable);
  }
  static const char *err_desc = "pollset_work";
  grpc_error *error = GRPC_ERROR_NONE;
  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
  } else {
    if (begin_worker(exec_ctx, pollset, WORKER_PTR, worker_hdl, deadline)) {
      gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
      gpr_tls_set(&g_current_thread_worker, (intptr_t)WORKER_PTR);
      if (pollset->event_cursor == pollset->event_count) {
        append_error(&error, pollset_epoll(exec_ctx, pollset,
                                           WORKER_PTR->pollable_obj, deadline),
                     err_desc);
      }
      append_error(&error, pollset_process_events(exec_ctx, pollset, false),
                   err_desc);
      grpc_exec_ctx_flush(exec_ctx);
      gpr_tls_set(&g_current_thread_pollset, 0);
      gpr_tls_set(&g_current_thread_worker, 0);
    }
    end_worker(exec_ctx, pollset, WORKER_PTR, worker_hdl);
  }
#ifdef GRPC_EPOLLEX_CREATE_WORKERS_ON_HEAP
  gpr_free(worker);
#endif
  return error;
}

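/* A pollset's active_pollable is upgraded in place as fds are added:
   empty -> single-fd -> multi. Each transition kicks every current worker so
   that it re-reads active_pollable before polling again. */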
static grpc_error *pollset_transition_pollable_from_empty_to_fd_locked(
    grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd *fd) {
  static const char *err_desc = "pollset_transition_pollable_from_empty_to_fd";
  grpc_error *error = GRPC_ERROR_NONE;
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG,
            "PS:%p add fd %p (%d); transition pollable from empty to fd",
            pollset, fd, fd->fd);
  }
  append_error(&error, pollset_kick_all(exec_ctx, pollset), err_desc);
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  append_error(&error, fd_become_pollable(fd, &pollset->active_pollable),
               err_desc);
  return error;
}

static grpc_error *pollset_transition_pollable_from_fd_to_multi_locked(
    grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_fd *and_add_fd) {
  static const char *err_desc = "pollset_transition_pollable_from_fd_to_multi";
  grpc_error *error = GRPC_ERROR_NONE;
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(
        GPR_DEBUG,
        "PS:%p add fd %p (%d); transition pollable from fd %p to multipoller",
        pollset, and_add_fd, and_add_fd ? and_add_fd->fd : -1,
        pollset->active_pollable->owner_fd);
  }
  append_error(&error, pollset_kick_all(exec_ctx, pollset), err_desc);
  grpc_fd *initial_fd = pollset->active_pollable->owner_fd;
  POLLABLE_UNREF(pollset->active_pollable, "pollset");
  pollset->active_pollable = NULL;
  if (append_error(&error, pollable_create(PO_MULTI, &pollset->active_pollable),
                   err_desc)) {
    append_error(&error, pollable_add_fd(pollset->active_pollable, initial_fd),
                 err_desc);
    if (and_add_fd != NULL) {
      append_error(&error,
                   pollable_add_fd(pollset->active_pollable, and_add_fd),
                   err_desc);
    }
  }
  return error;
}

/* expects pollset->mu locked */
static grpc_error *pollset_add_fd_locked(grpc_exec_ctx *exec_ctx,
                                         grpc_pollset *pollset, grpc_fd *fd) {
  grpc_error *error = GRPC_ERROR_NONE;
  pollable *po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_add_fd");
  switch (pollset->active_pollable->type) {
    case PO_EMPTY:
      /* empty pollable --> single fd pollable */
      error = pollset_transition_pollable_from_empty_to_fd_locked(exec_ctx,
                                                                  pollset, fd);
      break;
    case PO_FD:
      gpr_mu_lock(&po_at_start->owner_fd->orphan_mu);
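      /* If the owner fd has already been orphaned (active bit cleared), the
         single-fd pollable is effectively dead, so treat this like the
         empty -> fd transition; otherwise upgrade to a multipoller. */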
      if ((gpr_atm_no_barrier_load(&pollset->active_pollable->owner_fd->refst) &
           1) == 0) {
        error = pollset_transition_pollable_from_empty_to_fd_locked(
            exec_ctx, pollset, fd);
      } else {
        /* fd --> multipoller */
        error = pollset_transition_pollable_from_fd_to_multi_locked(
            exec_ctx, pollset, fd);
      }
      gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu);
      break;
    case PO_MULTI:
      error = pollable_add_fd(pollset->active_pollable, fd);
      break;
  }
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
  } else {
    POLLABLE_UNREF(po_at_start, "pollset_add_fd");
  }
  return error;
}

static grpc_error *pollset_as_multipollable_locked(grpc_exec_ctx *exec_ctx,
                                                   grpc_pollset *pollset,
                                                   pollable **pollable_obj) {
  grpc_error *error = GRPC_ERROR_NONE;
  pollable *po_at_start =
      POLLABLE_REF(pollset->active_pollable, "pollset_as_multipollable");
  switch (pollset->active_pollable->type) {
    case PO_EMPTY:
      POLLABLE_UNREF(pollset->active_pollable, "pollset");
      error = pollable_create(PO_MULTI, &pollset->active_pollable);
      break;
    case PO_FD:
      gpr_mu_lock(&po_at_start->owner_fd->orphan_mu);
      if ((gpr_atm_no_barrier_load(&pollset->active_pollable->owner_fd->refst) &
           1) == 0) {
        POLLABLE_UNREF(pollset->active_pollable, "pollset");
        error = pollable_create(PO_MULTI, &pollset->active_pollable);
      } else {
        error = pollset_transition_pollable_from_fd_to_multi_locked(
            exec_ctx, pollset, NULL);
      }
      gpr_mu_unlock(&po_at_start->owner_fd->orphan_mu);
      break;
    case PO_MULTI:
      break;
  }
  if (error != GRPC_ERROR_NONE) {
    POLLABLE_UNREF(pollset->active_pollable, "pollset");
    pollset->active_pollable = po_at_start;
    *pollable_obj = NULL;
  } else {
    *pollable_obj = POLLABLE_REF(pollset->active_pollable, "pollset_set");
    POLLABLE_UNREF(po_at_start, "pollset_as_multipollable");
  }
  return error;
}

static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {
  gpr_mu_lock(&pollset->mu);
  grpc_error *error = pollset_add_fd_locked(exec_ctx, pollset, fd);
  gpr_mu_unlock(&pollset->mu);
  GRPC_LOG_IF_ERROR("pollset_add_fd", error);
}

/*******************************************************************************
 * Pollset-set Definitions
 */

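/* Merged pollset_sets form a tree: only the root ("adam") holds the live fd
   and pollset arrays. pss_lock_adam() walks up the parent chain, releasing
   each set's mutex before taking its parent's, and returns the root locked. */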
static grpc_pollset_set *pss_lock_adam(grpc_pollset_set *pss) {
  gpr_mu_lock(&pss->mu);
  while (pss->parent != NULL) {
    gpr_mu_unlock(&pss->mu);
    pss = pss->parent;
    gpr_mu_lock(&pss->mu);
  }
  return pss;
}

static grpc_pollset_set *pollset_set_create(void) {
  grpc_pollset_set *pss = (grpc_pollset_set *)gpr_zalloc(sizeof(*pss));
  gpr_mu_init(&pss->mu);
  gpr_ref_init(&pss->refs, 1);
  return pss;
}

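/* Drop one ref; on the final unref, also unref the parent chain and release
   the references held on every tracked pollable and fd. */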
static void pollset_set_unref(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss) {
  if (pss == NULL) return;
  if (!gpr_unref(&pss->refs)) return;
  pollset_set_unref(exec_ctx, pss->parent);
  gpr_mu_destroy(&pss->mu);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    POLLABLE_UNREF(pss->pollsets[i], "pollset_set");
  }
  for (size_t i = 0; i < pss->fd_count; i++) {
    UNREF_BY(exec_ctx, pss->fds[i], 2, "pollset_set");
  }
  gpr_free(pss->pollsets);
  gpr_free(pss->fds);
  gpr_free(pss);
}

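/* Add fd to every pollable already owned by this pollset_set (at the root),
   then record it. The set takes its fd references with weight 2, matching the
   UNREF_BY(..., 2, "pollset_set") calls on removal and destruction. */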
static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PSS:%p: add fd %p (%d)", pss, fd, fd->fd);
  }
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollset_set_add_fd";
  pss = pss_lock_adam(pss);
  for (size_t i = 0; i < pss->pollset_count; i++) {
    append_error(&error, pollable_add_fd(pss->pollsets[i], fd), err_desc);
  }
  if (pss->fd_count == pss->fd_capacity) {
    pss->fd_capacity = GPR_MAX(pss->fd_capacity * 2, 8);
    pss->fds =
        (grpc_fd **)gpr_realloc(pss->fds, pss->fd_capacity * sizeof(*pss->fds));
  }
  REF_BY(fd, 2, "pollset_set");
  pss->fds[pss->fd_count++] = fd;
  gpr_mu_unlock(&pss->mu);

  GRPC_LOG_IF_ERROR(err_desc, error);
}

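/* Remove fd from the root pollset_set's fd list and drop the reference taken
   when it was added. */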
static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset_set *pss,
                               grpc_fd *fd) {
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PSS:%p: del fd %p", pss, fd);
  }
  pss = pss_lock_adam(pss);
  size_t i;
  for (i = 0; i < pss->fd_count; i++) {
    if (pss->fds[i] == fd) {
      UNREF_BY(exec_ctx, fd, 2, "pollset_set");
      break;
    }
  }
  GPR_ASSERT(i != pss->fd_count);
  for (; i < pss->fd_count - 1; i++) {
    pss->fds[i] = pss->fds[i + 1];
  }
  pss->fd_count--;
  gpr_mu_unlock(&pss->mu);
}

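/* Remove the pollset's active pollable from the root set's array, then drop
   the pollset's containing_pollset_set_count; if that reaches zero, a pending
   shutdown may now complete. */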
static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PSS:%p: del pollset %p", pss, ps);
  }
  pss = pss_lock_adam(pss);
  size_t i;
  for (i = 0; i < pss->pollset_count; i++) {
    if (pss->pollsets[i] == ps->active_pollable) {
      POLLABLE_UNREF(pss->pollsets[i], "pollset_set");
      break;
    }
  }
  GPR_ASSERT(i != pss->pollset_count);
  for (; i < pss->pollset_count - 1; i++) {
    pss->pollsets[i] = pss->pollsets[i + 1];
  }
  pss->pollset_count--;
  gpr_mu_unlock(&pss->mu);
  gpr_mu_lock(&ps->mu);
  if (0 == --ps->containing_pollset_set_count) {
    pollset_maybe_finish_shutdown(exec_ctx, ps);
  }
  gpr_mu_unlock(&ps->mu);
}

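// Helper shared by pollset_set_add_pollset and pollset_set_add_pollset_set.
// An fd whose refst low bit has been cleared is already orphaned; it is
// unref'd and omitted from out_fds instead of being added to the pollables.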
// add all fds to pollables, and output a new array of unorphaned out_fds
static grpc_error *add_fds_to_pollables(grpc_exec_ctx *exec_ctx, grpc_fd **fds,
                                        size_t fd_count, pollable **pollables,
                                        size_t pollable_count,
                                        const char *err_desc, grpc_fd **out_fds,
                                        size_t *out_fd_count) {
  grpc_error *error = GRPC_ERROR_NONE;
  for (size_t i = 0; i < fd_count; i++) {
    gpr_mu_lock(&fds[i]->orphan_mu);
    if ((gpr_atm_no_barrier_load(&fds[i]->refst) & 1) == 0) {
      gpr_mu_unlock(&fds[i]->orphan_mu);
      UNREF_BY(exec_ctx, fds[i], 2, "pollset_set");
    } else {
      for (size_t j = 0; j < pollable_count; j++) {
        append_error(&error, pollable_add_fd(pollables[j], fds[i]), err_desc);
      }
      gpr_mu_unlock(&fds[i]->orphan_mu);
      out_fds[(*out_fd_count)++] = fds[i];
    }
  }
  return error;
}

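/* Add a pollset to the set: force the pollset onto a multipollable, add every
   fd the set already tracks to that pollable (compacting out orphaned fds),
   and record the pollable so fds added later reach this pollset too. */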
static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset_set *pss, grpc_pollset *ps) {
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PSS:%p: add pollset %p", pss, ps);
  }
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollset_set_add_pollset";
  pollable *pollable_obj = NULL;
  gpr_mu_lock(&ps->mu);
  if (!GRPC_LOG_IF_ERROR(err_desc, pollset_as_multipollable_locked(
                                       exec_ctx, ps, &pollable_obj))) {
    GPR_ASSERT(pollable_obj == NULL);
    gpr_mu_unlock(&ps->mu);
    return;
  }
  ps->containing_pollset_set_count++;
  gpr_mu_unlock(&ps->mu);
  pss = pss_lock_adam(pss);
  size_t initial_fd_count = pss->fd_count;
  pss->fd_count = 0;
  append_error(&error, add_fds_to_pollables(exec_ctx, pss->fds,
                                            initial_fd_count, &pollable_obj, 1,
                                            err_desc, pss->fds, &pss->fd_count),
               err_desc);
  if (pss->pollset_count == pss->pollset_capacity) {
    pss->pollset_capacity = GPR_MAX(pss->pollset_capacity * 2, 8);
    pss->pollsets = (pollable **)gpr_realloc(
        pss->pollsets, pss->pollset_capacity * sizeof(*pss->pollsets));
  }
  pss->pollsets[pss->pollset_count++] = pollable_obj;
  gpr_mu_unlock(&pss->mu);

  GRPC_LOG_IF_ERROR(err_desc, error);
}

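/* Merge two pollset_sets: walk both up to their roots (locking in pointer
   order to avoid deadlock), make the smaller root a child of the larger,
   cross-add their fds and pollsets, and move everything into the parent. */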
static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *a,
                                        grpc_pollset_set *b) {
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PSS: merge (%p, %p)", a, b);
  }
  grpc_error *error = GRPC_ERROR_NONE;
  static const char *err_desc = "pollset_set_add_pollset_set";
  for (;;) {
    if (a == b) {
      // pollset_set ancestors are the same: nothing to do
      return;
    }
    if (a > b) {
      GPR_SWAP(grpc_pollset_set *, a, b);
    }
    gpr_mu *a_mu = &a->mu;
    gpr_mu *b_mu = &b->mu;
    gpr_mu_lock(a_mu);
    gpr_mu_lock(b_mu);
    if (a->parent != NULL) {
      a = a->parent;
    } else if (b->parent != NULL) {
      b = b->parent;
    } else {
      break;  // exit loop, both pollset_sets locked
    }
    gpr_mu_unlock(a_mu);
    gpr_mu_unlock(b_mu);
  }
  // try to do the least copying possible
  // TODO(ctiller): there's probably a better heuristic here
  const size_t a_size = a->fd_count + a->pollset_count;
  const size_t b_size = b->fd_count + b->pollset_count;
  if (b_size > a_size) {
    GPR_SWAP(grpc_pollset_set *, a, b);
  }
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PSS: parent %p to %p", b, a);
  }
  gpr_ref(&a->refs);
  b->parent = a;
  if (a->fd_capacity < a->fd_count + b->fd_count) {
    a->fd_capacity = GPR_MAX(2 * a->fd_capacity, a->fd_count + b->fd_count);
    a->fds = (grpc_fd **)gpr_realloc(a->fds, a->fd_capacity * sizeof(*a->fds));
  }
  size_t initial_a_fd_count = a->fd_count;
  a->fd_count = 0;
  append_error(&error, add_fds_to_pollables(
                           exec_ctx, a->fds, initial_a_fd_count, b->pollsets,
                           b->pollset_count, "merge_a2b", a->fds, &a->fd_count),
               err_desc);
  append_error(&error, add_fds_to_pollables(exec_ctx, b->fds, b->fd_count,
                                            a->pollsets, a->pollset_count,
                                            "merge_b2a", a->fds, &a->fd_count),
               err_desc);
  if (a->pollset_capacity < a->pollset_count + b->pollset_count) {
    a->pollset_capacity =
        GPR_MAX(2 * a->pollset_capacity, a->pollset_count + b->pollset_count);
    a->pollsets = (pollable **)gpr_realloc(
        a->pollsets, a->pollset_capacity * sizeof(*a->pollsets));
  }
  memcpy(a->pollsets + a->pollset_count, b->pollsets,
         b->pollset_count * sizeof(*b->pollsets));
  a->pollset_count += b->pollset_count;
  gpr_free(b->fds);
  gpr_free(b->pollsets);
  b->fds = NULL;
  b->pollsets = NULL;
  b->fd_count = b->fd_capacity = b->pollset_count = b->pollset_capacity = 0;
  gpr_mu_unlock(&a->mu);
  gpr_mu_unlock(&b->mu);
}

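/* Intentionally a no-op: merged pollset_sets are never taken apart again, so
   there is nothing to undo here. */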
static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
                                        grpc_pollset_set *bag,
                                        grpc_pollset_set *item) {}

/*******************************************************************************
 * Event engine binding
 */

static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
}

static const grpc_event_engine_vtable vtable = {
    sizeof(grpc_pollset),

    fd_create,
    fd_wrapped_fd,
    fd_orphan,
    fd_shutdown,
    fd_notify_on_read,
    fd_notify_on_write,
    fd_is_shutdown,
    fd_get_read_notifier_pollset,

    pollset_init,
    pollset_shutdown,
    pollset_destroy,
    pollset_work,
    pollset_kick,
    pollset_add_fd,

    pollset_set_create,
    pollset_set_unref,  // destroy ==> unref 1 public ref
    pollset_set_add_pollset,
    pollset_set_del_pollset,
    pollset_set_add_pollset_set,
    pollset_set_del_pollset_set,
    pollset_set_add_fd,
    pollset_set_del_fd,

    shutdown_engine,
};

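/* Engine factory: return the epollex vtable only when wakeup fds and epoll
   with EPOLLEXCLUSIVE are both usable on this system; returning NULL lets
   event engine selection fall back to another poller. */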
const grpc_event_engine_vtable *grpc_init_epollex_linux(
    bool explicitly_requested) {
  if (!grpc_has_wakeup_fd()) {
    return NULL;
  }

  if (!grpc_is_epollexclusive_available()) {
    return NULL;
  }

#ifndef NDEBUG
  grpc_register_tracer(&grpc_trace_pollable_refcount);
#endif

  fd_global_init();

  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    pollset_global_shutdown();
    fd_global_shutdown();
    return NULL;
  }

  return &vtable;
}

#else /* defined(GRPC_LINUX_EPOLL) */
#if defined(GRPC_POSIX_SOCKET)
#include "src/core/lib/iomgr/ev_epollex_linux.h"
/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
 * NULL */
const grpc_event_engine_vtable *grpc_init_epollex_linux(
    bool explicitly_requested) {
  return NULL;
}
#endif /* defined(GRPC_POSIX_SOCKET) */

#endif /* !defined(GRPC_LINUX_EPOLL) */