blob: e5aab293bd3785d2c993f8afcccc6986fe150781 [file] [log] [blame]
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001/*
2 *
3 * Copyright 2016, Google Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Sree Kuchibhotlac7be7c62016-06-09 17:08:50 -070034#include <grpc/grpc_posix.h>
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -070035#include <grpc/support/port_platform.h>
36
Sree Kuchibhotla76a07952016-06-22 15:09:06 -070037/* This polling engine is only relevant on linux kernels supporting epoll() */
Sree Kuchibhotla5855c472016-06-08 12:56:56 -070038#ifdef GPR_LINUX_EPOLL
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -070039
Sree Kuchibhotla4c11a202016-06-06 09:23:25 -070040#include "src/core/lib/iomgr/ev_epoll_linux.h"
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -070041
42#include <assert.h>
43#include <errno.h>
44#include <poll.h>
45#include <signal.h>
46#include <string.h>
47#include <sys/epoll.h>
48#include <sys/socket.h>
49#include <unistd.h>
50
51#include <grpc/support/alloc.h>
52#include <grpc/support/log.h>
53#include <grpc/support/string_util.h>
54#include <grpc/support/tls.h>
55#include <grpc/support/useful.h>
56
57#include "src/core/lib/iomgr/ev_posix.h"
58#include "src/core/lib/iomgr/iomgr_internal.h"
59#include "src/core/lib/iomgr/wakeup_fd_posix.h"
Craig Tillerb39307d2016-06-30 15:39:13 -070060#include "src/core/lib/iomgr/workqueue.h"
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -070061#include "src/core/lib/profiling/timers.h"
62#include "src/core/lib/support/block_annotate.h"
63
/* TODO: sreek - Move this to init.c and initialize this like other tracers. */
static int grpc_polling_trace = 0; /* Disabled by default */

/* Logs at INFO level when polling tracing is enabled.
   Wrapped in do { } while (0) so the macro expands to a single statement:
   the previous bare `if (...) { ... }` form broke dangling-else association
   and misbehaved inside an unbraced `if (cond) GRPC_POLLING_TRACE(...); else`. */
#define GRPC_POLLING_TRACE(fmt, ...)          \
  do {                                        \
    if (grpc_polling_trace) {                 \
      gpr_log(GPR_INFO, (fmt), __VA_ARGS__);  \
    }                                         \
  } while (0)
70
Sree Kuchibhotlac7be7c62016-06-09 17:08:50 -070071static int grpc_wakeup_signal = -1;
72static bool is_grpc_wakeup_signal_initialized = false;
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -070073
Sree Kuchibhotlac7be7c62016-06-09 17:08:50 -070074/* Implements the function defined in grpc_posix.h. This function might be
75 * called before even calling grpc_init() to set either a different signal to
76 * use. If signum == -1, then the use of signals is disabled */
77void grpc_use_signal(int signum) {
78 grpc_wakeup_signal = signum;
79 is_grpc_wakeup_signal_initialized = true;
80
81 if (grpc_wakeup_signal < 0) {
82 gpr_log(GPR_INFO,
83 "Use of signals is disabled. Epoll engine will not be used");
84 } else {
85 gpr_log(GPR_INFO, "epoll engine will be using signal: %d",
86 grpc_wakeup_signal);
87 }
88}
89
90struct polling_island;
Sree Kuchibhotla5855c472016-06-08 12:56:56 -070091
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -070092/*******************************************************************************
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -070093 * Fd Declarations
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -070094 */
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -070095struct grpc_fd {
96 int fd;
97 /* refst format:
Sree Kuchibhotla5098f912016-05-31 10:58:17 -070098 bit 0 : 1=Active / 0=Orphaned
99 bits 1-n : refcount
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -0700100 Ref/Unref by two to avoid altering the orphaned bit */
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700101 gpr_atm refst;
102
103 gpr_mu mu;
Sree Kuchibhotla79a62332016-06-04 14:01:03 -0700104
105 /* Indicates that the fd is shutdown and that any pending read/write closures
106 should fail */
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -0700107 bool shutdown;
Sree Kuchibhotla79a62332016-06-04 14:01:03 -0700108
109 /* The fd is either closed or we relinquished control of it. In either cases,
110 this indicates that the 'fd' on this structure is no longer valid */
111 bool orphaned;
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700112
Sree Kuchibhotla3dbf4d62016-06-08 16:26:45 -0700113 /* TODO: sreek - Move this to a lockfree implementation */
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700114 grpc_closure *read_closure;
115 grpc_closure *write_closure;
116
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -0700117 /* The polling island to which this fd belongs to and the mutex protecting the
118 the field */
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700119 gpr_mu pi_mu;
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700120 struct polling_island *polling_island;
121
122 struct grpc_fd *freelist_next;
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700123 grpc_closure *on_done_closure;
124
Sree Kuchibhotla5855c472016-06-08 12:56:56 -0700125 /* The pollset that last noticed that the fd is readable */
126 grpc_pollset *read_notifier_pollset;
127
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700128 grpc_iomgr_object iomgr_object;
129};
130
/* Reference counting for fds. Define GRPC_FD_REF_COUNT_DEBUG to log every
   ref/unref with a reason and call site. */
// #define GRPC_FD_REF_COUNT_DEBUG
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file, int line);
static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
                     int line);
#define GRPC_FD_REF(fd, reason) fd_ref(fd, reason, __FILE__, __LINE__)
#define GRPC_FD_UNREF(fd, reason) fd_unref(fd, reason, __FILE__, __LINE__)
#else
static void fd_ref(grpc_fd *fd);
static void fd_unref(grpc_fd *fd);
#define GRPC_FD_REF(fd, reason) fd_ref(fd)
#define GRPC_FD_UNREF(fd, reason) fd_unref(fd)
#endif

static void fd_global_init(void);
static void fd_global_shutdown(void);

/* Sentinel values stored in grpc_fd.read_closure/write_closure: any other
   value is a real pending grpc_closure pointer. */
#define CLOSURE_NOT_READY ((grpc_closure *)0)
#define CLOSURE_READY ((grpc_closure *)1)
151
/*******************************************************************************
 * Polling island Declarations
 */

/* Reference counting for polling islands. Define GRPC_PI_REF_COUNT_DEBUG to
   log every ref/unref with a reason and call site. */
// #define GRPC_PI_REF_COUNT_DEBUG
#ifdef GRPC_PI_REF_COUNT_DEBUG

#define PI_ADD_REF(p, r) pi_add_ref_dbg((p), (r), __FILE__, __LINE__)
#define PI_UNREF(exec_ctx, p, r) \
  pi_unref_dbg((exec_ctx), (p), (r), __FILE__, __LINE__)

#else /* defined(GRPC_PI_REF_COUNT_DEBUG) */

#define PI_ADD_REF(p, r) pi_add_ref((p))
#define PI_UNREF(exec_ctx, p, r) pi_unref((exec_ctx), (p))

#endif /* !defined(GRPC_PI_REF_COUNT_DEBUG) */
169
/* A polling island: an epoll set shared by a group of fds/pollsets. Islands
   form a linked list through 'merged_to' when merged. */
typedef struct polling_island {
  gpr_mu mu;
  /* Ref count. Use PI_ADD_REF() and PI_UNREF() macros to increment/decrement
     the refcount.
     Once the ref count becomes zero, this structure is destroyed which means
     we should ensure that there is never a scenario where a PI_ADD_REF() is
     racing with a PI_UNREF() that just made the ref_count zero. */
  gpr_refcount ref_count;

  /* Pointer to the polling_island this merged into.
   * merged_to value is only set once in polling_island's lifetime (and that too
   * only if the island is merged with another island). Because of this, we can
   * use gpr_atm type here so that we can do atomic access on this and reduce
   * lock contention on 'mu' mutex.
   *
   * Note that if this field is not NULL (i.e not 0), all the remaining fields
   * (except mu and ref_count) are invalid and must be ignored. */
  gpr_atm merged_to;

  /* The workqueue associated with this polling island */
  grpc_workqueue *workqueue;

  /* The fd of the underlying epoll set */
  int epoll_fd;

  /* The file descriptors in the epoll set (dynamic array of fd_cnt entries,
     grown geometrically to fd_capacity) */
  size_t fd_cnt;
  size_t fd_capacity;
  grpc_fd **fds;
} polling_island;
200
/*******************************************************************************
 * Pollset Declarations
 */
/* A worker: one thread currently blocked in pollset_work. Workers are chained
   in a doubly-linked list rooted at grpc_pollset.root_worker. */
struct grpc_pollset_worker {
  /* Thread id of this worker */
  pthread_t pt_id;

  /* Used to prevent a worker from getting kicked multiple times */
  gpr_atm is_kicked;
  struct grpc_pollset_worker *next;
  struct grpc_pollset_worker *prev;
};

struct grpc_pollset {
  gpr_mu mu;
  grpc_pollset_worker root_worker;
  bool kicked_without_pollers;

  bool shutting_down;          /* Is the pollset shutting down ? */
  bool finish_shutdown_called; /* Is the 'finish_shutdown_locked()' called ? */
  grpc_closure *shutdown_done; /* Called after shutdown is complete */

  /* The polling island to which this pollset belongs to */
  struct polling_island *polling_island;
};
226
/*******************************************************************************
 * Pollset-set Declarations
 */
/* TODO: sreek - Change the pollset_set implementation such that a pollset_set
 * directly points to a polling_island (and adding an fd/pollset/pollset_set to
 * the current pollset_set would result in polling island merges. This would
 * remove the need to maintain fd_count here. This will also significantly
 * simplify the grpc_fd structure since we would no longer need to explicitly
 * maintain the orphaned state */
struct grpc_pollset_set {
  gpr_mu mu;

  /* Member pollsets (dynamic array) */
  size_t pollset_count;
  size_t pollset_capacity;
  grpc_pollset **pollsets;

  /* Nested pollset_sets (dynamic array) */
  size_t pollset_set_count;
  size_t pollset_set_capacity;
  struct grpc_pollset_set **pollset_sets;

  /* Member fds (dynamic array) */
  size_t fd_count;
  size_t fd_capacity;
  grpc_fd **fds;
};
251
252/*******************************************************************************
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700253 * Common helpers
254 */
255
Craig Tillerf975f742016-07-01 14:56:27 -0700256static bool append_error(grpc_error **composite, grpc_error *error,
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700257 const char *desc) {
Craig Tillerf975f742016-07-01 14:56:27 -0700258 if (error == GRPC_ERROR_NONE) return true;
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700259 if (*composite == GRPC_ERROR_NONE) {
260 *composite = GRPC_ERROR_CREATE(desc);
261 }
262 *composite = grpc_error_add_child(*composite, error);
Craig Tillerf975f742016-07-01 14:56:27 -0700263 return false;
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700264}
265
/*******************************************************************************
 * Polling island Definitions
 */

/* The wakeup fd that is used to wake up all threads in a Polling island. This
   is useful in the polling island merge operation where we need to wakeup all
   the threads currently polling the smaller polling island (so that they can
   start polling the new/merged polling island)

   NOTE: This fd is initialized to be readable and MUST NOT be consumed i.e the
   threads that woke up MUST NOT call grpc_wakeup_fd_consume_wakeup() */
static grpc_wakeup_fd polling_island_wakeup_fd;

/* Forward declaration (needed by pi_unref below) */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi);

#ifdef GRPC_TSAN
/* Currently TSAN may incorrectly flag data races between epoll_ctl and
   epoll_wait for any grpc_fd structs that are added to the epoll set via
   epoll_ctl and are returned (within a very short window) via epoll_wait().

   To work-around this race, we establish a happens-before relation between
   the code just-before epoll_ctl() and the code after epoll_wait() by using
   this atomic */
gpr_atm g_epoll_sync;
#endif /* defined(GRPC_TSAN) */
Sree Kuchibhotla41622a82016-06-13 16:43:14 -0700292
#ifdef GRPC_PI_REF_COUNT_DEBUG
static void pi_add_ref(polling_island *pi);
static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi);

/* Debug wrapper for pi_add_ref(): logs the ref transition with the call site.
   The count is sampled before the increment, so the logged old/new pair may
   be stale under concurrent ref traffic (logging only; not authoritative). */
static void pi_add_ref_dbg(polling_island *pi, char *reason, char *file,
                           int line) {
  long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count));
  pi_add_ref(pi);
  gpr_log(GPR_DEBUG, "Add ref pi: %p, old: %ld -> new:%ld (%s) - (%s, %d)",
          (void *)pi, old_cnt, old_cnt + 1, reason, file, line);
}

/* Debug wrapper for pi_unref(): logs the ref transition with the call site. */
static void pi_unref_dbg(grpc_exec_ctx *exec_ctx, polling_island *pi,
                         char *reason, char *file, int line) {
  long old_cnt = gpr_atm_acq_load(&(pi->ref_count.count));
  pi_unref(exec_ctx, pi);
  gpr_log(GPR_DEBUG, "Unref pi: %p, old:%ld -> new:%ld (%s) - (%s, %d)",
          (void *)pi, old_cnt, (old_cnt - 1), reason, file, line);
}
#endif
313
static void pi_add_ref(polling_island *pi) { gpr_ref(&pi->ref_count); }

static void pi_unref(grpc_exec_ctx *exec_ctx, polling_island *pi) {
  /* If ref count went to zero, delete the polling island.
     Note that this deletion need not be done under a lock. Once the ref count
     goes to zero, we are guaranteed that no one else holds a reference to the
     polling island (and that there is no racing pi_add_ref() call either).

     Also, if we are deleting the polling island and the merged_to field is
     non-empty, we should remove a ref to the merged_to polling island
   */
  if (gpr_unref(&pi->ref_count)) {
    polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
    polling_island_delete(exec_ctx, pi);
    if (next != NULL) {
      /* Recursive call: unwinds the whole merged_to chain one link at a time */
      PI_UNREF(exec_ctx, next, "pi_delete");
    }
  }
}
333
/* The caller is expected to hold pi->mu lock before calling this function.
   Adds fds[0..fd_count) to pi's epoll set and to pi->fds, taking a ref on
   each successfully-added fd iff add_fd_refs. An fd already in the epoll set
   (EEXIST) is skipped silently; any other epoll_ctl failure is appended to
   *error and that fd is skipped. */
static void polling_island_add_fds_locked(polling_island *pi, grpc_fd **fds,
                                          size_t fd_count, bool add_fd_refs,
                                          grpc_error **error) {
  int err;
  size_t i;
  struct epoll_event ev;
  char *err_msg;
  const char *err_desc = "polling_island_add_fds";

#ifdef GRPC_TSAN
  /* See the definition of g_epoll_sync for more context */
  gpr_atm_rel_store(&g_epoll_sync, (gpr_atm)0);
#endif /* defined(GRPC_TSAN) */

  for (i = 0; i < fd_count; i++) {
    /* Edge-triggered, both directions: readiness is signalled once per
       transition, so consumers must drain until EAGAIN */
    ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
    ev.data.ptr = fds[i];
    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD, fds[i]->fd, &ev);

    if (err < 0) {
      if (errno != EEXIST) {
        gpr_asprintf(
            &err_msg,
            "epoll_ctl (epoll_fd: %d) add fd: %d failed with error: %d (%s)",
            pi->epoll_fd, fds[i]->fd, errno, strerror(errno));
        append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
        gpr_free(err_msg);
      }

      /* Not added to pi->fds and no ref taken on epoll_ctl failure/EEXIST */
      continue;
    }

    if (pi->fd_cnt == pi->fd_capacity) {
      /* Grow by max(+8, *1.5) to amortize reallocation */
      pi->fd_capacity = GPR_MAX(pi->fd_capacity + 8, pi->fd_cnt * 3 / 2);
      pi->fds = gpr_realloc(pi->fds, sizeof(grpc_fd *) * pi->fd_capacity);
    }

    pi->fds[pi->fd_cnt++] = fds[i];
    if (add_fd_refs) {
      GRPC_FD_REF(fds[i], "polling_island");
    }
  }
}
378
Sree Kuchibhotla24b10622016-06-08 15:20:17 -0700379/* The caller is expected to hold pi->mu before calling this */
380static void polling_island_add_wakeup_fd_locked(polling_island *pi,
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700381 grpc_wakeup_fd *wakeup_fd,
382 grpc_error **error) {
Sree Kuchibhotla24b10622016-06-08 15:20:17 -0700383 struct epoll_event ev;
384 int err;
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700385 char *err_msg;
386 const char *err_desc = "polling_island_add_wakeup_fd";
Sree Kuchibhotla24b10622016-06-08 15:20:17 -0700387
388 ev.events = (uint32_t)(EPOLLIN | EPOLLET);
389 ev.data.ptr = wakeup_fd;
390 err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_ADD,
391 GRPC_WAKEUP_FD_GET_READ_FD(wakeup_fd), &ev);
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700392 if (err < 0 && errno != EEXIST) {
393 gpr_asprintf(&err_msg,
394 "epoll_ctl (epoll_fd: %d) add wakeup fd: %d failed with "
395 "error: %d (%s)",
396 pi->epoll_fd,
397 GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd), errno,
398 strerror(errno));
399 append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
400 gpr_free(err_msg);
Sree Kuchibhotla24b10622016-06-08 15:20:17 -0700401 }
402}
403
/* The caller is expected to hold pi->mu lock before calling this function.
   Removes every fd tracked in pi->fds from the epoll set. ENOENT is tolerated
   because a closed fd is removed from the epoll set automatically by the
   kernel. Drops the "polling_island" ref on each fd iff remove_fd_refs, then
   resets the tracked-fd count to zero. */
static void polling_island_remove_all_fds_locked(polling_island *pi,
                                                 bool remove_fd_refs,
                                                 grpc_error **error) {
  int err;
  size_t i;
  char *err_msg;
  const char *err_desc = "polling_island_remove_fds";

  for (i = 0; i < pi->fd_cnt; i++) {
    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, pi->fds[i]->fd, NULL);
    if (err < 0 && errno != ENOENT) {
      gpr_asprintf(&err_msg,
                   "epoll_ctl (epoll_fd: %d) delete fds[%zu]: %d failed with "
                   "error: %d (%s)",
                   pi->epoll_fd, i, pi->fds[i]->fd, errno, strerror(errno));
      append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
      gpr_free(err_msg);
    }

    if (remove_fd_refs) {
      GRPC_FD_UNREF(pi->fds[i], "polling_island");
    }
  }

  pi->fd_cnt = 0;
}
431
/* The caller is expected to hold pi->mu lock before calling this function.
   Removes a single fd from the epoll set (skipped when is_fd_closed, since
   the kernel auto-removes closed fds) and from pi->fds via swap-with-last,
   dropping the "polling_island" ref taken when it was added. */
static void polling_island_remove_fd_locked(polling_island *pi, grpc_fd *fd,
                                            bool is_fd_closed,
                                            grpc_error **error) {
  int err;
  size_t i;
  char *err_msg;
  const char *err_desc = "polling_island_remove_fd";

  /* If fd is already closed, then it would have been automatically been
     removed from the epoll set */
  if (!is_fd_closed) {
    err = epoll_ctl(pi->epoll_fd, EPOLL_CTL_DEL, fd->fd, NULL);
    if (err < 0 && errno != ENOENT) {
      gpr_asprintf(
          &err_msg,
          "epoll_ctl (epoll_fd: %d) del fd: %d failed with error: %d (%s)",
          pi->epoll_fd, fd->fd, errno, strerror(errno));
      append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
      gpr_free(err_msg);
    }
  }

  for (i = 0; i < pi->fd_cnt; i++) {
    if (pi->fds[i] == fd) {
      /* Order is irrelevant in pi->fds: replace with the last element */
      pi->fds[i] = pi->fds[--pi->fd_cnt];
      GRPC_FD_UNREF(fd, "polling_island");
      break;
    }
  }
}
463
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700464/* Might return NULL in case of an error */
Craig Tillerb39307d2016-06-30 15:39:13 -0700465static polling_island *polling_island_create(grpc_exec_ctx *exec_ctx,
466 grpc_fd *initial_fd,
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700467 grpc_error **error) {
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700468 polling_island *pi = NULL;
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700469 const char *err_desc = "polling_island_create";
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -0700470
Craig Tillerb39307d2016-06-30 15:39:13 -0700471 *error = GRPC_ERROR_NONE;
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700472
Craig Tillerb39307d2016-06-30 15:39:13 -0700473 pi = gpr_malloc(sizeof(*pi));
474 gpr_mu_init(&pi->mu);
475 pi->fd_cnt = 0;
476 pi->fd_capacity = 0;
477 pi->fds = NULL;
478 pi->epoll_fd = -1;
479 pi->workqueue = NULL;
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700480
Sree Kuchibhotla6a295452016-06-23 15:53:10 -0700481 gpr_ref_init(&pi->ref_count, 0);
Sree Kuchibhotla0224dcc2016-06-22 18:04:00 -0700482 gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);
Sree Kuchibhotla2f8ade02016-06-17 13:28:38 -0700483
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -0700484 pi->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
Sree Kuchibhotla41622a82016-06-13 16:43:14 -0700485
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -0700486 if (pi->epoll_fd < 0) {
Craig Tillerb39307d2016-06-30 15:39:13 -0700487 append_error(error, GRPC_OS_ERROR(errno, "epoll_create1"), err_desc);
488 goto done;
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700489 }
490
Craig Tillerb39307d2016-06-30 15:39:13 -0700491 polling_island_add_wakeup_fd_locked(pi, &grpc_global_wakeup_fd, error);
492
493 if (initial_fd != NULL) {
Craig Tillerb39307d2016-06-30 15:39:13 -0700494 polling_island_add_fds_locked(pi, &initial_fd, 1, true, error);
Craig Tillerb39307d2016-06-30 15:39:13 -0700495 }
496
Craig Tillerf975f742016-07-01 14:56:27 -0700497 if (append_error(error, grpc_workqueue_create(exec_ctx, &pi->workqueue),
498 err_desc) &&
499 *error == GRPC_ERROR_NONE) {
500 polling_island_add_fds_locked(pi, &pi->workqueue->wakeup_read_fd, 1, true,
501 error);
502 GPR_ASSERT(pi->workqueue->wakeup_read_fd->polling_island == NULL);
503 pi->workqueue->wakeup_read_fd->polling_island = pi;
504 PI_ADD_REF(pi, 1);
505 }
Craig Tillerb39307d2016-06-30 15:39:13 -0700506
507done:
508 if (*error != GRPC_ERROR_NONE) {
509 if (pi->epoll_fd < 0) {
510 close(pi->epoll_fd);
511 }
512 if (pi->workqueue != NULL) {
513 GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
514 }
515 gpr_mu_destroy(&pi->mu);
516 gpr_free(pi);
517 pi = NULL;
518 }
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700519 return pi;
520}
521
/* Physically destroys a polling island. Called from pi_unref() once the ref
   count has reached zero, so no locking is needed; the island must already
   have had all of its fds removed. */
static void polling_island_delete(grpc_exec_ctx *exec_ctx, polling_island *pi) {
  GPR_ASSERT(pi->fd_cnt == 0);

  gpr_atm_rel_store(&pi->merged_to, (gpr_atm)NULL);

  close(pi->epoll_fd);
  GRPC_WORKQUEUE_UNREF(exec_ctx, pi->workqueue, "polling_island");
  gpr_mu_destroy(&pi->mu);
  gpr_free(pi->fds);
  gpr_free(pi);
}
533
Sree Kuchibhotla229533b12016-06-21 20:42:52 -0700534/* Attempts to gets the last polling island in the linked list (liked by the
535 * 'merged_to' field). Since this does not lock the polling island, there are no
536 * guarantees that the island returned is the last island */
537static polling_island *polling_island_maybe_get_latest(polling_island *pi) {
538 polling_island *next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
539 while (next != NULL) {
540 pi = next;
541 next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
542 }
543
544 return pi;
545}
546
/* Gets the lock on the *latest* polling island i.e the last polling island in
   the linked list (linked by the 'merged_to' field). Call gpr_mu_unlock on the
   returned polling island's mu.
   Usage: To lock/unlock polling island "pi", do the following:
   polling_island *pi_latest = polling_island_lock(pi);
   ...
   ... critical section ..
   ...
   gpr_mu_unlock(&pi_latest->mu); // NOTE: use pi_latest->mu. NOT pi->mu */
static polling_island *polling_island_lock(polling_island *pi) {
  polling_island *next = NULL;

  while (true) {
    next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
    if (next == NULL) {
      /* Looks like 'pi' is the last node in the linked list but unless we
         check this by holding the pi->mu lock, we cannot be sure (i.e without
         the pi->mu lock, we don't prevent island merges).
         To be absolutely sure, check once more by holding the pi->mu lock */
      gpr_mu_lock(&pi->mu);
      next = (polling_island *)gpr_atm_acq_load(&pi->merged_to);
      if (next == NULL) {
        /* pi is infact the last node and we have the pi->mu lock. we're done */
        break;
      }

      /* pi->merged_to is not NULL i.e pi isn't the last node anymore. pi->mu
       * isn't the lock we are interested in. Continue traversing the list */
      gpr_mu_unlock(&pi->mu);
    }

    pi = next;
  }

  return pi;
}
583
/* Gets the lock on the *latest* polling islands in the linked lists pointed by
   *p and *q (and also updates *p and *q to point to the latest polling islands)

   This function is needed because calling the following block of code to obtain
   locks on polling islands (*p and *q) is prone to deadlocks.
   {
     polling_island_lock(*p, true);
     polling_island_lock(*q, true);
   }

   Usage/example:
   polling_island *p1;
   polling_island *p2;
   ..
   polling_island_lock_pair(&p1, &p2);
   ..
   .. Critical section with both p1 and p2 locked
   ..
   // Release locks: Always call polling_island_unlock_pair() to release locks
   polling_island_unlock_pair(p1, p2);
*/
static void polling_island_lock_pair(polling_island **p, polling_island **q) {
  polling_island *pi_1 = *p;
  polling_island *pi_2 = *q;
  polling_island *next_1 = NULL;
  polling_island *next_2 = NULL;

  /* The algorithm is simple:
      - Go to the last polling islands in the linked lists *pi_1 and *pi_2 (and
        keep updating pi_1 and pi_2)
      - Then obtain locks on the islands by following a lock order rule of
        locking polling_island with lower address first
           Special case: Before obtaining the locks, check if pi_1 and pi_2 are
           pointing to the same island. If that is the case, we can just call
           polling_island_lock()
      - After obtaining both the locks, double check that the polling islands
        are still the last polling islands in their respective linked lists
        (this is because there might have been polling island merges before
        we got the lock)
      - If the polling islands are the last islands, we are done. If not,
        release the locks and continue the process from the first step */
  while (true) {
    /* Walk each chain to its (currently) last island */
    next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
    while (next_1 != NULL) {
      pi_1 = next_1;
      next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
    }

    next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
    while (next_2 != NULL) {
      pi_2 = next_2;
      next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
    }

    /* Both chains converged to the same island: a single lock suffices */
    if (pi_1 == pi_2) {
      pi_1 = pi_2 = polling_island_lock(pi_1);
      break;
    }

    /* Address-ordered locking prevents deadlock against a concurrent caller
       locking the same pair in the opposite order */
    if (pi_1 < pi_2) {
      gpr_mu_lock(&pi_1->mu);
      gpr_mu_lock(&pi_2->mu);
    } else {
      gpr_mu_lock(&pi_2->mu);
      gpr_mu_lock(&pi_1->mu);
    }

    /* Re-check under the locks that both are still chain tails; a merge may
       have happened between the walk and the lock acquisition */
    next_1 = (polling_island *)gpr_atm_acq_load(&pi_1->merged_to);
    next_2 = (polling_island *)gpr_atm_acq_load(&pi_2->merged_to);
    if (next_1 == NULL && next_2 == NULL) {
      break;
    }

    /* At least one island was merged meanwhile: drop both locks and retry */
    gpr_mu_unlock(&pi_1->mu);
    gpr_mu_unlock(&pi_2->mu);
  }

  *p = pi_1;
  *q = pi_2;
}
664
Sree Kuchibhotla20d0a162016-06-23 15:14:03 -0700665static void polling_island_unlock_pair(polling_island *p, polling_island *q) {
666 if (p == q) {
667 gpr_mu_unlock(&p->mu);
668 } else {
669 gpr_mu_unlock(&p->mu);
670 gpr_mu_unlock(&q->mu);
671 }
672}
673
/* Merges polling islands p and q and returns the resulting (merged) island.
   The smaller island's fds are moved into the larger one and the smaller
   island is left pointing at the survivor via its 'merged_to' link. Errors
   from the fd-manipulation helpers are accumulated into *error. */
static polling_island *polling_island_merge(polling_island *p,
                                            polling_island *q,
                                            grpc_error **error) {
  /* Get locks on both the polling islands */
  polling_island_lock_pair(&p, &q);

  if (p != q) {
    /* Make sure that p points to the polling island with fewer fds than q */
    if (p->fd_cnt > q->fd_cnt) {
      GPR_SWAP(polling_island *, p, q);
    }

    /* Merge p with q i.e move all the fds from p (The one with fewer fds) to q
       Note that the refcounts on the fds being moved will not change here.
       This is why the last param in the following two functions is 'false') */
    polling_island_add_fds_locked(q, p->fds, p->fd_cnt, false, error);
    polling_island_remove_all_fds_locked(p, false, error);

    /* Wakeup all the pollers (if any) on p so that they pickup this change */
    polling_island_add_wakeup_fd_locked(p, &polling_island_wakeup_fd, error);

    /* Add the 'merged_to' link from p --> q */
    gpr_atm_rel_store(&p->merged_to, (gpr_atm)q);
    PI_ADD_REF(q, "pi_merge"); /* To account for the new incoming ref from p */
  }
  /* else if p == q, nothing needs to be done */

  polling_island_unlock_pair(p, q);

  /* Return the merged polling island (Note that no merge would have happened
     if p == q which is ok) */
  return q;
}
707
Sree Kuchibhotla3131c262016-06-21 17:28:28 -0700708static grpc_error *polling_island_global_init() {
709 grpc_error *error = GRPC_ERROR_NONE;
710
Sree Kuchibhotla3131c262016-06-21 17:28:28 -0700711 error = grpc_wakeup_fd_init(&polling_island_wakeup_fd);
712 if (error == GRPC_ERROR_NONE) {
713 error = grpc_wakeup_fd_wakeup(&polling_island_wakeup_fd);
714 }
715
716 return error;
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700717}
718
/* Tears down the shared wakeup fd created in polling_island_global_init() */
static void polling_island_global_shutdown() {
  grpc_wakeup_fd_destroy(&polling_island_wakeup_fd);
}
722
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700723/*******************************************************************************
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -0700724 * Fd Definitions
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700725 */
726
/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

/* TODO: sreek: Right now, this wakes up all pollers. In future we should make
 * sure to wake up one polling thread (which can wake up other threads if
 * needed) */
grpc_wakeup_fd grpc_global_wakeup_fd;

/* Freelist of recycled grpc_fd structs (see rationale above). Both the list
   head and the freelist_next links are guarded by fd_freelist_mu. */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
752
#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
/* Adds 'n' to fd->refst; in debug builds also logs the transition.
   The refcount must already be > 0 (asserted below). */
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  gpr_log(GPR_DEBUG, "FD %d %p ref %d %ld -> %ld [%s; %s:%d]", fd->fd,
          (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}

#ifdef GRPC_FD_REF_COUNT_DEBUG
/* Subtracts 'n' from fd->refst. When the count drops to zero the struct is
   pushed onto the freelist rather than freed (see the freelist comment
   above). */
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                     int line) {
  gpr_atm old;
  gpr_log(GPR_DEBUG, "FD %d %p unref %d %ld -> %ld [%s; %s:%d]", fd->fd,
          (void *)fd, n, gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
  gpr_atm old;
#endif
  old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    /* Last reference: recycle the struct via the freelist */
    gpr_mu_lock(&fd_freelist_mu);
    fd->freelist_next = fd_freelist;
    fd_freelist = fd;
    grpc_iomgr_unregister_object(&fd->iomgr_object);

    gpr_mu_unlock(&fd_freelist_mu);
  } else {
    GPR_ASSERT(old > n);
  }
}
793
/* Increment refcount by two to avoid changing the orphan bit (the low bit of
   refst doubles as the 'not orphaned' flag - see fd_is_orphaned() - so
   external references always move in steps of 2) */
#ifdef GRPC_FD_REF_COUNT_DEBUG
static void fd_ref(grpc_fd *fd, const char *reason, const char *file,
                   int line) {
  ref_by(fd, 2, reason, file, line);
}

static void fd_unref(grpc_fd *fd, const char *reason, const char *file,
                     int line) {
  unref_by(fd, 2, reason, file, line);
}
#else
static void fd_ref(grpc_fd *fd) { ref_by(fd, 2); }
static void fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
809
/* One-time setup for the grpc_fd freelist */
static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
811
/* Frees every grpc_fd parked on the freelist and destroys the freelist mutex */
static void fd_global_shutdown(void) {
  /* NOTE(review): the immediate lock/unlock appears to act as a barrier so any
     thread currently mutating the freelist under fd_freelist_mu finishes
     before teardown - confirm no new entries can arrive after this point */
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_mu_destroy(&fd->mu);
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}
823
/* Obtains a grpc_fd wrapping the kernel descriptor 'fd', reusing a struct
   from the freelist when possible. 'name' is used only for iomgr registration
   and debug logging. */
static grpc_fd *fd_create(int fd, const char *name) {
  grpc_fd *new_fd = NULL;

  /* Prefer a recycled struct from the freelist */
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == NULL) {
    new_fd = gpr_malloc(sizeof(grpc_fd));
    gpr_mu_init(&new_fd->mu);
    gpr_mu_init(&new_fd->pi_mu);
  }

  /* Note: It is not really needed to get the new_fd->mu lock here. If this is a
     newly created fd (or an fd we got from the freelist), no one else would be
     holding a lock to it anyway. */
  gpr_mu_lock(&new_fd->mu);

  /* refst = 1: one reference, low bit set => not orphaned */
  gpr_atm_rel_store(&new_fd->refst, (gpr_atm)1);
  new_fd->fd = fd;
  new_fd->shutdown = false;
  new_fd->orphaned = false;
  new_fd->read_closure = CLOSURE_NOT_READY;
  new_fd->write_closure = CLOSURE_NOT_READY;
  new_fd->polling_island = NULL;
  new_fd->freelist_next = NULL;
  new_fd->on_done_closure = NULL;
  new_fd->read_notifier_pollset = NULL;

  gpr_mu_unlock(&new_fd->mu);

  char *fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifdef GRPC_FD_REF_COUNT_DEBUG
  gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, (void *)new_fd, fd_name);
#endif
  gpr_free(fd_name);
  return new_fd;
}
867
868static bool fd_is_orphaned(grpc_fd *fd) {
869 return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
870}
871
872static int fd_wrapped_fd(grpc_fd *fd) {
873 int ret_fd = -1;
874 gpr_mu_lock(&fd->mu);
Sree Kuchibhotla79a62332016-06-04 14:01:03 -0700875 if (!fd->orphaned) {
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -0700876 ret_fd = fd->fd;
877 }
878 gpr_mu_unlock(&fd->mu);
879
880 return ret_fd;
881}
882
/* Orphans the grpc_fd: closes (or hands back via *release_fd) the kernel fd,
   detaches the fd from its polling island, and schedules 'on_done'. The
   grpc_fd struct itself stays alive until its refcount drains to zero. */
static void fd_orphan(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                      grpc_closure *on_done, int *release_fd,
                      const char *reason) {
  bool is_fd_closed = false;
  grpc_error *error = GRPC_ERROR_NONE;

  gpr_mu_lock(&fd->mu);
  fd->on_done_closure = on_done;

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (release_fd != NULL) {
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
    is_fd_closed = true;
  }

  fd->orphaned = true;

  /* Remove the active status but keep referenced. We want this grpc_fd struct
     to be alive (and not added to freelist) until the end of this function */
  REF_BY(fd, 1, reason);

  /* Remove the fd from the polling island:
     - Get a lock on the latest polling island (i.e the last island in the
       linked list pointed by fd->polling_island). This is the island that
       would actually contain the fd
     - Remove the fd from the latest polling island
     - Unlock the latest polling island
     - Set fd->polling_island to NULL (but remove the ref on the polling island
       before doing this.) */
  gpr_mu_lock(&fd->pi_mu);
  if (fd->polling_island != NULL) {
    polling_island *pi_latest = polling_island_lock(fd->polling_island);
    polling_island_remove_fd_locked(pi_latest, fd, is_fd_closed, &error);
    gpr_mu_unlock(&pi_latest->mu);

    PI_UNREF(exec_ctx, fd->polling_island, "fd_orphan");
    fd->polling_island = NULL;
  }
  gpr_mu_unlock(&fd->pi_mu);

  grpc_exec_ctx_sched(exec_ctx, fd->on_done_closure, error, NULL);

  gpr_mu_unlock(&fd->mu);
  /* Drop the '+1 active' from above plus the low bit in one step */
  UNREF_BY(fd, 2, reason); /* Drop the reference */
  GRPC_LOG_IF_ERROR("fd_orphan", GRPC_ERROR_REF(error));
}
932
Sree Kuchibhotla3131c262016-06-21 17:28:28 -0700933static grpc_error *fd_shutdown_error(bool shutdown) {
934 if (!shutdown) {
935 return GRPC_ERROR_NONE;
936 } else {
937 return GRPC_ERROR_CREATE("FD shutdown");
938 }
939}
940
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700941static void notify_on_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
942 grpc_closure **st, grpc_closure *closure) {
Sree Kuchibhotla3131c262016-06-21 17:28:28 -0700943 if (fd->shutdown) {
944 grpc_exec_ctx_sched(exec_ctx, closure, GRPC_ERROR_CREATE("FD shutdown"),
945 NULL);
946 } else if (*st == CLOSURE_NOT_READY) {
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700947 /* not ready ==> switch to a waiting state by setting the closure */
948 *st = closure;
949 } else if (*st == CLOSURE_READY) {
950 /* already ready ==> queue the closure to run immediately */
951 *st = CLOSURE_NOT_READY;
Sree Kuchibhotla3131c262016-06-21 17:28:28 -0700952 grpc_exec_ctx_sched(exec_ctx, closure, fd_shutdown_error(fd->shutdown),
953 NULL);
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -0700954 } else {
955 /* upcallptr was set to a different closure. This is an error! */
956 gpr_log(GPR_ERROR,
957 "User called a notify_on function with a previous callback still "
958 "pending");
959 abort();
960 }
961}
962
/* returns 1 if state becomes not ready (i.e. a waiting closure was scheduled;
   the error passed to it reflects fd->shutdown). Must be called with fd->mu
   held. */
static int set_ready_locked(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                            grpc_closure **st) {
  if (*st == CLOSURE_READY) {
    /* duplicate ready ==> ignore */
    return 0;
  } else if (*st == CLOSURE_NOT_READY) {
    /* not ready, and not waiting ==> flag ready */
    *st = CLOSURE_READY;
    return 0;
  } else {
    /* waiting ==> queue closure */
    grpc_exec_ctx_sched(exec_ctx, *st, fd_shutdown_error(fd->shutdown), NULL);
    *st = CLOSURE_NOT_READY;
    return 1;
  }
}
980
Sree Kuchibhotla5855c472016-06-08 12:56:56 -0700981static grpc_pollset *fd_get_read_notifier_pollset(grpc_exec_ctx *exec_ctx,
982 grpc_fd *fd) {
983 grpc_pollset *notifier = NULL;
984
985 gpr_mu_lock(&fd->mu);
986 notifier = fd->read_notifier_pollset;
987 gpr_mu_unlock(&fd->mu);
988
989 return notifier;
990}
991
Sree Kuchibhotla24b6eae2016-06-21 18:01:14 -0700992static bool fd_is_shutdown(grpc_fd *fd) {
993 gpr_mu_lock(&fd->mu);
994 const bool r = fd->shutdown;
995 gpr_mu_unlock(&fd->mu);
996 return r;
997}
998
/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  gpr_mu_lock(&fd->mu);
  /* Do the actual shutdown only once */
  if (!fd->shutdown) {
    fd->shutdown = true;

    /* Shut the underlying descriptor in both directions */
    shutdown(fd->fd, SHUT_RDWR);
    /* Flush any pending read and write closures. Since fd->shutdown is 'true'
       at this point, the closures would be called with 'success = false' */
    set_ready_locked(exec_ctx, fd, &fd->read_closure);
    set_ready_locked(exec_ctx, fd, &fd->write_closure);
  }
  gpr_mu_unlock(&fd->mu);
}
1014
/* Registers (or immediately schedules) 'closure' to run when fd becomes
   readable; fd->mu serializes with fd_become_readable / fd_shutdown. */
static void fd_notify_on_read(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                              grpc_closure *closure) {
  gpr_mu_lock(&fd->mu);
  notify_on_locked(exec_ctx, fd, &fd->read_closure, closure);
  gpr_mu_unlock(&fd->mu);
}
1021
/* Registers (or immediately schedules) 'closure' to run when fd becomes
   writable; fd->mu serializes with fd_become_writable / fd_shutdown. */
static void fd_notify_on_write(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_closure *closure) {
  gpr_mu_lock(&fd->mu);
  notify_on_locked(exec_ctx, fd, &fd->write_closure, closure);
  gpr_mu_unlock(&fd->mu);
}
1028
Craig Tillerd6ba6192016-06-30 15:42:41 -07001029static grpc_workqueue *fd_get_workqueue(grpc_fd *fd) {
1030 gpr_mu_lock(&fd->pi_mu);
1031 grpc_workqueue *workqueue = NULL;
1032 if (fd->polling_island != NULL) {
1033 workqueue =
1034 GRPC_WORKQUEUE_REF(fd->polling_island->workqueue, "get_workqueue");
1035 }
1036 gpr_mu_unlock(&fd->pi_mu);
1037 return workqueue;
1038}
Craig Tiller70bd4832016-06-30 14:20:46 -07001039
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001040/*******************************************************************************
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -07001041 * Pollset Definitions
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001042 */
/* Per-thread polling state */
GPR_TLS_DECL(g_current_thread_pollset); /* pollset this thread is polling */
GPR_TLS_DECL(g_current_thread_worker);  /* this thread's worker record */
/* NOTE(review): these sigmask fields are consumed elsewhere in this file
   (not visible in this chunk) - presumably by the epoll/signal-mask dance in
   pollset_work; confirm before relying on this description. */
static __thread bool g_initialized_sigmask;
static __thread sigset_t g_orig_sigmask;
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001047
/* Handler for grpc_wakeup_signal (delivered via pthread_kill in
   pollset_worker_kick). Its body is intentionally (almost) empty: the
   signal's only job is to interrupt a blocking poll call. */
static void sig_handler(int sig_num) {
#ifdef GRPC_EPOLL_DEBUG
  gpr_log(GPR_INFO, "Received signal %d", sig_num);
#endif
}
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001053
/* Installs sig_handler for grpc_wakeup_signal so workers can be kicked */
static void poller_kick_init() { signal(grpc_wakeup_signal, sig_handler); }
Sree Kuchibhotla5855c472016-06-08 12:56:56 -07001055
/* Global state management */
static grpc_error *pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  /* Install the wakeup-signal handler used by pollset_worker_kick() */
  poller_kick_init();
  return grpc_wakeup_fd_init(&grpc_global_wakeup_fd);
}
1063
/* Tears down the state created by pollset_global_init() */
static void pollset_global_shutdown(void) {
  grpc_wakeup_fd_destroy(&grpc_global_wakeup_fd);
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
}
1069
/* Delivers grpc_wakeup_signal to the worker's thread, at most once per
   kick cycle: the is_kicked CAS (0 -> 1) de-duplicates concurrent kicks. */
static grpc_error *pollset_worker_kick(grpc_pollset_worker *worker) {
  grpc_error *err = GRPC_ERROR_NONE;

  /* Kick the worker only if it was not already kicked */
  if (gpr_atm_no_barrier_cas(&worker->is_kicked, (gpr_atm)0, (gpr_atm)1)) {
    GRPC_POLLING_TRACE(
        "pollset_worker_kick: Kicking worker: %p (thread id: %ld)",
        (void *)worker, worker->pt_id);
    int err_num = pthread_kill(worker->pt_id, grpc_wakeup_signal);
    if (err_num != 0) {
      err = GRPC_OS_ERROR(err_num, "pthread_kill");
    }
  }
  return err;
}
1085
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -07001086/* Return 1 if the pollset has active threads in pollset_work (pollset must
1087 * be locked) */
1088static int pollset_has_workers(grpc_pollset *p) {
1089 return p->root_worker.next != &p->root_worker;
1090}
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001091
1092static void remove_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
1093 worker->prev->next = worker->next;
1094 worker->next->prev = worker->prev;
1095}
1096
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001097static grpc_pollset_worker *pop_front_worker(grpc_pollset *p) {
1098 if (pollset_has_workers(p)) {
1099 grpc_pollset_worker *w = p->root_worker.next;
1100 remove_worker(p, w);
1101 return w;
1102 } else {
1103 return NULL;
1104 }
1105}
1106
1107static void push_back_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
1108 worker->next = &p->root_worker;
1109 worker->prev = worker->next->prev;
1110 worker->prev->next = worker->next->prev = worker;
1111}
1112
1113static void push_front_worker(grpc_pollset *p, grpc_pollset_worker *worker) {
1114 worker->prev = &p->root_worker;
1115 worker->next = worker->prev->next;
1116 worker->prev->next = worker->next->prev = worker;
1117}
1118
/* p->mu must be held before calling this function.
   Kicks either a specific worker, every worker (GRPC_POLLSET_KICK_BROADCAST),
   or - when specific_worker is NULL - one arbitrary worker. If there is
   nobody to kick, the kick is remembered in kicked_without_pollers so the
   next poller does not block. */
static grpc_error *pollset_kick(grpc_pollset *p,
                                grpc_pollset_worker *specific_worker) {
  GPR_TIMER_BEGIN("pollset_kick", 0);
  grpc_error *error = GRPC_ERROR_NONE;
  const char *err_desc = "Kick Failure";
  grpc_pollset_worker *worker = specific_worker;
  if (worker != NULL) {
    if (worker == GRPC_POLLSET_KICK_BROADCAST) {
      /* Broadcast: kick every worker except the calling thread's own */
      if (pollset_has_workers(p)) {
        GPR_TIMER_BEGIN("pollset_kick.broadcast", 0);
        for (worker = p->root_worker.next; worker != &p->root_worker;
             worker = worker->next) {
          if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
            append_error(&error, pollset_worker_kick(worker), err_desc);
          }
        }
        GPR_TIMER_END("pollset_kick.broadcast", 0);
      } else {
        p->kicked_without_pollers = true;
      }
    } else {
      GPR_TIMER_MARK("kicked_specifically", 0);
      /* Kicking the current thread's own worker is unnecessary */
      if (gpr_tls_get(&g_current_thread_worker) != (intptr_t)worker) {
        append_error(&error, pollset_worker_kick(worker), err_desc);
      }
    }
  } else if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)p) {
    /* Since worker == NULL, it means that we can kick "any" worker on this
       pollset 'p'. If 'p' happens to be the same pollset this thread is
       currently polling (i.e in pollset_work() function), then there is no need
       to kick any other worker since the current thread can just absorb the
       kick. This is the reason why we enter this case only when
       g_current_thread_pollset is != p */

    GPR_TIMER_MARK("kick_anonymous", 0);
    worker = pop_front_worker(p);
    if (worker != NULL) {
      GPR_TIMER_MARK("finally_kick", 0);
      /* Rotate the kicked worker to the back of the list */
      push_back_worker(p, worker);
      append_error(&error, pollset_worker_kick(worker), err_desc);
    } else {
      GPR_TIMER_MARK("kicked_no_pollers", 0);
      p->kicked_without_pollers = true;
    }
  }

  GPR_TIMER_END("pollset_kick", 0);
  GRPC_LOG_IF_ERROR("pollset_kick", GRPC_ERROR_REF(error));
  return error;
}
1170
/* Wakes up 'some poller' by signalling the process-wide wakeup fd (see the
   comment on grpc_global_wakeup_fd above: currently this wakes all pollers) */
static grpc_error *kick_poller(void) {
  return grpc_wakeup_fd_wakeup(&grpc_global_wakeup_fd);
}
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001174
/* Initializes 'pollset' and hands its mutex back to the caller via *mu. */
static void pollset_init(grpc_pollset *pollset, gpr_mu **mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;

  /* Empty circular worker list: the root sentinel points at itself */
  pollset->root_worker.next = pollset->root_worker.prev = &pollset->root_worker;
  pollset->kicked_without_pollers = false;

  pollset->shutting_down = false;
  pollset->finish_shutdown_called = false;
  pollset->shutdown_done = NULL;

  pollset->polling_island = NULL;
}
1188
/* Convert a timespec to milliseconds:
   - Very small or negative poll times are clamped to zero to do a non-blocking
     poll (which becomes spin polling)
   - Other small values are rounded up to one millisecond
   - Longer than a millisecond polls are rounded up to the next nearest
     millisecond to avoid spinning
   - Infinite timeouts are converted to -1 */
static int poll_deadline_to_millis_timeout(gpr_timespec deadline,
                                           gpr_timespec now) {
  gpr_timespec timeout;
  static const int64_t max_spin_polling_us = 10;
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1;
  }

  /* Deadlines within max_spin_polling_us of now => non-blocking poll */
  if (gpr_time_cmp(deadline, gpr_time_add(now, gpr_time_from_micros(
                                                   max_spin_polling_us,
                                                   GPR_TIMESPAN))) <= 0) {
    return 0;
  }
  timeout = gpr_time_sub(deadline, now);
  /* Add (1ms - 1ns) before converting so the result rounds up, never down */
  return gpr_time_to_millis(gpr_time_add(
      timeout, gpr_time_from_nanos(GPR_NS_PER_MS - 1, GPR_TIMESPAN)));
}
1213
/* Marks the fd readable: schedules any pending read closure and records the
   pollset that observed the readability event. */
static void fd_become_readable(grpc_exec_ctx *exec_ctx, grpc_fd *fd,
                               grpc_pollset *notifier) {
  /* Need the fd->mu since we might be racing with fd_notify_on_read */
  gpr_mu_lock(&fd->mu);
  set_ready_locked(exec_ctx, fd, &fd->read_closure);
  fd->read_notifier_pollset = notifier;
  gpr_mu_unlock(&fd->mu);
}
1222
/* Marks the fd writable: schedules any pending write closure. */
static void fd_become_writable(grpc_exec_ctx *exec_ctx, grpc_fd *fd) {
  /* Need the fd->mu since we might be racing with fd_notify_on_write */
  gpr_mu_lock(&fd->mu);
  set_ready_locked(exec_ctx, fd, &fd->write_closure);
  gpr_mu_unlock(&fd->mu);
}
1229
Craig Tillerb39307d2016-06-30 15:39:13 -07001230static void pollset_release_polling_island(grpc_exec_ctx *exec_ctx,
1231 grpc_pollset *ps, char *reason) {
Sree Kuchibhotla2f8ade02016-06-17 13:28:38 -07001232 if (ps->polling_island != NULL) {
Craig Tillerb39307d2016-06-30 15:39:13 -07001233 PI_UNREF(exec_ctx, ps->polling_island, reason);
Sree Kuchibhotla8e4926c2016-06-08 20:33:19 -07001234 }
Sree Kuchibhotla2f8ade02016-06-17 13:28:38 -07001235 ps->polling_island = NULL;
Sree Kuchibhotla8e4926c2016-06-08 20:33:19 -07001236}
1237
/* Completes pollset shutdown: releases the polling island and schedules the
   shutdown_done closure. Requires pollset->mu held and no active workers. */
static void finish_shutdown_locked(grpc_exec_ctx *exec_ctx,
                                   grpc_pollset *pollset) {
  /* The pollset cannot have any workers if we are at this stage */
  GPR_ASSERT(!pollset_has_workers(pollset));

  pollset->finish_shutdown_called = true;

  /* Release the ref and set pollset->polling_island to NULL */
  pollset_release_polling_island(exec_ctx, pollset, "ps_shutdown");
  grpc_exec_ctx_sched(exec_ctx, pollset->shutdown_done, GRPC_ERROR_NONE, NULL);
}
1249
/* pollset->mu lock must be held by the caller before calling this.
   Begins shutdown: kicks all workers and, if none are active, completes the
   shutdown immediately; otherwise the last departing worker finishes it. */
static void pollset_shutdown(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                             grpc_closure *closure) {
  GPR_TIMER_BEGIN("pollset_shutdown", 0);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutting_down = true;
  pollset->shutdown_done = closure;
  pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);

  /* If the pollset has any workers, we cannot call finish_shutdown_locked()
     because it would release the underlying polling island. In such a case, we
     let the last worker call finish_shutdown_locked() from pollset_work() */
  if (!pollset_has_workers(pollset)) {
    GPR_ASSERT(!pollset->finish_shutdown_called);
    GPR_TIMER_MARK("pollset_shutdown.finish_shutdown_locked", 0);
    finish_shutdown_locked(exec_ctx, pollset);
  }
  GPR_TIMER_END("pollset_shutdown", 0);
}
1269
/* pollset_shutdown is guaranteed to be called before pollset_destroy. So other
 * than destroying the mutexes, there is nothing special that needs to be done
 * here */
static void pollset_destroy(grpc_pollset *pollset) {
  GPR_ASSERT(!pollset_has_workers(pollset));
  gpr_mu_destroy(&pollset->mu);
}
1277
Craig Tiller2b49ea92016-07-01 13:21:27 -07001278static void pollset_reset(grpc_pollset *pollset) {
Sree Kuchibhotla8e4926c2016-06-08 20:33:19 -07001279 GPR_ASSERT(pollset->shutting_down);
1280 GPR_ASSERT(!pollset_has_workers(pollset));
1281 pollset->shutting_down = false;
1282 pollset->finish_shutdown_called = false;
1283 pollset->kicked_without_pollers = false;
1284 pollset->shutdown_done = NULL;
Craig Tillerb39307d2016-06-30 15:39:13 -07001285 GPR_ASSERT(pollset->polling_island == NULL);
Sree Kuchibhotla8e4926c2016-06-08 20:33:19 -07001286}
1287
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -07001288#define GRPC_EPOLL_MAX_EVENTS 1000
/* Does the actual polling for the pollset: blocks in epoll_pwait() on the
   pollset's polling island's epoll fd and dispatches the resulting events.
   Called with pollset->mu held; the lock is RELEASED before blocking and is
   NOT re-acquired before returning (hence the name). Errors encountered are
   accumulated into *error via append_error().
   Note: sig_mask contains the signal mask to use *during* epoll_wait() */
static void pollset_work_and_unlock(grpc_exec_ctx *exec_ctx,
                                    grpc_pollset *pollset,
                                    grpc_pollset_worker *worker, int timeout_ms,
                                    sigset_t *sig_mask, grpc_error **error) {
  struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
  int epoll_fd = -1;
  int ep_rv;
  polling_island *pi = NULL;
  char *err_msg;
  const char *err_desc = "pollset_work_and_unlock";
  GPR_TIMER_BEGIN("pollset_work_and_unlock", 0);

  /* We need to get the epoll_fd to wait on. The epoll_fd is in inside the
     latest polling island pointed by pollset->polling_island.

     Since epoll_fd is immutable, we can read it without obtaining the polling
     island lock. There is however a possibility that the polling island (from
     which we got the epoll_fd) got merged with another island while we are
     in this function. This is still okay because in such a case, we will wakeup
     right-away from epoll_wait() and pick up the latest polling_island the next
     this function (i.e pollset_work_and_unlock()) is called */

  if (pollset->polling_island == NULL) {
    /* Lazily create a polling island for a pollset that has none yet */
    pollset->polling_island = polling_island_create(exec_ctx, NULL, error);
    if (pollset->polling_island == NULL) {
      GPR_TIMER_END("pollset_work_and_unlock", 0);
      return; /* Fatal error. We cannot continue */
    }

    PI_ADD_REF(pollset->polling_island, "ps");
    GRPC_POLLING_TRACE("pollset_work: pollset: %p created new pi: %p",
                       (void *)pollset, (void *)pollset->polling_island);
  }

  pi = polling_island_maybe_get_latest(pollset->polling_island);
  epoll_fd = pi->epoll_fd;

  /* Update the pollset->polling_island since the island being pointed by
     pollset->polling_island maybe older than the one pointed by pi) */
  if (pollset->polling_island != pi) {
    /* Always do PI_ADD_REF before PI_UNREF because PI_UNREF may cause the
       polling island to be deleted */
    PI_ADD_REF(pi, "ps");
    PI_UNREF(exec_ctx, pollset->polling_island, "ps");
    pollset->polling_island = pi;
  }

  /* Add an extra ref so that the island does not get destroyed (which means
     the epoll_fd won't be closed) while we are are doing an epoll_wait() on the
     epoll_fd */
  PI_ADD_REF(pi, "ps_work");
  gpr_mu_unlock(&pollset->mu);

  /* Loop: if epoll_pwait() returned a completely full event buffer, there may
     be more pending events, so poll again (with the caller's timeout) */
  do {
    ep_rv = epoll_pwait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms,
                        sig_mask);
    if (ep_rv < 0) {
      if (errno != EINTR) {
        gpr_asprintf(&err_msg,
                     "epoll_wait() epoll fd: %d failed with error: %d (%s)",
                     epoll_fd, errno, strerror(errno));
        append_error(error, GRPC_OS_ERROR(errno, err_msg), err_desc);
      } else {
        /* We were interrupted. Save an interation by doing a zero timeout
           epoll_wait to see if there are any other events of interest */
        GRPC_POLLING_TRACE(
            "pollset_work: pollset: %p, worker: %p received kick",
            (void *)pollset, (void *)worker);
        ep_rv = epoll_wait(epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
      }
    }

#ifdef GRPC_TSAN
    /* See the definition of g_poll_sync for more details */
    gpr_atm_acq_load(&g_epoll_sync);
#endif /* defined(GRPC_TSAN) */

    for (int i = 0; i < ep_rv; ++i) {
      void *data_ptr = ep_ev[i].data.ptr;
      if (data_ptr == &grpc_global_wakeup_fd) {
        /* Global kick: consume the wakeup so the wakeup fd is re-armed */
        append_error(error,
                     grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd),
                     err_desc);
      } else if (data_ptr == &polling_island_wakeup_fd) {
        GRPC_POLLING_TRACE(
            "pollset_work: pollset: %p, worker: %p polling island (epoll_fd: "
            "%d) got merged",
            (void *)pollset, (void *)worker, epoll_fd);
        /* This means that our polling island is merged with a different
           island. We do not have to do anything here since the subsequent call
           to the function pollset_work_and_unlock() will pick up the correct
           epoll_fd */
      } else {
        /* A regular fd event: EPOLLERR/EPOLLHUP ("cancel") is surfaced to
           both the read and write paths */
        grpc_fd *fd = data_ptr;
        int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
        int read_ev = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
        int write_ev = ep_ev[i].events & EPOLLOUT;
        if (read_ev || cancel) {
          fd_become_readable(exec_ctx, fd, pollset);
        }
        if (write_ev || cancel) {
          fd_become_writable(exec_ctx, fd);
        }
      }
    }
  } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);

  GPR_ASSERT(pi != NULL);

  /* Before leaving, release the extra ref we added to the polling island. It
     is important to use "pi" here (i.e our old copy of pollset->polling_island
     that we got before releasing the polling island lock). This is because
     pollset->polling_island pointer might get udpated in other parts of the
     code when there is an island merge while we are doing epoll_wait() above */
  PI_UNREF(exec_ctx, pi, "ps_work");

  GPR_TIMER_END("pollset_work_and_unlock", 0);
}
1408
/* Runs one iteration of polling work for the pollset: registers the calling
   thread as a worker, polls (via pollset_work_and_unlock()) until 'deadline'
   or a kick, and finishes pollset shutdown if this was the last worker.
   pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock and
   ensure that it is held by the time the function returns */
static grpc_error *pollset_work(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                grpc_pollset_worker **worker_hdl,
                                gpr_timespec now, gpr_timespec deadline) {
  GPR_TIMER_BEGIN("pollset_work", 0);
  grpc_error *error = GRPC_ERROR_NONE;
  /* Convert the absolute deadline into a relative epoll timeout */
  int timeout_ms = poll_deadline_to_millis_timeout(deadline, now);

  sigset_t new_mask;

  /* The worker lives on this thread's stack; it is only published to other
     threads between push_front_worker() and remove_worker() below */
  grpc_pollset_worker worker;
  worker.next = worker.prev = NULL;
  worker.pt_id = pthread_self();
  gpr_atm_no_barrier_store(&worker.is_kicked, (gpr_atm)0);

  *worker_hdl = &worker;

  gpr_tls_set(&g_current_thread_pollset, (intptr_t)pollset);
  gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);

  if (pollset->kicked_without_pollers) {
    /* If the pollset was kicked without pollers, pretend that the current
       worker got the kick and skip polling. A kick indicates that there is some
       work that needs attention like an event on the completion queue or an
       alarm */
    GPR_TIMER_MARK("pollset_work.kicked_without_pollers", 0);
    pollset->kicked_without_pollers = 0;
  } else if (!pollset->shutting_down) {
    /* We use the posix-signal with number 'grpc_wakeup_signal' for waking up
       (i.e 'kicking') a worker in the pollset. A 'kick' is a way to inform the
       worker that there is some pending work that needs immediate attention
       (like an event on the completion queue, or a polling island merge that
       results in a new epoll-fd to wait on) and that the worker should not
       spend time waiting in epoll_pwait().

       A worker can be kicked anytime from the point it is added to the pollset
       via push_front_worker() (or push_back_worker()) to the point it is
       removed via remove_worker().
       If the worker is kicked before/during it calls epoll_pwait(), it should
       immediately exit from epoll_wait(). If the worker is kicked after it
       returns from epoll_wait(), then nothing really needs to be done.

       To accomplish this, we mask 'grpc_wakeup_signal' on this thread at all
       times *except* when it is in epoll_pwait(). This way, the worker never
       misses acting on a kick */

    if (!g_initialized_sigmask) {
      sigemptyset(&new_mask);
      sigaddset(&new_mask, grpc_wakeup_signal);
      pthread_sigmask(SIG_BLOCK, &new_mask, &g_orig_sigmask);
      sigdelset(&g_orig_sigmask, grpc_wakeup_signal);
      g_initialized_sigmask = true;
      /* new_mask:       The new thread mask which blocks 'grpc_wakeup_signal'.
                         This is the mask used at all times *except during
                         epoll_wait()*"
         g_orig_sigmask: The thread mask which allows 'grpc_wakeup_signal' and
                         this is the mask to use *during epoll_wait()*

         The new_mask is set on the worker before it is added to the pollset
         (i.e before it can be kicked) */
    }

    push_front_worker(pollset, &worker); /* Add worker to pollset */

    pollset_work_and_unlock(exec_ctx, pollset, &worker, timeout_ms,
                            &g_orig_sigmask, &error);
    /* Run closures scheduled during polling while the lock is not held */
    grpc_exec_ctx_flush(exec_ctx);

    gpr_mu_lock(&pollset->mu);

    /* Note: There is no need to reset worker.is_kicked to 0 since we are no
       longer going to use this worker */
    remove_worker(pollset, &worker);
  }

  /* If we are the last worker on the pollset (i.e pollset_has_workers() is
     false at this point) and the pollset is shutting down, we may have to
     finish the shutdown process by calling finish_shutdown_locked().
     See pollset_shutdown() for more details.

     Note: Continuing to access pollset here is safe; it is the caller's
     responsibility to not destroy a pollset when it has outstanding calls to
     pollset_work() */
  if (pollset->shutting_down && !pollset_has_workers(pollset) &&
      !pollset->finish_shutdown_called) {
    GPR_TIMER_MARK("pollset_work.finish_shutdown_locked", 0);
    finish_shutdown_locked(exec_ctx, pollset);

    gpr_mu_unlock(&pollset->mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&pollset->mu);
  }

  *worker_hdl = NULL;

  gpr_tls_set(&g_current_thread_pollset, (intptr_t)0);
  gpr_tls_set(&g_current_thread_worker, (intptr_t)0);

  GPR_TIMER_END("pollset_work", 0);

  GRPC_LOG_IF_ERROR("pollset_work", GRPC_ERROR_REF(error));
  return error;
}
1515
/* Associates 'fd' with 'pollset' by making both point to the same polling
   island (creating or merging islands as needed). Takes pollset->mu and
   fd->pi_mu — in that order — and releases both before returning */
static void pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {
  grpc_error *error = GRPC_ERROR_NONE;

  gpr_mu_lock(&pollset->mu);
  gpr_mu_lock(&fd->pi_mu);

  polling_island *pi_new = NULL;

  /* 1) If fd->polling_island and pollset->polling_island are both non-NULL and
   *    equal, do nothing.
   * 2) If fd->polling_island and pollset->polling_island are both NULL, create
   *    a new polling island (with a refcount of 2) and make the polling_island
   *    fields in both fd and pollset to point to the new island
   * 3) If one of fd->polling_island or pollset->polling_island is NULL, update
   *    the NULL polling_island field to point to the non-NULL polling_island
   *    field (ensure that the refcount on the polling island is incremented by
   *    1 to account for the newly added reference)
   * 4) Finally, if fd->polling_island and pollset->polling_island are non-NULL
   *    and different, merge both the polling islands and update the
   *    polling_island fields in both fd and pollset to point to the merged
   *    polling island.
   */
  if (fd->polling_island == pollset->polling_island) {
    pi_new = fd->polling_island;
    if (pi_new == NULL) {
      /* Case 2: neither has an island — create one containing the fd */
      pi_new = polling_island_create(exec_ctx, fd, &error);

      GRPC_POLLING_TRACE(
          "pollset_add_fd: Created new polling island. pi_new: %p (fd: %d, "
          "pollset: %p)",
          (void *)pi_new, fd->fd, (void *)pollset);
    }
  } else if (fd->polling_island == NULL) {
    /* Case 3a: only the pollset has an island — add the fd to it */
    pi_new = polling_island_lock(pollset->polling_island);
    polling_island_add_fds_locked(pi_new, &fd, 1, true, &error);
    gpr_mu_unlock(&pi_new->mu);

    GRPC_POLLING_TRACE(
        "pollset_add_fd: fd->pi was NULL. pi_new: %p (fd: %d, pollset: %p, "
        "pollset->pi: %p)",
        (void *)pi_new, fd->fd, (void *)pollset,
        (void *)pollset->polling_island);
  } else if (pollset->polling_island == NULL) {
    /* Case 3b: only the fd has an island — the pollset adopts it.
       polling_island_lock() resolves to the latest island in the merge
       chain, which is why it is taken (and immediately released) here */
    pi_new = polling_island_lock(fd->polling_island);
    gpr_mu_unlock(&pi_new->mu);

    GRPC_POLLING_TRACE(
        "pollset_add_fd: pollset->pi was NULL. pi_new: %p (fd: %d, pollset: "
        "%p, fd->pi: %p",
        (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island);
  } else {
    /* Case 4: both have different islands — merge them */
    pi_new = polling_island_merge(fd->polling_island, pollset->polling_island,
                                  &error);
    GRPC_POLLING_TRACE(
        "pollset_add_fd: polling islands merged. pi_new: %p (fd: %d, pollset: "
        "%p, fd->pi: %p, pollset->pi: %p)",
        (void *)pi_new, fd->fd, (void *)pollset, (void *)fd->polling_island,
        (void *)pollset->polling_island);
  }

  /* At this point, pi_new is the polling island that both fd->polling_island
     and pollset->polling_island must be pointing to */

  if (fd->polling_island != pi_new) {
    PI_ADD_REF(pi_new, "fd");
    if (fd->polling_island != NULL) {
      PI_UNREF(exec_ctx, fd->polling_island, "fd");
    }
    fd->polling_island = pi_new;
  }

  if (pollset->polling_island != pi_new) {
    PI_ADD_REF(pi_new, "ps");
    if (pollset->polling_island != NULL) {
      PI_UNREF(exec_ctx, pollset->polling_island, "ps");
    }
    pollset->polling_island = pi_new;
  }

  gpr_mu_unlock(&fd->pi_mu);
  gpr_mu_unlock(&pollset->mu);
}
1599
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001600/*******************************************************************************
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -07001601 * Pollset-set Definitions
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001602 */
1603
1604static grpc_pollset_set *pollset_set_create(void) {
1605 grpc_pollset_set *pollset_set = gpr_malloc(sizeof(*pollset_set));
1606 memset(pollset_set, 0, sizeof(*pollset_set));
1607 gpr_mu_init(&pollset_set->mu);
1608 return pollset_set;
1609}
1610
1611static void pollset_set_destroy(grpc_pollset_set *pollset_set) {
1612 size_t i;
1613 gpr_mu_destroy(&pollset_set->mu);
1614 for (i = 0; i < pollset_set->fd_count; i++) {
1615 GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
1616 }
1617 gpr_free(pollset_set->pollsets);
1618 gpr_free(pollset_set->pollset_sets);
1619 gpr_free(pollset_set->fds);
1620 gpr_free(pollset_set);
1621}
1622
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -07001623static void pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
1624 grpc_pollset_set *pollset_set, grpc_fd *fd) {
1625 size_t i;
1626 gpr_mu_lock(&pollset_set->mu);
1627 if (pollset_set->fd_count == pollset_set->fd_capacity) {
1628 pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
1629 pollset_set->fds = gpr_realloc(
1630 pollset_set->fds, pollset_set->fd_capacity * sizeof(*pollset_set->fds));
1631 }
1632 GRPC_FD_REF(fd, "pollset_set");
1633 pollset_set->fds[pollset_set->fd_count++] = fd;
1634 for (i = 0; i < pollset_set->pollset_count; i++) {
1635 pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
1636 }
1637 for (i = 0; i < pollset_set->pollset_set_count; i++) {
1638 pollset_set_add_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
1639 }
1640 gpr_mu_unlock(&pollset_set->mu);
1641}
1642
1643static void pollset_set_del_fd(grpc_exec_ctx *exec_ctx,
1644 grpc_pollset_set *pollset_set, grpc_fd *fd) {
1645 size_t i;
1646 gpr_mu_lock(&pollset_set->mu);
1647 for (i = 0; i < pollset_set->fd_count; i++) {
1648 if (pollset_set->fds[i] == fd) {
1649 pollset_set->fd_count--;
1650 GPR_SWAP(grpc_fd *, pollset_set->fds[i],
1651 pollset_set->fds[pollset_set->fd_count]);
1652 GRPC_FD_UNREF(fd, "pollset_set");
1653 break;
1654 }
1655 }
1656 for (i = 0; i < pollset_set->pollset_set_count; i++) {
1657 pollset_set_del_fd(exec_ctx, pollset_set->pollset_sets[i], fd);
1658 }
1659 gpr_mu_unlock(&pollset_set->mu);
1660}
1661
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001662static void pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
1663 grpc_pollset_set *pollset_set,
1664 grpc_pollset *pollset) {
1665 size_t i, j;
1666 gpr_mu_lock(&pollset_set->mu);
1667 if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
1668 pollset_set->pollset_capacity =
1669 GPR_MAX(8, 2 * pollset_set->pollset_capacity);
1670 pollset_set->pollsets =
1671 gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
1672 sizeof(*pollset_set->pollsets));
1673 }
1674 pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
1675 for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
1676 if (fd_is_orphaned(pollset_set->fds[i])) {
1677 GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
1678 } else {
1679 pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
1680 pollset_set->fds[j++] = pollset_set->fds[i];
1681 }
1682 }
1683 pollset_set->fd_count = j;
1684 gpr_mu_unlock(&pollset_set->mu);
1685}
1686
1687static void pollset_set_del_pollset(grpc_exec_ctx *exec_ctx,
1688 grpc_pollset_set *pollset_set,
1689 grpc_pollset *pollset) {
1690 size_t i;
1691 gpr_mu_lock(&pollset_set->mu);
1692 for (i = 0; i < pollset_set->pollset_count; i++) {
1693 if (pollset_set->pollsets[i] == pollset) {
1694 pollset_set->pollset_count--;
1695 GPR_SWAP(grpc_pollset *, pollset_set->pollsets[i],
1696 pollset_set->pollsets[pollset_set->pollset_count]);
1697 break;
1698 }
1699 }
1700 gpr_mu_unlock(&pollset_set->mu);
1701}
1702
1703static void pollset_set_add_pollset_set(grpc_exec_ctx *exec_ctx,
1704 grpc_pollset_set *bag,
1705 grpc_pollset_set *item) {
1706 size_t i, j;
1707 gpr_mu_lock(&bag->mu);
1708 if (bag->pollset_set_count == bag->pollset_set_capacity) {
1709 bag->pollset_set_capacity = GPR_MAX(8, 2 * bag->pollset_set_capacity);
1710 bag->pollset_sets =
1711 gpr_realloc(bag->pollset_sets,
1712 bag->pollset_set_capacity * sizeof(*bag->pollset_sets));
1713 }
1714 bag->pollset_sets[bag->pollset_set_count++] = item;
1715 for (i = 0, j = 0; i < bag->fd_count; i++) {
1716 if (fd_is_orphaned(bag->fds[i])) {
1717 GRPC_FD_UNREF(bag->fds[i], "pollset_set");
1718 } else {
1719 pollset_set_add_fd(exec_ctx, item, bag->fds[i]);
1720 bag->fds[j++] = bag->fds[i];
1721 }
1722 }
1723 bag->fd_count = j;
1724 gpr_mu_unlock(&bag->mu);
1725}
1726
1727static void pollset_set_del_pollset_set(grpc_exec_ctx *exec_ctx,
1728 grpc_pollset_set *bag,
1729 grpc_pollset_set *item) {
1730 size_t i;
1731 gpr_mu_lock(&bag->mu);
1732 for (i = 0; i < bag->pollset_set_count; i++) {
1733 if (bag->pollset_sets[i] == item) {
1734 bag->pollset_set_count--;
1735 GPR_SWAP(grpc_pollset_set *, bag->pollset_sets[i],
1736 bag->pollset_sets[bag->pollset_set_count]);
1737 break;
1738 }
1739 }
1740 gpr_mu_unlock(&bag->mu);
1741}
1742
Sree Kuchibhotla2e12db92016-06-16 16:53:59 -07001743/* Test helper functions
1744 * */
1745void *grpc_fd_get_polling_island(grpc_fd *fd) {
1746 polling_island *pi;
1747
1748 gpr_mu_lock(&fd->pi_mu);
1749 pi = fd->polling_island;
1750 gpr_mu_unlock(&fd->pi_mu);
1751
1752 return pi;
1753}
1754
1755void *grpc_pollset_get_polling_island(grpc_pollset *ps) {
1756 polling_island *pi;
1757
Sree Kuchibhotla229533b12016-06-21 20:42:52 -07001758 gpr_mu_lock(&ps->mu);
Sree Kuchibhotla2e12db92016-06-16 16:53:59 -07001759 pi = ps->polling_island;
Sree Kuchibhotla229533b12016-06-21 20:42:52 -07001760 gpr_mu_unlock(&ps->mu);
Sree Kuchibhotla2e12db92016-06-16 16:53:59 -07001761
1762 return pi;
1763}
1764
/* Test helper: returns true iff 'p' and 'q' resolve to the same (latest)
   polling island. The comparison uses the local pointers updated by
   polling_island_lock_pair(), so it is valid even after the unlock */
bool grpc_are_polling_islands_equal(void *p, void *q) {
  polling_island *p1 = p;
  polling_island *p2 = q;

  /* Note: polling_island_lock_pair() may change p1 and p2 to point to the
     latest polling islands in their respective linked lists */
  polling_island_lock_pair(&p1, &p2);
  polling_island_unlock_pair(p1, p2);

  return p1 == p2;
}
1776
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001777/*******************************************************************************
Sree Kuchibhotla0bcbd792016-06-01 15:43:03 -07001778 * Event engine binding
Sree Kuchibhotlaf448c342016-05-19 10:51:24 -07001779 */
1780
/* Tears down the global state of this polling engine; the reverse of the
   initialization performed in grpc_init_epoll_linux() */
static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
  polling_island_global_shutdown();
}
1786
/* The event-engine vtable that exposes this epoll + signal based polling
   engine to the rest of iomgr (returned by grpc_init_epoll_linux()) */
static const grpc_event_engine_vtable vtable = {
    .pollset_size = sizeof(grpc_pollset),

    .fd_create = fd_create,
    .fd_wrapped_fd = fd_wrapped_fd,
    .fd_orphan = fd_orphan,
    .fd_shutdown = fd_shutdown,
    .fd_is_shutdown = fd_is_shutdown,
    .fd_notify_on_read = fd_notify_on_read,
    .fd_notify_on_write = fd_notify_on_write,
    .fd_get_read_notifier_pollset = fd_get_read_notifier_pollset,
    .fd_get_workqueue = fd_get_workqueue,

    .pollset_init = pollset_init,
    .pollset_shutdown = pollset_shutdown,
    .pollset_reset = pollset_reset,
    .pollset_destroy = pollset_destroy,
    .pollset_work = pollset_work,
    .pollset_kick = pollset_kick,
    .pollset_add_fd = pollset_add_fd,

    .pollset_set_create = pollset_set_create,
    .pollset_set_destroy = pollset_set_destroy,
    .pollset_set_add_pollset = pollset_set_add_pollset,
    .pollset_set_del_pollset = pollset_set_del_pollset,
    .pollset_set_add_pollset_set = pollset_set_add_pollset_set,
    .pollset_set_del_pollset_set = pollset_set_del_pollset_set,
    .pollset_set_add_fd = pollset_set_add_fd,
    .pollset_set_del_fd = pollset_set_del_fd,

    .kick_poller = kick_poller,

    .shutdown_engine = shutdown_engine,
};
1821
Sree Kuchibhotla72744022016-06-09 09:42:06 -07001822/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
1823 * Create a dummy epoll_fd to make sure epoll support is available */
1824static bool is_epoll_available() {
1825 int fd = epoll_create1(EPOLL_CLOEXEC);
1826 if (fd < 0) {
1827 gpr_log(
1828 GPR_ERROR,
1829 "epoll_create1 failed with error: %d. Not using epoll polling engine",
1830 fd);
1831 return false;
1832 }
1833 close(fd);
1834 return true;
1835}
1836
/* Entry point for this polling engine: returns its vtable if the environment
   supports it (epoll usable in the running kernel and signal-based kicks are
   permitted), otherwise NULL so iomgr can fall back to another engine */
const grpc_event_engine_vtable *grpc_init_epoll_linux(void) {
  /* If use of signals is disabled, we cannot use epoll engine*/
  if (is_grpc_wakeup_signal_initialized && grpc_wakeup_signal < 0) {
    return NULL;
  }

  if (!is_epoll_available()) {
    return NULL;
  }

  /* Default wakeup signal if the application did not pick one explicitly */
  if (!is_grpc_wakeup_signal_initialized) {
    grpc_use_signal(SIGRTMIN + 2);
  }

  fd_global_init();

  /* NOTE(review): fd_global_init() is not undone if a later init step fails —
     presumably acceptable because the process falls back to another engine;
     confirm */
  if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
    return NULL;
  }

  if (!GRPC_LOG_IF_ERROR("polling_island_global_init",
                         polling_island_global_init())) {
    return NULL;
  }

  return &vtable;
}
1864
Sree Kuchibhotla41622a82016-06-13 16:43:14 -07001865#else /* defined(GPR_LINUX_EPOLL) */
1866#if defined(GPR_POSIX_SOCKET)
1867#include "src/core/lib/iomgr/ev_posix.h"
/* If GPR_LINUX_EPOLL is not defined, it means epoll is not available on this
 * platform. Return NULL so that iomgr selects a different polling engine */
const grpc_event_engine_vtable *grpc_init_epoll_linux(void) { return NULL; }
Sree Kuchibhotla41622a82016-06-13 16:43:14 -07001871#endif /* defined(GPR_POSIX_SOCKET) */
Sree Kuchibhotla5855c472016-06-08 12:56:56 -07001872
/* No-op stub: signal-based kicking is only used by the epoll engine, which is
   unavailable here, so the requested signal number is ignored */
void grpc_use_signal(int signum) {}
Sree Kuchibhotla5855c472016-06-08 12:56:56 -07001874#endif /* !defined(GPR_LINUX_EPOLL) */