/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h"

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/string.h"

static grpc_wakeup_fd global_wakeup_fd;

/*******************************************************************************
 * Singleton epoll set related fields
 */

#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1

/* NOTE ON SYNCHRONIZATION:
 * - Fields in this struct are only modified by the designated poller. Hence
 *   there is no need for any locks to protect the struct.
 * - num_events and cursor fields have to be of atomic type to provide memory
 *   visibility guarantees only, i.e. in case of multiple pollers, the
 *   designated polling thread keeps changing; the thread that wrote these
 *   values may be different from the thread reading them.
 */
typedef struct epoll_set {
  int epfd;

  /* The epoll_events after the last call to epoll_wait() */
  struct epoll_event events[MAX_EPOLL_EVENTS];

  /* The number of epoll_events after the last call to epoll_wait() */
  gpr_atm num_events;

  /* Index of the first event in epoll_events that has to be processed. This
   * field is only valid if num_events > 0 */
  gpr_atm cursor;
} epoll_set;
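
/* num_events and cursor are published with gpr_atm_rel_store() at the end of
   do_epoll_wait()/process_epoll_events() and read back with
   gpr_atm_acq_load(), so the contents of events[] written by one designated
   poller are visible to whichever thread becomes the designated poller next. */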

/* The global singleton epoll set */
static epoll_set g_epoll_set;

/* Must be called *only* once */
static bool epoll_set_init() {
  g_epoll_set.epfd = epoll_create1(EPOLL_CLOEXEC);
  if (g_epoll_set.epfd < 0) {
    gpr_log(GPR_ERROR, "epoll unavailable");
    return false;
  }

  gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
  gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
  gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
  return true;
}

/* epoll_set_init() MUST be called before calling this. */
static void epoll_set_shutdown() {
  if (g_epoll_set.epfd >= 0) {
    close(g_epoll_set.epfd);
    g_epoll_set.epfd = -1;
  }
}
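
/* Illustrative lifecycle (a sketch only; the engine vtable wiring that calls
   these lives later in this file, outside this excerpt): epoll_set_init() is
   expected to run once when the ev_epoll1 engine is created and
   epoll_set_shutdown() once when it is torn down, e.g.

     if (!epoll_set_init()) return NULL;  // epoll unavailable: use another engine
     ...
     epoll_set_shutdown();
 */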

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;

  gpr_atm read_closure;
  gpr_atm write_closure;

  struct grpc_fd* freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

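/* A worker's kick state:
   - UNKICKED: the worker has not been told to do anything yet; it is either
     about to wait or is waiting on its condition variable.
   - KICKED: the worker has been told to stop waiting and return from
     pollset_work().
   - DESIGNATED_POLLER: the worker has been chosen to call epoll_wait() on the
     global epoll set. */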
typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;

static const char* kick_state_string(kick_state st) {
  switch (st) {
    case UNKICKED:
      return "UNKICKED";
    case KICKED:
      return "KICKED";
    case DESIGNATED_POLLER:
      return "DESIGNATED_POLLER";
  }
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
}

struct grpc_pollset_worker {
  kick_state state;
  int kick_state_mutator;  // which line of code last changed kick state
  bool initialized_cv;
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
  gpr_cv cv;
  grpc_closure_list schedule_on_end_work;
};

#define SET_KICK_STATE(worker, kick_state)   \
  do {                                       \
    (worker)->state = (kick_state);          \
    (worker)->kick_state_mutator = __LINE__; \
  } while (false)
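
/* kick_state_mutator records __LINE__ purely as a debugging aid: when a worker
   is in an unexpected state, it tells us which SET_KICK_STATE() call site (for
   example the one in pollset_kick_all() vs. the one in end_worker()) was the
   last to change it. */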

#define MAX_NEIGHBORHOODS 1024

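/* Each neighborhood groups a subset of the pollsets. The pad field keeps the
   struct at least a cache line long so that adjacent elements of the
   g_neighborhoods array do not share a cache line, avoiding false sharing on
   the per-neighborhood mutex. */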
typedef struct pollset_neighborhood {
  gpr_mu mu;
  grpc_pollset* active_root;
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighborhood;

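/* Note: grpc_pollset.next/prev (below) link the pollset into an intrusive,
   circular doubly-linked list of active pollsets rooted at its neighborhood's
   active_root; pollsets marked seen_inactive are unlinked from that list. */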
struct grpc_pollset {
  gpr_mu mu;
  pollset_neighborhood* neighborhood;
  bool reassigning_neighborhood;
  grpc_pollset_worker* root_worker;
  bool kicked_without_poller;

  /* Set to true if the pollset is observed to have no workers available to
     poll */
  bool seen_inactive;
  bool shutting_down;             /* Is the pollset shutting down? */
  grpc_closure* shutdown_closure; /* Called after shutdown is complete */

  /* Number of workers who are *about-to* attach themselves to the pollset
   * worker list */
  int begin_refs;

  grpc_pollset* next;
  grpc_pollset* prev;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  char unused;
};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error** composite, grpc_error* error,
                         const char* desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd* fd_freelist = NULL;
static gpr_mu fd_freelist_mu;

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != NULL) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd* fd_create(int fd, const char* name) {
  grpc_fd* new_fd = NULL;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == NULL) {
    new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
  }

  new_fd->fd = fd;
  grpc_lfev_init(&new_fd->read_closure);
  grpc_lfev_init(&new_fd->write_closure);
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = NULL;

  char* fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (GRPC_TRACER_ON(grpc_trace_fd_refcount)) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);

  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
  ev.data.ptr = new_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}
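
/* Note that the fd is registered with the epoll set exactly once, for both
   EPOLLIN and EPOLLOUT in edge-triggered (EPOLLET) mode, so pollers never
   rearm it with epoll_ctl(); readiness is instead tracked by the lockfree
   read_closure/write_closure events above. */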

static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }

/* if 'releasing_fd' is true, it means that we are going to detach the internal
 * fd from the grpc_fd structure (i.e. we should not be calling the shutdown()
 * syscall on that fd) */
static void fd_shutdown_internal(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                                 grpc_error* why, bool releasing_fd) {
  if (grpc_lfev_set_shutdown(exec_ctx, &fd->read_closure,
                             GRPC_ERROR_REF(why))) {
    if (!releasing_fd) {
      shutdown(fd->fd, SHUT_RDWR);
    }
    grpc_lfev_set_shutdown(exec_ctx, &fd->write_closure, GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
  fd_shutdown_internal(exec_ctx, fd, why, false);
}

static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                      grpc_closure* on_done, int* release_fd,
                      bool already_closed, const char* reason) {
  grpc_error* error = GRPC_ERROR_NONE;
  bool is_release_fd = (release_fd != NULL);

  if (!grpc_lfev_is_shutdown(&fd->read_closure)) {
    fd_shutdown_internal(exec_ctx, fd,
                         GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                         is_release_fd);
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (is_release_fd) {
    *release_fd = fd->fd;
  } else if (!already_closed) {
    close(fd->fd);
  }

  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  grpc_lfev_destroy(&fd->read_closure);
  grpc_lfev_destroy(&fd->write_closure);

  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
                                                  grpc_fd* fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset*)notifier;
}

static bool fd_is_shutdown(grpc_fd* fd) {
  return grpc_lfev_is_shutdown(&fd->read_closure);
}

static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                              grpc_closure* closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->read_closure, closure, "read");
}

static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                               grpc_closure* closure) {
  grpc_lfev_notify_on(exec_ctx, &fd->write_closure, closure, "write");
}

static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                               grpc_pollset* notifier) {
  grpc_lfev_set_ready(exec_ctx, &fd->read_closure, "read");
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
  grpc_lfev_set_ready(exec_ctx, &fd->write_closure, "write");
}

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* The designated poller */
static gpr_atm g_active_poller;

static pollset_neighborhood* g_neighborhoods;
static size_t g_num_neighborhoods;

/* Return true if first in list */
static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) {
  if (pollset->root_worker == NULL) {
    pollset->root_worker = worker;
    worker->next = worker->prev = worker;
    return true;
  } else {
    worker->next = pollset->root_worker;
    worker->prev = worker->next->prev;
    worker->next->prev = worker;
    worker->prev->next = worker;
    return false;
  }
}

/* Return true if last in list */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset* pollset,
                                          grpc_pollset_worker* worker) {
  if (worker == pollset->root_worker) {
    if (worker == worker->next) {
      pollset->root_worker = NULL;
      return EMPTIED;
    } else {
      pollset->root_worker = worker->next;
      worker->prev->next = worker->next;
      worker->next->prev = worker->prev;
      return NEW_ROOT;
    }
  } else {
    worker->prev->next = worker->next;
    worker->next->prev = worker->prev;
    return REMOVED;
  }
}

static size_t choose_neighborhood(void) {
  return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
}
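
/* choose_neighborhood() shards pollsets by the CPU the caller happens to be
   running on, which spreads the neighborhood mutexes across cores to reduce
   contention. The CPU index is only a hint: a pollset may later be moved to a
   different neighborhood (see the reassigning_neighborhood path in
   begin_worker()). */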

static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  global_wakeup_fd.read_fd = -1;
  grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLET);
  ev.data.ptr = &global_wakeup_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
                &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
  g_neighborhoods = (pollset_neighborhood*)gpr_zalloc(sizeof(*g_neighborhoods) *
                                                      g_num_neighborhoods);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_init(&g_neighborhoods[i].mu);
  }
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_destroy(&g_neighborhoods[i].mu);
  }
  gpr_free(g_neighborhoods);
}

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
  pollset->reassigning_neighborhood = false;
  pollset->root_worker = NULL;
  pollset->kicked_without_poller = false;
  pollset->seen_inactive = true;
  pollset->shutting_down = false;
  pollset->shutdown_closure = NULL;
  pollset->begin_refs = 0;
  pollset->next = pollset->prev = NULL;
}

static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
  gpr_mu_lock(&pollset->mu);
  if (!pollset->seen_inactive) {
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (!pollset->seen_inactive) {
      if (pollset->neighborhood != neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }
      pollset->prev->next = pollset->next;
      pollset->next->prev = pollset->prev;
      if (pollset == pollset->neighborhood->active_root) {
        pollset->neighborhood->active_root =
            pollset->next == pollset ? NULL : pollset->next;
      }
    }
    gpr_mu_unlock(&pollset->neighborhood->mu);
  }
  gpr_mu_unlock(&pollset->mu);
  gpr_mu_destroy(&pollset->mu);
}

static grpc_error* pollset_kick_all(grpc_exec_ctx* exec_ctx,
                                    grpc_pollset* pollset) {
  GPR_TIMER_BEGIN("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->root_worker != NULL) {
    grpc_pollset_worker* worker = pollset->root_worker;
    do {
      GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
      switch (worker->state) {
        case KICKED:
          GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
          break;
        case UNKICKED:
          SET_KICK_STATE(worker, KICKED);
          if (worker->initialized_cv) {
            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
            gpr_cv_signal(&worker->cv);
          }
          break;
        case DESIGNATED_POLLER:
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
          SET_KICK_STATE(worker, KICKED);
          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                       "pollset_kick_all");
          break;
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  // TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
  // in the else case
  GPR_TIMER_END("pollset_kick_all", 0);
  return error;
}

static void pollset_maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
                                          grpc_pollset* pollset) {
  if (pollset->shutdown_closure != NULL && pollset->root_worker == NULL &&
      pollset->begin_refs == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = NULL;
  }
}

static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
                             grpc_closure* closure) {
  GPR_TIMER_BEGIN("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == NULL);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutdown_closure = closure;
  pollset->shutting_down = true;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
  GPR_TIMER_END("pollset_shutdown", 0);
}

static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
                                           grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
  if (delta > INT_MAX) {
    return INT_MAX;
  } else if (delta < 0) {
    return 0;
  } else {
    return (int)delta;
  }
}
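
/* For example: a deadline of GRPC_MILLIS_INF_FUTURE maps to -1 (block in
   epoll_wait() indefinitely), a deadline that has already passed maps to 0
   (non-blocking poll), and anything else maps to the remaining time in
   milliseconds, capped at INT_MAX. */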

/* Process the epoll events found by do_epoll_wait() function.
   - g_epoll_set.cursor points to the index of the first event to be processed
   - This function then processes up to MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION
     events and updates g_epoll_set.cursor

   NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
   called by the g_active_poller thread. So there is no need for synchronization
   when accessing fields in g_epoll_set */
static grpc_error* process_epoll_events(grpc_exec_ctx* exec_ctx,
                                        grpc_pollset* pollset) {
  static const char* err_desc = "process_events";
  grpc_error* error = GRPC_ERROR_NONE;

  GPR_TIMER_BEGIN("process_epoll_events", 0);
  long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
  long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
  for (int idx = 0;
       (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
       idx++) {
    long c = cursor++;
    struct epoll_event* ev = &g_epoll_set.events[c];
    void* data_ptr = ev->data.ptr;

    if (data_ptr == &global_wakeup_fd) {
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd* fd = (grpc_fd*)(data_ptr);
      bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;

      if (read_ev || cancel) {
        fd_become_readable(exec_ctx, fd, pollset);
      }

      if (write_ev || cancel) {
        fd_become_writable(exec_ctx, fd);
      }
    }
  }
  gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
  GPR_TIMER_END("process_epoll_events", 0);
  return error;
}
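
/* Because MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION is 1, each pass through
   process_epoll_events() handles at most one event before returning, and
   pollset_work() skips epoll_wait() while cursor != num_events. The effect is
   that a batch returned by a single epoll_wait() call can be drained one event
   at a time, potentially by several different worker threads. */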

/* Do epoll_wait and store the events in the g_epoll_set.events field. This does
   not "process" any of the events yet; that is done in process_epoll_events().
   See process_epoll_events() for more details.

   NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
   (i.e. the designated poller thread) will be calling this function. So there
   is no need for any synchronization when accessing fields in g_epoll_set */
static grpc_error* do_epoll_wait(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
                                 grpc_millis deadline) {
  GPR_TIMER_BEGIN("do_epoll_wait", 0);

  int r;
  int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  do {
    GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
    r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
                   timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);

  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
  }

  gpr_atm_rel_store(&g_epoll_set.num_events, r);
  gpr_atm_rel_store(&g_epoll_set.cursor, 0);

  GPR_TIMER_END("do_epoll_wait", 0);
  return GRPC_ERROR_NONE;
}

static bool begin_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
                         grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_BEGIN("begin_worker", 0);
  if (worker_hdl != NULL) *worker_hdl = worker;
  worker->initialized_cv = false;
  SET_KICK_STATE(worker, UNKICKED);
  worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
  pollset->begin_refs++;

  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_ERROR, "PS:%p BEGIN_STARTS:%p", pollset, worker);
  }

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move back to the
    // active list
    bool is_reassigning = false;
    if (!pollset->reassigning_neighborhood) {
      is_reassigning = true;
      pollset->reassigning_neighborhood = true;
      pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
    }
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  // pollset unlocked: state may change (even worker->kick_state)
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (GRPC_TRACER_ON(grpc_polling_trace)) {
      gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
              pollset, worker, kick_state_string(worker->state),
              is_reassigning);
    }
    if (pollset->seen_inactive) {
      if (neighborhood != pollset->neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }

      /* In the brief time we released the pollset locks above, the worker MAY
         have been kicked. In this case, the worker should get out of this
         pollset ASAP and hence this should neither add the pollset to the
         neighborhood nor mark the pollset as active.

         On a side note, the only way a worker's kick state could have changed
         at this point is if it were "kicked specifically". Since the worker has
         not added itself to the pollset yet (by calling worker_insert()), it is
         not visible in the "kick any" path yet */
      if (worker->state == UNKICKED) {
        pollset->seen_inactive = false;
        if (neighborhood->active_root == NULL) {
          neighborhood->active_root = pollset->next = pollset->prev = pollset;
          /* Make this the designated poller if there isn't one already */
          if (worker->state == UNKICKED &&
              gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
            SET_KICK_STATE(worker, DESIGNATED_POLLER);
          }
        } else {
          pollset->next = neighborhood->active_root;
          pollset->prev = pollset->next->prev;
          pollset->next->prev = pollset->prev->next = pollset;
        }
      }
    }
    if (is_reassigning) {
      GPR_ASSERT(pollset->reassigning_neighborhood);
      pollset->reassigning_neighborhood = false;
    }
    gpr_mu_unlock(&neighborhood->mu);
  }

  worker_insert(pollset, worker);
  pollset->begin_refs--;
  if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (worker->state == UNKICKED && !pollset->shutting_down) {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
                pollset, worker, kick_state_string(worker->state),
                pollset->shutting_down);
      }

      if (gpr_cv_wait(&worker->cv, &pollset->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)) &&
          worker->state == UNKICKED) {
        /* If gpr_cv_wait returns true (i.e. a timeout), pretend that the worker
           received a kick */
        SET_KICK_STATE(worker, KICKED);
      }
    }
    grpc_exec_ctx_invalidate_now(exec_ctx);
  }

  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_ERROR,
            "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
            "kicked_without_poller: %d",
            pollset, worker, kick_state_string(worker->state),
            pollset->shutting_down, pollset->kicked_without_poller);
  }

  /* We release the pollset lock in this function at a couple of places:
   *   1. Briefly when assigning the pollset to a neighborhood
   *   2. When doing gpr_cv_wait()
   * It is possible that 'kicked_without_poller' was set to true during (1) and
   * 'shutting_down' is set to true during (1) or (2). If either of them is
   * true, this worker cannot do polling */
  /* TODO(sreek): Perhaps there is a better way to handle the
   * kicked_without_poller case, especially when the worker is the
   * DESIGNATED_POLLER */

  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    GPR_TIMER_END("begin_worker", 0);
    return false;
  }

  GPR_TIMER_END("begin_worker", 0);
  return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}

static bool check_neighborhood_for_available_poller(
    grpc_exec_ctx* exec_ctx, pollset_neighborhood* neighborhood) {
  GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
  bool found_worker = false;
  do {
    grpc_pollset* inspect = neighborhood->active_root;
    if (inspect == NULL) {
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker* inspect_worker = inspect->root_worker;
    if (inspect_worker != NULL) {
      do {
        switch (inspect_worker->state) {
          case UNKICKED:
            if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                       (gpr_atm)inspect_worker)) {
              if (GRPC_TRACER_ON(grpc_polling_trace)) {
                gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
                        inspect_worker);
              }
              SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
              if (inspect_worker->initialized_cv) {
                GPR_TIMER_MARK("signal worker", 0);
                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
                gpr_cv_signal(&inspect_worker->cv);
              }
            } else {
              if (GRPC_TRACER_ON(grpc_polling_trace)) {
                gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
              }
            }
            // even if we didn't win the cas, there's a worker, we can stop
            found_worker = true;
            break;
          case KICKED:
            break;
          case DESIGNATED_POLLER:
            found_worker = true;  // ok, so someone else found the worker, but
                                  // we'll accept that
            break;
        }
        inspect_worker = inspect_worker->next;
      } while (!found_worker && inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
      }
      inspect->seen_inactive = true;
      if (inspect == neighborhood->active_root) {
        neighborhood->active_root =
            inspect->next == inspect ? NULL : inspect->next;
      }
      inspect->next->prev = inspect->prev;
      inspect->prev->next = inspect->next;
      inspect->next = inspect->prev = NULL;
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  GPR_TIMER_END("check_neighborhood_for_available_poller", 0);
  return found_worker;
}
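
/* Summary of the scan above: walk the neighborhood's active pollsets, and for
   the first UNKICKED worker found, try to CAS it into g_active_poller and wake
   it as the new DESIGNATED_POLLER. Pollsets on which no usable worker is found
   are marked seen_inactive and unlinked from the active list. */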

static void end_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
                       grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_BEGIN("end_worker", 0);
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
  }
  if (worker_hdl != NULL) *worker_hdl = NULL;
  /* Make sure we appear kicked */
  SET_KICK_STATE(worker, KICKED);
  grpc_closure_list_move(&worker->schedule_on_end_work,
                         &exec_ctx->closure_list);
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    if (worker->next != worker && worker->next->state == UNKICKED) {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
      }
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
      gpr_cv_signal(&worker->next->cv);
      if (grpc_exec_ctx_has_work(exec_ctx)) {
        gpr_mu_unlock(&pollset->mu);
        grpc_exec_ctx_flush(exec_ctx);
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      size_t poller_neighborhood_idx =
          (size_t)(pollset->neighborhood - g_neighborhoods);
      gpr_mu_unlock(&pollset->mu);
      bool found_worker = false;
      bool scan_state[MAX_NEIGHBORHOODS];
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        if (gpr_mu_trylock(&neighborhood->mu)) {
          found_worker =
              check_neighborhood_for_available_poller(exec_ctx, neighborhood);
          gpr_mu_unlock(&neighborhood->mu);
          scan_state[i] = true;
        } else {
          scan_state[i] = false;
        }
      }
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        if (scan_state[i]) continue;
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        gpr_mu_lock(&neighborhood->mu);
        found_worker =
            check_neighborhood_for_available_poller(exec_ctx, neighborhood);
        gpr_mu_unlock(&neighborhood->mu);
      }
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
    }
  } else if (grpc_exec_ctx_has_work(exec_ctx)) {
    gpr_mu_unlock(&pollset->mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&pollset->mu);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (GRPC_TRACER_ON(grpc_polling_trace)) {
    gpr_log(GPR_DEBUG, " .. remove worker");
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
  GPR_TIMER_END("end_worker", 0);
}

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock and
   ensure that it is held by the time the function returns */
static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  grpc_pollset_worker worker;
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_work";
  GPR_TIMER_BEGIN("pollset_work", 0);
  if (ps->kicked_without_poller) {
    ps->kicked_without_poller = false;
    GPR_TIMER_END("pollset_work", 0);
    return GRPC_ERROR_NONE;
  }

  if (begin_worker(exec_ctx, ps, &worker, worker_hdl, deadline)) {
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!ps->shutting_down);
    GPR_ASSERT(!ps->seen_inactive);

    gpr_mu_unlock(&ps->mu); /* unlock */
    /* This is the designated polling thread at this point and should ideally do
       polling. However, if there are unprocessed events left from a previous
       call to do_epoll_wait(), skip calling epoll_wait() in this iteration and
       process the pending epoll events.

       The reason for decoupling do_epoll_wait and process_epoll_events is to
       better distribute the work (i.e. handling epoll events) across multiple
       threads.

       process_epoll_events() returns very quickly: it just queues the work on
       exec_ctx but does not execute it (the actual execution, or more
       accurately grpc_exec_ctx_flush(), happens in end_worker() AFTER selecting
       a designated poller). So we are not waiting long periods without a
       designated poller */
    if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
        gpr_atm_acq_load(&g_epoll_set.num_events)) {
      append_error(&error, do_epoll_wait(exec_ctx, ps, deadline), err_desc);
    }
    append_error(&error, process_epoll_events(exec_ctx, ps), err_desc);

    gpr_mu_lock(&ps->mu); /* lock */

    gpr_tls_set(&g_current_thread_worker, 0);
  } else {
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
  }
  end_worker(exec_ctx, ps, &worker, worker_hdl);

  gpr_tls_set(&g_current_thread_pollset, 0);
  GPR_TIMER_END("pollset_work", 0);
  return error;
}
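
/* Illustrative caller sketch (simplified and assumed; the real callers live in
   the completion-queue code, not in this file). A thread that wants to poll
   does roughly the following, with exec_ctx and deadline supplied by the
   caller:

     gpr_mu* mu;
     grpc_pollset* ps = (grpc_pollset*)gpr_zalloc(sizeof(grpc_pollset));
     pollset_init(ps, &mu);
     gpr_mu_lock(mu);
     GRPC_LOG_IF_ERROR("pollset_work",
                       pollset_work(&exec_ctx, ps, NULL, deadline));
     gpr_mu_unlock(mu);

   Other threads use pollset_kick() (below) to wake the poller early. */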
990
Craig Tillerbaa14a92017-11-03 09:09:36 -0700991static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
992 grpc_pollset_worker* specific_worker) {
yang-gdf92a642017-08-21 22:38:45 -0700993 GPR_TIMER_BEGIN("pollset_kick", 0);
Craig Tiller0ff222a2017-09-01 09:41:43 -0700994 GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700995 grpc_error* ret_err = GRPC_ERROR_NONE;
Craig Tillerb89bac02017-05-26 15:20:32 +0000996 if (GRPC_TRACER_ON(grpc_polling_trace)) {
997 gpr_strvec log;
998 gpr_strvec_init(&log);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700999 char* tmp;
1000 gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
1001 specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
1002 (void*)gpr_tls_get(&g_current_thread_worker),
1003 pollset->root_worker);
Craig Tillerb89bac02017-05-26 15:20:32 +00001004 gpr_strvec_add(&log, tmp);
1005 if (pollset->root_worker != NULL) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001006 gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001007 kick_state_string(pollset->root_worker->state),
Craig Tiller830e82a2017-05-31 16:26:27 -07001008 pollset->root_worker->next,
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001009 kick_state_string(pollset->root_worker->next->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001010 gpr_strvec_add(&log, tmp);
1011 }
1012 if (specific_worker != NULL) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001013 gpr_asprintf(&tmp, " worker_kick_state=%s",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001014 kick_state_string(specific_worker->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001015 gpr_strvec_add(&log, tmp);
1016 }
1017 tmp = gpr_strvec_flatten(&log, NULL);
1018 gpr_strvec_destroy(&log);
Craig Tiller830e82a2017-05-31 16:26:27 -07001019 gpr_log(GPR_ERROR, "%s", tmp);
Craig Tillerb89bac02017-05-26 15:20:32 +00001020 gpr_free(tmp);
1021 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001022
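  /* Kick strategy, roughly mirroring the branches below:
     - specific_worker == NULL: kick the pollset as a whole. With no poller at
       all, just record kicked_without_poller; if the root/next worker is
       already KICKED there is nothing to do; if the target is the active
       designated poller, write to the global wakeup fd; an UNKICKED waiter is
       woken through its condition variable.
     - specific_worker != NULL: mark that worker KICKED and, when needed, wake
       it via the wakeup fd (active poller) or its condition variable. */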
Craig Tiller4509c472017-04-27 19:05:13 +00001023 if (specific_worker == NULL) {
1024 if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001025 grpc_pollset_worker* root_worker = pollset->root_worker;
Craig Tiller375eb252017-04-27 23:29:12 +00001026 if (root_worker == NULL) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001027 GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
Craig Tiller4509c472017-04-27 19:05:13 +00001028 pollset->kicked_without_poller = true;
Craig Tiller75aef7f2017-05-26 08:26:08 -07001029 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001030 gpr_log(GPR_ERROR, " .. kicked_without_poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001031 }
yang-gdf92a642017-08-21 22:38:45 -07001032 goto done;
Craig Tiller375eb252017-04-27 23:29:12 +00001033 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001034 grpc_pollset_worker* next_worker = root_worker->next;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001035 if (root_worker->state == KICKED) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001036 GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001037 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001038 gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
1039 }
1040 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001041 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001042 } else if (next_worker->state == KICKED) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001043 GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
Craig Tiller830e82a2017-05-31 16:26:27 -07001044 if (GRPC_TRACER_ON(grpc_polling_trace)) {
1045 gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
1046 }
1047 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001048 goto done;
Craig Tiller830e82a2017-05-31 16:26:27 -07001049 } else if (root_worker ==
1050               next_worker &&  // only try to wake up a poller if
1051                               // there is no other worker
Craig Tillerbaa14a92017-11-03 09:09:36 -07001052 root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
Craig Tiller830e82a2017-05-31 16:26:27 -07001053 &g_active_poller)) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001054 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
Craig Tiller830e82a2017-05-31 16:26:27 -07001055 if (GRPC_TRACER_ON(grpc_polling_trace)) {
1056 gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001057 }
Craig Tiller55624a32017-05-26 08:14:44 -07001058 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001059 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1060 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001061 } else if (next_worker->state == UNKICKED) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001062 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001063 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001064 gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001065 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001066 GPR_ASSERT(next_worker->initialized_cv);
Craig Tiller55624a32017-05-26 08:14:44 -07001067 SET_KICK_STATE(next_worker, KICKED);
Craig Tiller375eb252017-04-27 23:29:12 +00001068 gpr_cv_signal(&next_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001069 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001070 } else if (next_worker->state == DESIGNATED_POLLER) {
1071 if (root_worker->state != DESIGNATED_POLLER) {
Craig Tiller75aef7f2017-05-26 08:26:08 -07001072 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001073 gpr_log(
1074 GPR_ERROR,
1075 " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
1076 root_worker, root_worker->initialized_cv, next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001077 }
Craig Tiller55624a32017-05-26 08:14:44 -07001078 SET_KICK_STATE(root_worker, KICKED);
1079 if (root_worker->initialized_cv) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001080 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
Craig Tiller55624a32017-05-26 08:14:44 -07001081 gpr_cv_signal(&root_worker->cv);
1082 }
yang-gdf92a642017-08-21 22:38:45 -07001083 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001084 } else {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001085 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001086 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001087 gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
Craig Tiller75aef7f2017-05-26 08:26:08 -07001088 root_worker);
1089 }
Craig Tiller55624a32017-05-26 08:14:44 -07001090 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001091 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1092 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001093 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001094 } else {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001095 GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001096 GPR_ASSERT(next_worker->state == KICKED);
Craig Tiller55624a32017-05-26 08:14:44 -07001097 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001098 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001099 }
1100 } else {
Craig Tiller1a012bb2017-09-13 14:29:00 -07001101 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
Craig Tiller830e82a2017-05-31 16:26:27 -07001102 if (GRPC_TRACER_ON(grpc_polling_trace)) {
1103 gpr_log(GPR_ERROR, " .. kicked while waking up");
1104 }
yang-gdf92a642017-08-21 22:38:45 -07001105 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001106 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001107
1108 GPR_UNREACHABLE_CODE(goto done);
1109 }
1110
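  /* A specific worker was requested: kick exactly that worker. */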
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001111 if (specific_worker->state == KICKED) {
Craig Tiller75aef7f2017-05-26 08:26:08 -07001112 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001113 gpr_log(GPR_ERROR, " .. specific worker already kicked");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001114 }
yang-gdf92a642017-08-21 22:38:45 -07001115 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001116 } else if (gpr_tls_get(&g_current_thread_worker) ==
1117 (intptr_t)specific_worker) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001118 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001119 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001120 gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001121 }
Craig Tiller55624a32017-05-26 08:14:44 -07001122 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001123 goto done;
Craig Tiller32f90ee2017-04-28 12:46:41 -07001124 } else if (specific_worker ==
Craig Tillerbaa14a92017-11-03 09:09:36 -07001125 (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001126 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001127 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001128 gpr_log(GPR_ERROR, " .. kick active poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001129 }
Craig Tiller55624a32017-05-26 08:14:44 -07001130 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001131 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1132 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001133 } else if (specific_worker->initialized_cv) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001134 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001135 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001136 gpr_log(GPR_ERROR, " .. kick waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001137 }
Craig Tiller55624a32017-05-26 08:14:44 -07001138 SET_KICK_STATE(specific_worker, KICKED);
Craig Tiller4509c472017-04-27 19:05:13 +00001139 gpr_cv_signal(&specific_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001140 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001141 } else {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001142 GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001143 if (GRPC_TRACER_ON(grpc_polling_trace)) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001144 gpr_log(GPR_ERROR, " .. kick non-waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001145 }
Craig Tiller55624a32017-05-26 08:14:44 -07001146 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001147 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001148 }
yang-gdf92a642017-08-21 22:38:45 -07001149done:
1150 GPR_TIMER_END("pollset_kick", 0);
1151 return ret_err;
Craig Tiller4509c472017-04-27 19:05:13 +00001152}
1153
Craig Tillerbaa14a92017-11-03 09:09:36 -07001154static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
1155 grpc_fd* fd) {}
Craig Tiller4509c472017-04-27 19:05:13 +00001156
Craig Tiller4509c472017-04-27 19:05:13 +00001157/*******************************************************************************
Craig Tillerc67cc992017-04-27 10:15:51 -07001158 * Pollset-set Definitions
1159 */
1160
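/* Pollset_sets are equally irrelevant with one global epoll set: all of the
   operations below are no-ops, and pollset_set_create() merely returns a
   distinctive non-NULL dummy pointer. */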
Craig Tillerbaa14a92017-11-03 09:09:36 -07001161static grpc_pollset_set* pollset_set_create(void) {
1162 return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
Craig Tillerc67cc992017-04-27 10:15:51 -07001163}
1164
Craig Tillerbaa14a92017-11-03 09:09:36 -07001165static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
1166 grpc_pollset_set* pss) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001167
Craig Tillerbaa14a92017-11-03 09:09:36 -07001168static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
1169 grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001170
Craig Tillerbaa14a92017-11-03 09:09:36 -07001171static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
1172 grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001173
Craig Tillerbaa14a92017-11-03 09:09:36 -07001174static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
1175 grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001176
Craig Tillerbaa14a92017-11-03 09:09:36 -07001177static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
1178 grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001179
Craig Tillerbaa14a92017-11-03 09:09:36 -07001180static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
1181 grpc_pollset_set* bag,
1182 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001183
Craig Tillerbaa14a92017-11-03 09:09:36 -07001184static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
1185 grpc_pollset_set* bag,
1186 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001187
1188/*******************************************************************************
1189 * Event engine binding
1190 */
1191
1192static void shutdown_engine(void) {
1193 fd_global_shutdown();
1194 pollset_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001195 epoll_set_shutdown();
Craig Tillerc67cc992017-04-27 10:15:51 -07001196}
1197
1198static const grpc_event_engine_vtable vtable = {
Yash Tibrewal533d1182017-09-18 10:48:22 -07001199 sizeof(grpc_pollset),
Craig Tillerc67cc992017-04-27 10:15:51 -07001200
Yash Tibrewal533d1182017-09-18 10:48:22 -07001201 fd_create,
1202 fd_wrapped_fd,
1203 fd_orphan,
1204 fd_shutdown,
1205 fd_notify_on_read,
1206 fd_notify_on_write,
1207 fd_is_shutdown,
1208 fd_get_read_notifier_pollset,
Craig Tillerc67cc992017-04-27 10:15:51 -07001209
Yash Tibrewal533d1182017-09-18 10:48:22 -07001210 pollset_init,
1211 pollset_shutdown,
1212 pollset_destroy,
1213 pollset_work,
1214 pollset_kick,
1215 pollset_add_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001216
Yash Tibrewal533d1182017-09-18 10:48:22 -07001217 pollset_set_create,
1218 pollset_set_destroy,
1219 pollset_set_add_pollset,
1220 pollset_set_del_pollset,
1221 pollset_set_add_pollset_set,
1222 pollset_set_del_pollset_set,
1223 pollset_set_add_fd,
1224 pollset_set_del_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001225
Yash Tibrewal533d1182017-09-18 10:48:22 -07001226 shutdown_engine,
Craig Tillerc67cc992017-04-27 10:15:51 -07001227};
1228
1229/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001230 * Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll
1231 * support is available */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001232const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Craig Tillerc67cc992017-04-27 10:15:51 -07001233 if (!grpc_has_wakeup_fd()) {
yang-g30101b02017-11-06 14:35:30 -08001234 gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
Craig Tillerc67cc992017-04-27 10:15:51 -07001235 return NULL;
1236 }
1237
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001238 if (!epoll_set_init()) {
Craig Tillerc67cc992017-04-27 10:15:51 -07001239 return NULL;
1240 }
1241
Craig Tillerc67cc992017-04-27 10:15:51 -07001242 fd_global_init();
1243
1244 if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
Craig Tiller4509c472017-04-27 19:05:13 +00001245 fd_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001246 epoll_set_shutdown();
Craig Tillerc67cc992017-04-27 10:15:51 -07001247 return NULL;
1248 }
1249
1250 return &vtable;
1251}
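
/* A minimal usage sketch (hypothetical caller; engine selection actually
 * lives in ev_posix and is driven by the GRPC_POLL_STRATEGY environment
 * variable, which is not shown in this file):
 *
 *   const grpc_event_engine_vtable* v = grpc_init_epoll1_linux(false);
 *   if (v == NULL) {
 *     // epoll1 is unavailable (no wakeup fd or no working epoll support);
 *     // the caller would fall back to another polling engine.
 *   } else {
 *     // v->pollset_size, v->fd_create, v->pollset_work, ... now drive iomgr.
 *   }
 */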
1252
1253#else /* defined(GRPC_LINUX_EPOLL) */
1254#if defined(GRPC_POSIX_SOCKET)
Yash Tibrewal1cac2232017-09-26 11:31:11 -07001255#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -07001256/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
1257 * NULL */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001258const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
yang-g30101b02017-11-06 14:35:30 -08001259 gpr_log(GPR_ERROR,
yang-g56e72572017-11-06 15:54:48 -08001260            "Skipping epoll1 because GRPC_LINUX_EPOLL is not defined.");
Craig Tiller9ddb3152017-04-27 21:32:56 +00001261 return NULL;
1262}
Craig Tillerc67cc992017-04-27 10:15:51 -07001263#endif /* defined(GRPC_POSIX_SOCKET) */
1264#endif /* !defined(GRPC_LINUX_EPOLL) */