/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll
   (i.e. epoll_create() or epoll_create1()) */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/string_util.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"

static grpc_wakeup_fd global_wakeup_fd;

/*******************************************************************************
 * Singleton epoll set related fields
 */

#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1

/* NOTE ON SYNCHRONIZATION:
 * - Fields in this struct are only modified by the designated poller. Hence
 *   there is no need for any locks to protect the struct.
 * - num_events and cursor fields have to be of atomic type to provide memory
 *   visibility guarantees only, i.e. in case of multiple pollers, the
 *   designated polling thread keeps changing; the thread that wrote these
 *   values may be different from the thread reading the values.
 */
typedef struct epoll_set {
  int epfd;

  /* The epoll_events after the last call to epoll_wait() */
  struct epoll_event events[MAX_EPOLL_EVENTS];

  /* The number of epoll_events after the last call to epoll_wait() */
  gpr_atm num_events;

  /* Index of the first event in epoll_events that has to be processed. This
   * field is only valid if num_events > 0 */
  gpr_atm cursor;
} epoll_set;

/* The global singleton epoll set */
static epoll_set g_epoll_set;

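/* Create the epoll fd used by this engine with the close-on-exec flag set,
   preferring epoll_create1(EPOLL_CLOEXEC) where available and falling back to
   epoll_create() + fcntl(FD_CLOEXEC) otherwise. Returns a negative value on
   failure. */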
static int epoll_create_and_cloexec() {
#ifdef GRPC_LINUX_EPOLL_CREATE1
  int fd = epoll_create1(EPOLL_CLOEXEC);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "epoll_create1 unavailable");
  }
#else
  int fd = epoll_create(MAX_EPOLL_EVENTS);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "epoll_create unavailable");
  } else if (fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) {
    gpr_log(GPR_ERROR, "fcntl following epoll_create failed");
    return -1;
  }
#endif
  return fd;
}

/* Must be called *only* once */
static bool epoll_set_init() {
  g_epoll_set.epfd = epoll_create_and_cloexec();
  if (g_epoll_set.epfd < 0) {
    return false;
  }

  gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
  gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
  gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
  return true;
}

/* epoll_set_init() MUST be called before calling this. */
static void epoll_set_shutdown() {
  if (g_epoll_set.epfd >= 0) {
    close(g_epoll_set.epfd);
    g_epoll_set.epfd = -1;
  }
}

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;

  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;

  struct grpc_fd* freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;

static const char* kick_state_string(kick_state st) {
  switch (st) {
    case UNKICKED:
      return "UNKICKED";
    case KICKED:
      return "KICKED";
    case DESIGNATED_POLLER:
      return "DESIGNATED_POLLER";
  }
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
}

struct grpc_pollset_worker {
  kick_state state;
  int kick_state_mutator;  // which line of code last changed kick state
  bool initialized_cv;
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
  gpr_cv cv;
  grpc_closure_list schedule_on_end_work;
};

#define SET_KICK_STATE(worker, kick_state)   \
  do {                                       \
    (worker)->state = (kick_state);          \
    (worker)->kick_state_mutator = __LINE__; \
  } while (false)

#define MAX_NEIGHBORHOODS 1024

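/* A 'neighborhood' groups pollsets that share a single mutex. The pad field
   keeps each neighborhood on its own cache line, presumably to avoid false
   sharing between the per-neighborhood locks. */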
typedef struct pollset_neighborhood {
  gpr_mu mu;
  grpc_pollset* active_root;
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighborhood;

struct grpc_pollset {
  gpr_mu mu;
  pollset_neighborhood* neighborhood;
  bool reassigning_neighborhood;
  grpc_pollset_worker* root_worker;
  bool kicked_without_poller;

  /* Set to true if the pollset is observed to have no workers available to
     poll */
  bool seen_inactive;
  bool shutting_down;             /* Is the pollset shutting down? */
  grpc_closure* shutdown_closure; /* Called after shutdown is complete */

  /* Number of workers who are *about-to* attach themselves to the pollset
   * worker list */
  int begin_refs;

  grpc_pollset* next;
  grpc_pollset* prev;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  char unused;
};

/*******************************************************************************
 * Common helpers
 */

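/* Fold 'error' into '*composite': returns true if 'error' is GRPC_ERROR_NONE,
   otherwise attaches it as a child of '*composite' (creating the composite
   from 'desc' on first use) and returns false. */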
static bool append_error(grpc_error** composite, grpc_error* error,
                         const char* desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wake up 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != nullptr) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd* fd_create(int fd, const char* name) {
  grpc_fd* new_fd = nullptr;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
    new_fd->read_closure.Init();
    new_fd->write_closure.Init();
  }

  new_fd->fd = fd;
  new_fd->read_closure->InitEvent();
  new_fd->write_closure->InitEvent();
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = nullptr;

  char* fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);

  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLOUT | EPOLLET);
  ev.data.ptr = new_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}

static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }

/* if 'releasing_fd' is true, it means that we are going to detach the internal
 * fd from the grpc_fd structure (i.e. we should not be calling the shutdown()
 * syscall on that fd) */
static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why,
                                 bool releasing_fd) {
  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
    if (!releasing_fd) {
      shutdown(fd->fd, SHUT_RDWR);
    }
    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  fd_shutdown_internal(fd, why, false);
}

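/* Release a grpc_fd: make sure it is shut down, then either hand the raw fd
   back to the caller via 'release_fd' or close() it, schedule 'on_done', and
   return the grpc_fd object to the freelist. */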
static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      bool already_closed, const char* reason) {
  grpc_error* error = GRPC_ERROR_NONE;
  bool is_release_fd = (release_fd != nullptr);

  if (!fd->read_closure->IsShutdown()) {
    fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                         is_release_fd);
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (is_release_fd) {
    *release_fd = fd->fd;
  } else if (!already_closed) {
    close(fd->fd);
  }

  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  fd->read_closure->DestroyEvent();
  fd->write_closure->DestroyEvent();

  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset*)notifier;
}

static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure->IsShutdown();
}

static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure->NotifyOn(closure);
}

static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure->NotifyOn(closure);
}

static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
  fd->read_closure->SetReady();
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* The designated poller */
static gpr_atm g_active_poller;

static pollset_neighborhood* g_neighborhoods;
static size_t g_num_neighborhoods;

/* Return true if first in list */
static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) {
  if (pollset->root_worker == nullptr) {
    pollset->root_worker = worker;
    worker->next = worker->prev = worker;
    return true;
  } else {
    worker->next = pollset->root_worker;
    worker->prev = worker->next->prev;
    worker->next->prev = worker;
    worker->prev->next = worker;
    return false;
  }
}

/* Result of worker_remove(): EMPTIED if the removed worker was the last one in
   the list, NEW_ROOT if it was the root and a new root was chosen, REMOVED
   otherwise */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset* pollset,
                                          grpc_pollset_worker* worker) {
  if (worker == pollset->root_worker) {
    if (worker == worker->next) {
      pollset->root_worker = nullptr;
      return EMPTIED;
    } else {
      pollset->root_worker = worker->next;
      worker->prev->next = worker->next;
      worker->next->prev = worker->prev;
      return NEW_ROOT;
    }
  } else {
    worker->prev->next = worker->next;
    worker->next->prev = worker->prev;
    return REMOVED;
  }
}
static size_t choose_neighborhood(void) {
  return static_cast<size_t>(gpr_cpu_current_cpu()) % g_num_neighborhoods;
}

static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  global_wakeup_fd.read_fd = -1;
  grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
  ev.data.ptr = &global_wakeup_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
                &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
  g_neighborhoods = static_cast<pollset_neighborhood*>(
      gpr_zalloc(sizeof(*g_neighborhoods) * g_num_neighborhoods));
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_init(&g_neighborhoods[i].mu);
  }
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_destroy(&g_neighborhoods[i].mu);
  }
  gpr_free(g_neighborhoods);
}

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
  pollset->reassigning_neighborhood = false;
  pollset->root_worker = nullptr;
  pollset->kicked_without_poller = false;
  pollset->seen_inactive = true;
  pollset->shutting_down = false;
  pollset->shutdown_closure = nullptr;
  pollset->begin_refs = 0;
  pollset->next = pollset->prev = nullptr;
}

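/* Unlink the pollset from its neighborhood's active list (if it is still
   linked there) and destroy its mutex. */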
static void pollset_destroy(grpc_pollset* pollset) {
  gpr_mu_lock(&pollset->mu);
  if (!pollset->seen_inactive) {
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (!pollset->seen_inactive) {
      if (pollset->neighborhood != neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }
      pollset->prev->next = pollset->next;
      pollset->next->prev = pollset->prev;
      if (pollset == pollset->neighborhood->active_root) {
        pollset->neighborhood->active_root =
            pollset->next == pollset ? nullptr : pollset->next;
      }
    }
    gpr_mu_unlock(&pollset->neighborhood->mu);
  }
  gpr_mu_unlock(&pollset->mu);
  gpr_mu_destroy(&pollset->mu);
}

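/* Kick every worker currently attached to 'pollset': UNKICKED workers are
   woken via their condition variable, the DESIGNATED_POLLER is woken via the
   global wakeup fd, and already KICKED workers are left alone. */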
static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->root_worker != nullptr) {
    grpc_pollset_worker* worker = pollset->root_worker;
    do {
      GRPC_STATS_INC_POLLSET_KICK();
      switch (worker->state) {
        case KICKED:
          GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
          break;
        case UNKICKED:
          SET_KICK_STATE(worker, KICKED);
          if (worker->initialized_cv) {
            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
            gpr_cv_signal(&worker->cv);
          }
          break;
        case DESIGNATED_POLLER:
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
          SET_KICK_STATE(worker, KICKED);
          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                       "pollset_kick_all");
          break;
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  // TODO: sreek.  Check if we need to set 'kicked_without_poller' to true here
  // in the else case
  return error;
}

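/* Schedule the pending shutdown closure once the last worker has left the
   pollset and no worker is in the middle of attaching (begin_refs == 0). */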
static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
  if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
      pollset->begin_refs == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = nullptr;
  }
}

static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
  GPR_TIMER_SCOPE("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == nullptr);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutdown_closure = closure;
  pollset->shutting_down = true;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(pollset);
}

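/* Convert a grpc_millis deadline into the millisecond timeout expected by
   epoll_wait(): -1 means block indefinitely, 0 means the deadline has already
   passed, otherwise the remaining time clamped to INT_MAX. */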
static int poll_deadline_to_millis_timeout(grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
  if (delta > INT_MAX) {
    return INT_MAX;
  } else if (delta < 0) {
    return 0;
  } else {
    return static_cast<int>(delta);
  }
}

/* Process the epoll events found by do_epoll_wait() function.
   - g_epoll_set.cursor points to the index of the first event to be processed
   - This function then processes up to MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION
     events and updates the g_epoll_set.cursor

   NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
   called by the g_active_poller thread. So there is no need for synchronization
   when accessing fields in g_epoll_set */
static grpc_error* process_epoll_events(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("process_epoll_events", 0);

  static const char* err_desc = "process_events";
  grpc_error* error = GRPC_ERROR_NONE;
  long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
  long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
  for (int idx = 0;
       (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
       idx++) {
    long c = cursor++;
    struct epoll_event* ev = &g_epoll_set.events[c];
    void* data_ptr = ev->data.ptr;

    if (data_ptr == &global_wakeup_fd) {
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd* fd = static_cast<grpc_fd*>(data_ptr);
      bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;

      if (read_ev || cancel) {
        fd_become_readable(fd, pollset);
      }

      if (write_ev || cancel) {
        fd_become_writable(fd);
      }
    }
  }
  gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
  return error;
}

/* Do epoll_wait and store the events in g_epoll_set.events field. This does not
   "process" any of the events yet; that is done in process_epoll_events().
   See process_epoll_events() function for more details.

   NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
   (i.e. the designated poller thread) will be calling this function. So there
   is no need for any synchronization when accessing fields in g_epoll_set */
static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
  GPR_TIMER_SCOPE("do_epoll_wait", 0);

  int r;
  int timeout = poll_deadline_to_millis_timeout(deadline);
  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  do {
    GRPC_STATS_INC_SYSCALL_POLL();
    r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
                   timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
  }

  gpr_atm_rel_store(&g_epoll_set.num_events, r);
  gpr_atm_rel_store(&g_epoll_set.cursor, 0);

  return GRPC_ERROR_NONE;
}

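/* Attach 'worker' to 'pollset' (moving the pollset back to its neighborhood's
   active list if needed) and decide what this worker should do. Returns true
   if the worker ended up as the DESIGNATED_POLLER and should go on to call
   epoll_wait(); returns false if it was kicked or the pollset is shutting
   down. */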
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_SCOPE("begin_worker", 0);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  SET_KICK_STATE(worker, UNKICKED);
  worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
  pollset->begin_refs++;

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "PS:%p BEGIN_STARTS:%p", pollset, worker);
  }

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move back to the
    // active list
    bool is_reassigning = false;
    if (!pollset->reassigning_neighborhood) {
      is_reassigning = true;
      pollset->reassigning_neighborhood = true;
      pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
    }
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
    // pollset unlocked: state may change (even worker->kick_state)
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_DEBUG, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
              pollset, worker, kick_state_string(worker->state),
              is_reassigning);
    }
    if (pollset->seen_inactive) {
      if (neighborhood != pollset->neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }

      /* In the brief time we released the pollset locks above, the worker MAY
         have been kicked. In this case, the worker should get out of this
         pollset ASAP and hence this should neither add the pollset to the
         neighborhood nor mark the pollset as active.

         On a side note, the only way a worker's kick state could have changed
         at this point is if it were "kicked specifically". Since the worker has
         not added itself to the pollset yet (by calling worker_insert()), it is
         not visible in the "kick any" path yet */
      if (worker->state == UNKICKED) {
        pollset->seen_inactive = false;
        if (neighborhood->active_root == nullptr) {
          neighborhood->active_root = pollset->next = pollset->prev = pollset;
          /* Make this the designated poller if there isn't one already */
          if (worker->state == UNKICKED &&
              gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
            SET_KICK_STATE(worker, DESIGNATED_POLLER);
          }
        } else {
          pollset->next = neighborhood->active_root;
          pollset->prev = pollset->next->prev;
          pollset->next->prev = pollset->prev->next = pollset;
        }
      }
    }
    if (is_reassigning) {
      GPR_ASSERT(pollset->reassigning_neighborhood);
      pollset->reassigning_neighborhood = false;
    }
    gpr_mu_unlock(&neighborhood->mu);
  }

  worker_insert(pollset, worker);
  pollset->begin_refs--;
  if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (worker->state == UNKICKED && !pollset->shutting_down) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
                pollset, worker, kick_state_string(worker->state),
                pollset->shutting_down);
      }

      if (gpr_cv_wait(&worker->cv, &pollset->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
          worker->state == UNKICKED) {
        /* If gpr_cv_wait returns true (i.e. a timeout), pretend that the worker
           received a kick */
        SET_KICK_STATE(worker, KICKED);
      }
    }
    grpc_core::ExecCtx::Get()->InvalidateNow();
  }

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
            "kicked_without_poller: %d",
            pollset, worker, kick_state_string(worker->state),
            pollset->shutting_down, pollset->kicked_without_poller);
  }

  /* We release pollset lock in this function at a couple of places:
   * 1. Briefly when assigning pollset to a neighborhood
   * 2. When doing gpr_cv_wait()
   * It is possible that 'kicked_without_poller' was set to true during (1) and
   * 'shutting_down' is set to true during (1) or (2). If either of them is
   * true, this worker cannot do polling */
  /* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller
   * case; especially when the worker is the DESIGNATED_POLLER */

  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    return false;
  }

  return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}

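/* Scan the pollsets in 'neighborhood' looking for a worker that can take over
   as the designated poller. Pollsets that turn out to have no available
   workers are marked seen_inactive and unlinked from the neighborhood's active
   list along the way. */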
static bool check_neighborhood_for_available_poller(
    pollset_neighborhood* neighborhood) {
  GPR_TIMER_SCOPE("check_neighborhood_for_available_poller", 0);
  bool found_worker = false;
  do {
    grpc_pollset* inspect = neighborhood->active_root;
    if (inspect == nullptr) {
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker* inspect_worker = inspect->root_worker;
    if (inspect_worker != nullptr) {
      do {
        switch (inspect_worker->state) {
          case UNKICKED:
            if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                       (gpr_atm)inspect_worker)) {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
                        inspect_worker);
              }
              SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
              if (inspect_worker->initialized_cv) {
                GPR_TIMER_MARK("signal worker", 0);
                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
                gpr_cv_signal(&inspect_worker->cv);
              }
            } else {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
              }
            }
            // even if we didn't win the cas, there's a worker, we can stop
            found_worker = true;
            break;
          case KICKED:
            break;
          case DESIGNATED_POLLER:
            found_worker = true;  // ok, so someone else found the worker, but
                                  // we'll accept that
            break;
        }
        inspect_worker = inspect_worker->next;
      } while (!found_worker && inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
      }
      inspect->seen_inactive = true;
      if (inspect == neighborhood->active_root) {
        neighborhood->active_root =
            inspect->next == inspect ? nullptr : inspect->next;
      }
      inspect->next->prev = inspect->prev;
      inspect->prev->next = inspect->next;
      inspect->next = inspect->prev = nullptr;
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  return found_worker;
}

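/* Undo begin_worker(): if this worker was the designated poller, hand the role
   to an UNKICKED peer on the same pollset, or failing that scan the
   neighborhoods for another pollset's worker; then flush any closures queued
   on the exec_ctx and remove the worker from the pollset. */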
static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_SCOPE("end_worker", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
  }
  if (worker_hdl != nullptr) *worker_hdl = nullptr;
  /* Make sure we appear kicked */
  SET_KICK_STATE(worker, KICKED);
  grpc_closure_list_move(&worker->schedule_on_end_work,
                         grpc_core::ExecCtx::Get()->closure_list());
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    if (worker->next != worker && worker->next->state == UNKICKED) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
      }
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
      gpr_cv_signal(&worker->next->cv);
      if (grpc_core::ExecCtx::Get()->HasWork()) {
        gpr_mu_unlock(&pollset->mu);
        grpc_core::ExecCtx::Get()->Flush();
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      size_t poller_neighborhood_idx =
          static_cast<size_t>(pollset->neighborhood - g_neighborhoods);
      gpr_mu_unlock(&pollset->mu);
      bool found_worker = false;
      bool scan_state[MAX_NEIGHBORHOODS];
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        if (gpr_mu_trylock(&neighborhood->mu)) {
          found_worker = check_neighborhood_for_available_poller(neighborhood);
          gpr_mu_unlock(&neighborhood->mu);
          scan_state[i] = true;
        } else {
          scan_state[i] = false;
        }
      }
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        if (scan_state[i]) continue;
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        gpr_mu_lock(&neighborhood->mu);
        found_worker = check_neighborhood_for_available_poller(neighborhood);
        gpr_mu_unlock(&neighborhood->mu);
      }
      grpc_core::ExecCtx::Get()->Flush();
      gpr_mu_lock(&pollset->mu);
    }
  } else if (grpc_core::ExecCtx::Get()->HasWork()) {
    gpr_mu_unlock(&pollset->mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(&pollset->mu);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, " .. remove worker");
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    pollset_maybe_finish_shutdown(pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
}

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock and
   ensure that it is held by the time the function returns */
static grpc_error* pollset_work(grpc_pollset* ps,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollset_work", 0);
  grpc_pollset_worker worker;
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_work";
  if (ps->kicked_without_poller) {
    ps->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }

  if (begin_worker(ps, &worker, worker_hdl, deadline)) {
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!ps->shutting_down);
    GPR_ASSERT(!ps->seen_inactive);

    gpr_mu_unlock(&ps->mu); /* unlock */
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700954 /* This is the designated polling thread at this point and should ideally do
955 polling. However, if there are unprocessed events left from a previous
956 call to do_epoll_wait(), skip calling epoll_wait() in this iteration and
957 process the pending epoll events.
958
959 The reason for decoupling do_epoll_wait and process_epoll_events is to
960     better distribute the work (i.e. handling epoll events) across multiple
961     threads.
962
963 process_epoll_events() returns very quickly: It just queues the work on
964     exec_ctx but does not execute it (the actual execution, or more
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800965 accurately grpc_core::ExecCtx::Get()->Flush() happens in end_worker()
966 AFTER selecting a designated poller). So we are not waiting long periods
967 without a designated poller */
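    /* cursor == num_events means every event fetched by the previous
       do_epoll_wait() has already been handed off to process_epoll_events(),
       so it is safe to call epoll_wait() again here. */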
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700968 if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
969 gpr_atm_acq_load(&g_epoll_set.num_events)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800970 append_error(&error, do_epoll_wait(ps, deadline), err_desc);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700971 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800972 append_error(&error, process_epoll_events(ps), err_desc);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700973
974 gpr_mu_lock(&ps->mu); /* lock */
975
Craig Tiller4509c472017-04-27 19:05:13 +0000976 gpr_tls_set(&g_current_thread_worker, 0);
Craig Tiller830e82a2017-05-31 16:26:27 -0700977 } else {
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700978 gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
Craig Tiller4509c472017-04-27 19:05:13 +0000979 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800980 end_worker(ps, &worker, worker_hdl);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700981
Craig Tiller8502ecb2017-04-28 14:22:01 -0700982 gpr_tls_set(&g_current_thread_pollset, 0);
Craig Tiller4509c472017-04-27 19:05:13 +0000983 return error;
984}
985
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800986static grpc_error* pollset_kick(grpc_pollset* pollset,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700987 grpc_pollset_worker* specific_worker) {
yang-gce1cfea2018-01-31 15:59:50 -0800988 GPR_TIMER_SCOPE("pollset_kick", 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800989 GRPC_STATS_INC_POLLSET_KICK();
Craig Tillerbaa14a92017-11-03 09:09:36 -0700990 grpc_error* ret_err = GRPC_ERROR_NONE;
ncteisen3cffe1f2017-11-10 13:56:23 -0800991 if (grpc_polling_trace.enabled()) {
Craig Tillerb89bac02017-05-26 15:20:32 +0000992 gpr_strvec log;
993 gpr_strvec_init(&log);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700994 char* tmp;
995 gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
996 specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
997 (void*)gpr_tls_get(&g_current_thread_worker),
998 pollset->root_worker);
Craig Tillerb89bac02017-05-26 15:20:32 +0000999 gpr_strvec_add(&log, tmp);
Craig Tiller4782d922017-11-10 09:53:21 -08001000 if (pollset->root_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001001 gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001002 kick_state_string(pollset->root_worker->state),
Craig Tiller830e82a2017-05-31 16:26:27 -07001003 pollset->root_worker->next,
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001004 kick_state_string(pollset->root_worker->next->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001005 gpr_strvec_add(&log, tmp);
1006 }
Craig Tiller4782d922017-11-10 09:53:21 -08001007 if (specific_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001008 gpr_asprintf(&tmp, " worker_kick_state=%s",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001009 kick_state_string(specific_worker->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001010 gpr_strvec_add(&log, tmp);
1011 }
Craig Tiller4782d922017-11-10 09:53:21 -08001012 tmp = gpr_strvec_flatten(&log, nullptr);
Craig Tillerb89bac02017-05-26 15:20:32 +00001013 gpr_strvec_destroy(&log);
yang-g69b4e4c2018-01-24 14:36:20 -08001014 gpr_log(GPR_DEBUG, "%s", tmp);
Craig Tillerb89bac02017-05-26 15:20:32 +00001015 gpr_free(tmp);
1016 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001017
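  /* No specific worker was requested: pick a target below. Workers that are
     already KICKED are left alone; the active poller is woken through the
     wakeup fd; a sleeping (UNKICKED) worker is woken through its condition
     variable. */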
Craig Tiller4782d922017-11-10 09:53:21 -08001018 if (specific_worker == nullptr) {
Craig Tiller4509c472017-04-27 19:05:13 +00001019 if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001020 grpc_pollset_worker* root_worker = pollset->root_worker;
Craig Tiller4782d922017-11-10 09:53:21 -08001021 if (root_worker == nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001022 GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
Craig Tiller4509c472017-04-27 19:05:13 +00001023 pollset->kicked_without_poller = true;
ncteisen3cffe1f2017-11-10 13:56:23 -08001024 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001025 gpr_log(GPR_DEBUG, " .. kicked_without_poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001026 }
yang-gdf92a642017-08-21 22:38:45 -07001027 goto done;
Craig Tiller375eb252017-04-27 23:29:12 +00001028 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001029 grpc_pollset_worker* next_worker = root_worker->next;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001030 if (root_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001031 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001032 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001033 gpr_log(GPR_DEBUG, " .. already kicked %p", root_worker);
Craig Tiller830e82a2017-05-31 16:26:27 -07001034 }
1035 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001036 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001037 } else if (next_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001038 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001039 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001040 gpr_log(GPR_DEBUG, " .. already kicked %p", next_worker);
Craig Tiller830e82a2017-05-31 16:26:27 -07001041 }
1042 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001043 goto done;
Craig Tiller830e82a2017-05-31 16:26:27 -07001044 } else if (root_worker ==
1045                 next_worker &&  // only try to wake up a poller if
1046                                 // the root worker is the only worker
Craig Tillerbaa14a92017-11-03 09:09:36 -07001047 root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
Craig Tiller830e82a2017-05-31 16:26:27 -07001048 &g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001049 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001050 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001051 gpr_log(GPR_DEBUG, " .. kicked %p", root_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001052 }
Craig Tiller55624a32017-05-26 08:14:44 -07001053 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001054 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1055 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001056 } else if (next_worker->state == UNKICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001057 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001058 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001059 gpr_log(GPR_DEBUG, " .. kicked %p", next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001060 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001061 GPR_ASSERT(next_worker->initialized_cv);
Craig Tiller55624a32017-05-26 08:14:44 -07001062 SET_KICK_STATE(next_worker, KICKED);
Craig Tiller375eb252017-04-27 23:29:12 +00001063 gpr_cv_signal(&next_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001064 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001065 } else if (next_worker->state == DESIGNATED_POLLER) {
1066 if (root_worker->state != DESIGNATED_POLLER) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001067 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001068 gpr_log(
yang-g69b4e4c2018-01-24 14:36:20 -08001069 GPR_DEBUG,
Craig Tiller830e82a2017-05-31 16:26:27 -07001070 " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
1071 root_worker, root_worker->initialized_cv, next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001072 }
Craig Tiller55624a32017-05-26 08:14:44 -07001073 SET_KICK_STATE(root_worker, KICKED);
1074 if (root_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001075 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
Craig Tiller55624a32017-05-26 08:14:44 -07001076 gpr_cv_signal(&root_worker->cv);
1077 }
yang-gdf92a642017-08-21 22:38:45 -07001078 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001079 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001080 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001081 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001082 gpr_log(GPR_DEBUG, " .. non-root poller %p (root=%p)", next_worker,
Craig Tiller75aef7f2017-05-26 08:26:08 -07001083 root_worker);
1084 }
Craig Tiller55624a32017-05-26 08:14:44 -07001085 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001086 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1087 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001088 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001089 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001090 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001091 GPR_ASSERT(next_worker->state == KICKED);
Craig Tiller55624a32017-05-26 08:14:44 -07001092 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001093 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001094 }
1095 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001096 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001097 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001098 gpr_log(GPR_DEBUG, " .. kicked while waking up");
Craig Tiller830e82a2017-05-31 16:26:27 -07001099 }
yang-gdf92a642017-08-21 22:38:45 -07001100 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001101 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001102
1103 GPR_UNREACHABLE_CODE(goto done);
1104 }
1105
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001106 if (specific_worker->state == KICKED) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001107 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001108 gpr_log(GPR_DEBUG, " .. specific worker already kicked");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001109 }
yang-gdf92a642017-08-21 22:38:45 -07001110 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001111 } else if (gpr_tls_get(&g_current_thread_worker) ==
1112 (intptr_t)specific_worker) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001113 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001114 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001115 gpr_log(GPR_DEBUG, " .. mark %p kicked", specific_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001116 }
Craig Tiller55624a32017-05-26 08:14:44 -07001117 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001118 goto done;
Craig Tiller32f90ee2017-04-28 12:46:41 -07001119 } else if (specific_worker ==
Craig Tillerbaa14a92017-11-03 09:09:36 -07001120 (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001121 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001122 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001123 gpr_log(GPR_DEBUG, " .. kick active poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001124 }
Craig Tiller55624a32017-05-26 08:14:44 -07001125 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001126 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1127 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001128 } else if (specific_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001129 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001130 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001131 gpr_log(GPR_DEBUG, " .. kick waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001132 }
Craig Tiller55624a32017-05-26 08:14:44 -07001133 SET_KICK_STATE(specific_worker, KICKED);
Craig Tiller4509c472017-04-27 19:05:13 +00001134 gpr_cv_signal(&specific_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001135 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001136 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001137 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001138 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001139 gpr_log(GPR_DEBUG, " .. kick non-waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001140 }
Craig Tiller55624a32017-05-26 08:14:44 -07001141 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001142 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001143 }
yang-gdf92a642017-08-21 22:38:45 -07001144done:
yang-gdf92a642017-08-21 22:38:45 -07001145 return ret_err;
Craig Tiller4509c472017-04-27 19:05:13 +00001146}
1147
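/* Adding an fd to a pollset is a no-op here: with one process-wide epoll set,
   fds are registered with it at creation time (fd_create), so there is nothing
   per-pollset to track. */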
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001148static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
Craig Tiller4509c472017-04-27 19:05:13 +00001149
Craig Tiller4509c472017-04-27 19:05:13 +00001150/*******************************************************************************
Craig Tillerc67cc992017-04-27 10:15:51 -07001151 * Pollset-set Definitions
1152 */
1153
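/* This engine drives a single process-wide epoll set, so pollset_sets carry no
   state of their own: pollset_set_create() just hands back a non-null sentinel
   and every other operation below is a no-op. */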
Craig Tillerbaa14a92017-11-03 09:09:36 -07001154static grpc_pollset_set* pollset_set_create(void) {
Noah Eisenbe82e642018-02-09 09:16:55 -08001155 return (grpc_pollset_set*)(static_cast<intptr_t>(0xdeafbeef));
Craig Tillerc67cc992017-04-27 10:15:51 -07001156}
1157
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001158static void pollset_set_destroy(grpc_pollset_set* pss) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001159
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001160static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001161
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001162static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001163
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001164static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001165
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001166static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001167
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001168static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001169 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001170
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001171static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001172 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001173
1174/*******************************************************************************
1175 * Event engine binding
1176 */
1177
1178static void shutdown_engine(void) {
1179 fd_global_shutdown();
1180 pollset_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001181 epoll_set_shutdown();
Craig Tillerc67cc992017-04-27 10:15:51 -07001182}
1183
1184static const grpc_event_engine_vtable vtable = {
Yash Tibrewal533d1182017-09-18 10:48:22 -07001185 sizeof(grpc_pollset),
Craig Tillerc67cc992017-04-27 10:15:51 -07001186
Yash Tibrewal533d1182017-09-18 10:48:22 -07001187 fd_create,
1188 fd_wrapped_fd,
1189 fd_orphan,
1190 fd_shutdown,
1191 fd_notify_on_read,
1192 fd_notify_on_write,
1193 fd_is_shutdown,
1194 fd_get_read_notifier_pollset,
Craig Tillerc67cc992017-04-27 10:15:51 -07001195
Yash Tibrewal533d1182017-09-18 10:48:22 -07001196 pollset_init,
1197 pollset_shutdown,
1198 pollset_destroy,
1199 pollset_work,
1200 pollset_kick,
1201 pollset_add_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001202
Yash Tibrewal533d1182017-09-18 10:48:22 -07001203 pollset_set_create,
1204 pollset_set_destroy,
1205 pollset_set_add_pollset,
1206 pollset_set_del_pollset,
1207 pollset_set_add_pollset_set,
1208 pollset_set_del_pollset_set,
1209 pollset_set_add_fd,
1210 pollset_set_del_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001211
Yash Tibrewal533d1182017-09-18 10:48:22 -07001212 shutdown_engine,
Craig Tillerc67cc992017-04-27 10:15:51 -07001213};
1214
1215/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001216 * Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll
1217 * support is available */
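/* Illustrative only (the real probe lives in epoll_set_init()): detecting
   kernel support typically amounts to attempting to create an epoll fd and
   giving up if that fails, e.g.

     int fd = epoll_create1(EPOLL_CLOEXEC);
     if (fd < 0) {
       gpr_log(GPR_ERROR, "epoll_create1 failed: %s", strerror(errno));
       return nullptr;  // caller falls back to another polling engine
     }
*/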
Craig Tillerbaa14a92017-11-03 09:09:36 -07001218const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Craig Tillerc67cc992017-04-27 10:15:51 -07001219 if (!grpc_has_wakeup_fd()) {
yang-g30101b02017-11-06 14:35:30 -08001220 gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
Craig Tiller4782d922017-11-10 09:53:21 -08001221 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001222 }
1223
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001224 if (!epoll_set_init()) {
Craig Tiller4782d922017-11-10 09:53:21 -08001225 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001226 }
1227
Craig Tillerc67cc992017-04-27 10:15:51 -07001228 fd_global_init();
1229
1230 if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
Craig Tiller4509c472017-04-27 19:05:13 +00001231 fd_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001232 epoll_set_shutdown();
Craig Tiller4782d922017-11-10 09:53:21 -08001233 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001234 }
1235
1236 return &vtable;
1237}
1238
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001239#else /* defined(GRPC_LINUX_EPOLL) */
Muxi Yan67ff4052018-05-15 12:36:10 -07001240#if defined(GRPC_POSIX_SOCKET_EV_EPOLL1)
Yash Tibrewal1cac2232017-09-26 11:31:11 -07001241#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -07001242/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
1243 * NULL */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001244const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001245 return nullptr;
Craig Tiller9ddb3152017-04-27 21:32:56 +00001246}
Craig Tillerc67cc992017-04-27 10:15:51 -07001247#endif /* defined(GRPC_POSIX_SOCKET_EV_EPOLL1) */
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001248#endif /* !defined(GRPC_LINUX_EPOLL) */