/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll
   (epoll_create() or epoll_create1()) */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr++/manual_constructor.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"

static grpc_wakeup_fd global_wakeup_fd;

/*******************************************************************************
 * Singleton epoll set related fields
 */

#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1

/* NOTE ON SYNCHRONIZATION:
 * - Fields in this struct are only modified by the designated poller. Hence
 *   there is no need for any locks to protect the struct.
 * - num_events and cursor fields have to be of atomic type to provide memory
 *   visibility guarantees only; i.e., with multiple pollers the designated
 *   polling thread keeps changing, so the thread that wrote these values may
 *   be different from the thread reading them.
 */
typedef struct epoll_set {
  int epfd;

  /* The epoll_events after the last call to epoll_wait() */
  struct epoll_event events[MAX_EPOLL_EVENTS];

  /* The number of epoll_events after the last call to epoll_wait() */
  gpr_atm num_events;

  /* Index of the first event in epoll_events that has to be processed. This
   * field is only valid if num_events > 0 */
  gpr_atm cursor;
} epoll_set;

/* The global singleton epoll set */
static epoll_set g_epoll_set;

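/* Create the epoll fd with the close-on-exec flag set. Uses epoll_create1()
   when the platform provides it (GRPC_LINUX_EPOLL_CREATE1), otherwise falls
   back to epoll_create() followed by fcntl(F_SETFD, FD_CLOEXEC). Returns a
   negative value on failure. */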
static int epoll_create_and_cloexec() {
#ifdef GRPC_LINUX_EPOLL_CREATE1
  int fd = epoll_create1(EPOLL_CLOEXEC);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "epoll_create1 unavailable");
  }
#else
  int fd = epoll_create(MAX_EPOLL_EVENTS);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "epoll_create unavailable");
  } else if (fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) {
    gpr_log(GPR_ERROR, "fcntl following epoll_create failed");
    return -1;
  }
#endif
  return fd;
}

/* Must be called *only* once */
static bool epoll_set_init() {
  g_epoll_set.epfd = epoll_create_and_cloexec();
  if (g_epoll_set.epfd < 0) {
    return false;
  }

  gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
  gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
  gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
  return true;
}

/* epoll_set_init() MUST be called before calling this. */
static void epoll_set_shutdown() {
  if (g_epoll_set.epfd >= 0) {
    close(g_epoll_set.epfd);
    g_epoll_set.epfd = -1;
  }
}

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;

  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;

  struct grpc_fd* freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;

static const char* kick_state_string(kick_state st) {
  switch (st) {
    case UNKICKED:
      return "UNKICKED";
    case KICKED:
      return "KICKED";
    case DESIGNATED_POLLER:
      return "DESIGNATED_POLLER";
  }
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
}

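/* Per-call state for pollset_work(). Workers are linked into a circular
   doubly-linked list rooted at pollset->root_worker. 'state' records whether
   the worker is still waiting (UNKICKED), has been woken up (KICKED), or is
   the single thread currently allowed to call epoll_wait()
   (DESIGNATED_POLLER). */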
struct grpc_pollset_worker {
  kick_state state;
  int kick_state_mutator;  // which line of code last changed kick state
  bool initialized_cv;
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
  gpr_cv cv;
  grpc_closure_list schedule_on_end_work;
};

#define SET_KICK_STATE(worker, kick_state)   \
  do {                                       \
    (worker)->state = (kick_state);          \
    (worker)->kick_state_mutator = __LINE__; \
  } while (false)

#define MAX_NEIGHBORHOODS 1024

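/* Pollsets are sharded across "neighborhoods" (roughly one per CPU core,
   capped at MAX_NEIGHBORHOODS). Each neighborhood keeps its own list of active
   pollsets behind its own cache-line padded mutex, instead of one global list
   and lock. */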
typedef struct pollset_neighborhood {
  gpr_mu mu;
  grpc_pollset* active_root;
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighborhood;

struct grpc_pollset {
  gpr_mu mu;
  pollset_neighborhood* neighborhood;
  bool reassigning_neighborhood;
  grpc_pollset_worker* root_worker;
  bool kicked_without_poller;

  /* Set to true if the pollset is observed to have no workers available to
     poll */
  bool seen_inactive;
  bool shutting_down; /* Is the pollset shutting down ? */
  grpc_closure* shutdown_closure; /* Called after shutdown is complete */

  /* Number of workers who are *about-to* attach themselves to the pollset
   * worker list */
  int begin_refs;

  grpc_pollset* next;
  grpc_pollset* prev;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  char unused;
};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error** composite, grpc_error* error,
                         const char* desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != nullptr) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

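/* Get a grpc_fd from the freelist (or allocate a new one), initialize its
   lockfree read/write events, register it with the iomgr, and add the
   underlying fd to the singleton epoll set in edge-triggered mode (EPOLLIN |
   EPOLLOUT | EPOLLET). */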
static grpc_fd* fd_create(int fd, const char* name) {
  grpc_fd* new_fd = nullptr;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
    new_fd->read_closure.Init();
    new_fd->write_closure.Init();
  }

  new_fd->fd = fd;
  new_fd->read_closure->InitEvent();
  new_fd->write_closure->InitEvent();
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = nullptr;

  char* fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);

  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
  ev.data.ptr = new_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}

static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }

/* If 'releasing_fd' is true, it means that we are going to detach the internal
 * fd from the grpc_fd structure (i.e., we should not be calling the shutdown()
 * syscall on that fd) */
static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why,
                                 bool releasing_fd) {
  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
    if (!releasing_fd) {
      shutdown(fd->fd, SHUT_RDWR);
    }
    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  fd_shutdown_internal(fd, why, false);
}

static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      bool already_closed, const char* reason) {
  grpc_error* error = GRPC_ERROR_NONE;
  bool is_release_fd = (release_fd != nullptr);

  if (!fd->read_closure->IsShutdown()) {
    fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                         is_release_fd);
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (is_release_fd) {
    *release_fd = fd->fd;
  } else if (!already_closed) {
    close(fd->fd);
  }

  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  fd->read_closure->DestroyEvent();
  fd->write_closure->DestroyEvent();

  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset*)notifier;
}

static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure->IsShutdown();
}

static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure->NotifyOn(closure);
}

static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure->NotifyOn(closure);
}

static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
  fd->read_closure->SetReady();
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* The designated poller */
static gpr_atm g_active_poller;

static pollset_neighborhood* g_neighborhoods;
static size_t g_num_neighborhoods;

/* Return true if first in list */
static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) {
  if (pollset->root_worker == nullptr) {
    pollset->root_worker = worker;
    worker->next = worker->prev = worker;
    return true;
  } else {
    worker->next = pollset->root_worker;
    worker->prev = worker->next->prev;
    worker->next->prev = worker;
    worker->prev->next = worker;
    return false;
  }
}

/* Return true if last in list */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset* pollset,
                                          grpc_pollset_worker* worker) {
  if (worker == pollset->root_worker) {
    if (worker == worker->next) {
      pollset->root_worker = nullptr;
      return EMPTIED;
    } else {
      pollset->root_worker = worker->next;
      worker->prev->next = worker->next;
      worker->next->prev = worker->prev;
      return NEW_ROOT;
    }
  } else {
    worker->prev->next = worker->next;
    worker->next->prev = worker->prev;
    return REMOVED;
  }
}

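/* Map the CPU the calling thread is currently running on to a neighborhood
   index. */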
static size_t choose_neighborhood(void) {
  return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
}

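/* One-time pollset machinery setup: initialize the thread-local slots and the
   designated-poller handle, create the global wakeup fd and register it
   (edge-triggered) with the singleton epoll set, then allocate one
   pollset_neighborhood per core and initialize its mutex. */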
static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  global_wakeup_fd.read_fd = -1;
  grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLET);
  ev.data.ptr = &global_wakeup_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
                &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
  g_neighborhoods = (pollset_neighborhood*)gpr_zalloc(sizeof(*g_neighborhoods) *
                                                      g_num_neighborhoods);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_init(&g_neighborhoods[i].mu);
  }
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_destroy(&g_neighborhoods[i].mu);
  }
  gpr_free(g_neighborhoods);
}

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
  pollset->reassigning_neighborhood = false;
  pollset->root_worker = nullptr;
  pollset->kicked_without_poller = false;
  pollset->seen_inactive = true;
  pollset->shutting_down = false;
  pollset->shutdown_closure = nullptr;
  pollset->begin_refs = 0;
  pollset->next = pollset->prev = nullptr;
}

static void pollset_destroy(grpc_pollset* pollset) {
  gpr_mu_lock(&pollset->mu);
  if (!pollset->seen_inactive) {
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (!pollset->seen_inactive) {
      if (pollset->neighborhood != neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }
      pollset->prev->next = pollset->next;
      pollset->next->prev = pollset->prev;
      if (pollset == pollset->neighborhood->active_root) {
        pollset->neighborhood->active_root =
            pollset->next == pollset ? nullptr : pollset->next;
      }
    }
    gpr_mu_unlock(&pollset->neighborhood->mu);
  }
  gpr_mu_unlock(&pollset->mu);
  gpr_mu_destroy(&pollset->mu);
}

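/* Kick every worker attached to this pollset: UNKICKED workers are woken via
   their condition variable, the DESIGNATED_POLLER is woken through the global
   wakeup fd, and workers that are already KICKED are skipped. */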
static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_BEGIN("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->root_worker != nullptr) {
    grpc_pollset_worker* worker = pollset->root_worker;
    do {
      GRPC_STATS_INC_POLLSET_KICK();
      switch (worker->state) {
        case KICKED:
          GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
          break;
        case UNKICKED:
          SET_KICK_STATE(worker, KICKED);
          if (worker->initialized_cv) {
            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
            gpr_cv_signal(&worker->cv);
          }
          break;
        case DESIGNATED_POLLER:
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
          SET_KICK_STATE(worker, KICKED);
          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                       "pollset_kick_all");
          break;
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  // TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
  // in the else case
  GPR_TIMER_END("pollset_kick_all", 0);
  return error;
}

static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
  if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
      pollset->begin_refs == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = nullptr;
  }
}

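/* Start shutting the pollset down: record the closure to run once shutdown
   completes, kick all attached workers so they notice the state change, and
   finish immediately if no workers (or pending begin_worker calls) remain. */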
static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
  GPR_TIMER_BEGIN("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == nullptr);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutdown_closure = closure;
  pollset->shutting_down = true;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(pollset);
  GPR_TIMER_END("pollset_shutdown", 0);
}

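/* Convert an absolute grpc_millis deadline into the relative millisecond
   timeout expected by epoll_wait(): -1 means wait forever, 0 means the
   deadline has already passed, and anything else is clamped to INT_MAX. */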
static int poll_deadline_to_millis_timeout(grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
  if (delta > INT_MAX) {
    return INT_MAX;
  } else if (delta < 0) {
    return 0;
  } else {
    return (int)delta;
  }
}

/* Process the epoll events found by do_epoll_wait().
   - g_epoll_set.cursor points to the index of the first event to be processed
   - This function then processes up to MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION
     events and updates g_epoll_set.cursor

   NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
   called by the g_active_poller thread. So there is no need for synchronization
   when accessing fields in g_epoll_set */
static grpc_error* process_epoll_events(grpc_pollset* pollset) {
  static const char* err_desc = "process_events";
  grpc_error* error = GRPC_ERROR_NONE;

  GPR_TIMER_BEGIN("process_epoll_events", 0);
  long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
  long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
  for (int idx = 0;
       (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
       idx++) {
    long c = cursor++;
    struct epoll_event* ev = &g_epoll_set.events[c];
    void* data_ptr = ev->data.ptr;

    if (data_ptr == &global_wakeup_fd) {
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd* fd = (grpc_fd*)(data_ptr);
      bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;

      if (read_ev || cancel) {
        fd_become_readable(fd, pollset);
      }

      if (write_ev || cancel) {
        fd_become_writable(fd);
      }
    }
  }
  gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
  GPR_TIMER_END("process_epoll_events", 0);
  return error;
}

/* Do epoll_wait and store the events in the g_epoll_set.events field. This does
   not "process" any of the events yet; that is done in process_epoll_events().
   See process_epoll_events() for more details.

   NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
   (i.e., the designated poller thread) will be calling this function. So there
   is no need for any synchronization when accessing fields in g_epoll_set */
static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
  GPR_TIMER_BEGIN("do_epoll_wait", 0);

  int r;
  int timeout = poll_deadline_to_millis_timeout(deadline);
  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  do {
    GRPC_STATS_INC_SYSCALL_POLL();
    r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
                   timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
  }

  gpr_atm_rel_store(&g_epoll_set.num_events, r);
  gpr_atm_rel_store(&g_epoll_set.cursor, 0);

  GPR_TIMER_END("do_epoll_wait", 0);
  return GRPC_ERROR_NONE;
}

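/* Attach 'worker' to the pollset. If the pollset was marked inactive, it is
   first re-linked into a neighborhood's active list (and this worker may grab
   the designated-poller role right away). A worker that is still UNKICKED then
   blocks on its condition variable until it is kicked, promoted to designated
   poller, or the deadline expires. Returns true iff this worker ended up as
   the DESIGNATED_POLLER and the pollset is not shutting down, i.e. iff it
   should go on to call epoll_wait(). */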
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_BEGIN("begin_worker", 0);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  SET_KICK_STATE(worker, UNKICKED);
  worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
  pollset->begin_refs++;

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_ERROR, "PS:%p BEGIN_STARTS:%p", pollset, worker);
  }

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move back to the
    // active list
    bool is_reassigning = false;
    if (!pollset->reassigning_neighborhood) {
      is_reassigning = true;
      pollset->reassigning_neighborhood = true;
      pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
    }
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  // pollset unlocked: state may change (even worker->kick_state)
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
              pollset, worker, kick_state_string(worker->state),
              is_reassigning);
    }
    if (pollset->seen_inactive) {
      if (neighborhood != pollset->neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }

      /* In the brief time we released the pollset locks above, the worker MAY
         have been kicked. In this case, the worker should get out of this
         pollset ASAP and hence this should neither add the pollset to
         neighborhood nor mark the pollset as active.

         On a side note, the only way a worker's kick state could have changed
         at this point is if it were "kicked specifically". Since the worker has
         not added itself to the pollset yet (by calling worker_insert()), it is
         not visible in the "kick any" path yet */
      if (worker->state == UNKICKED) {
        pollset->seen_inactive = false;
        if (neighborhood->active_root == nullptr) {
          neighborhood->active_root = pollset->next = pollset->prev = pollset;
          /* Make this the designated poller if there isn't one already */
          if (worker->state == UNKICKED &&
              gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
            SET_KICK_STATE(worker, DESIGNATED_POLLER);
          }
        } else {
          pollset->next = neighborhood->active_root;
          pollset->prev = pollset->next->prev;
          pollset->next->prev = pollset->prev->next = pollset;
        }
      }
    }
    if (is_reassigning) {
      GPR_ASSERT(pollset->reassigning_neighborhood);
      pollset->reassigning_neighborhood = false;
    }
    gpr_mu_unlock(&neighborhood->mu);
  }

  worker_insert(pollset, worker);
  pollset->begin_refs--;
  if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (worker->state == UNKICKED && !pollset->shutting_down) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
                pollset, worker, kick_state_string(worker->state),
                pollset->shutting_down);
      }

      if (gpr_cv_wait(&worker->cv, &pollset->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
          worker->state == UNKICKED) {
        /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
           received a kick */
        SET_KICK_STATE(worker, KICKED);
      }
    }
    grpc_core::ExecCtx::Get()->InvalidateNow();
  }

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_ERROR,
            "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
            "kicked_without_poller: %d",
            pollset, worker, kick_state_string(worker->state),
            pollset->shutting_down, pollset->kicked_without_poller);
  }

  /* We release pollset lock in this function at a couple of places:
   *   1. Briefly when assigning pollset to a neighborhood
   *   2. When doing gpr_cv_wait()
   * It is possible that 'kicked_without_poller' was set to true during (1) and
   * 'shutting_down' is set to true during (1) or (2). If either of them is
   * true, this worker cannot do polling */
  /* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller
   * case; especially when the worker is the DESIGNATED_POLLER */

  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    GPR_TIMER_END("begin_worker", 0);
    return false;
  }

  GPR_TIMER_END("begin_worker", 0);
  return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}

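/* Walk the pollsets on a neighborhood's active list looking for an UNKICKED
   worker that can be promoted to designated poller (signalling it if found).
   Pollsets that turn out to have no usable worker are marked inactive and
   unlinked from the list. Returns true if a poller was found (or someone else
   already designated one); returns false once the active list is exhausted. */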
static bool check_neighborhood_for_available_poller(
    pollset_neighborhood* neighborhood) {
  GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
  bool found_worker = false;
  do {
    grpc_pollset* inspect = neighborhood->active_root;
    if (inspect == nullptr) {
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker* inspect_worker = inspect->root_worker;
    if (inspect_worker != nullptr) {
      do {
        switch (inspect_worker->state) {
          case UNKICKED:
            if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                       (gpr_atm)inspect_worker)) {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
                        inspect_worker);
              }
              SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
              if (inspect_worker->initialized_cv) {
                GPR_TIMER_MARK("signal worker", 0);
                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
                gpr_cv_signal(&inspect_worker->cv);
              }
            } else {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
              }
            }
            // even if we didn't win the cas, there's a worker, we can stop
            found_worker = true;
            break;
          case KICKED:
            break;
          case DESIGNATED_POLLER:
            found_worker = true;  // ok, so someone else found the worker, but
                                  // we'll accept that
            break;
        }
        inspect_worker = inspect_worker->next;
      } while (!found_worker && inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
      }
      inspect->seen_inactive = true;
      if (inspect == neighborhood->active_root) {
        neighborhood->active_root =
            inspect->next == inspect ? nullptr : inspect->next;
      }
      inspect->next->prev = inspect->prev;
      inspect->prev->next = inspect->next;
      inspect->next = inspect->prev = nullptr;
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  GPR_TIMER_END("check_neighborhood_for_available_poller", 0);
  return found_worker;
}

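/* Undo begin_worker(): if this worker held the designated-poller role, hand it
   to an UNKICKED peer in the same pollset or scan the neighborhoods for
   another candidate; flush closures queued while polling; remove the worker
   from the pollset's list and, if it was the last one, try to finish a pending
   shutdown. */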
static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_BEGIN("end_worker", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
  }
  if (worker_hdl != nullptr) *worker_hdl = nullptr;
  /* Make sure we appear kicked */
  SET_KICK_STATE(worker, KICKED);
  grpc_closure_list_move(&worker->schedule_on_end_work,
                         grpc_core::ExecCtx::Get()->closure_list());
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    if (worker->next != worker && worker->next->state == UNKICKED) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
      }
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
      gpr_cv_signal(&worker->next->cv);
      if (grpc_core::ExecCtx::Get()->HasWork()) {
        gpr_mu_unlock(&pollset->mu);
        grpc_core::ExecCtx::Get()->Flush();
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      size_t poller_neighborhood_idx =
          (size_t)(pollset->neighborhood - g_neighborhoods);
      gpr_mu_unlock(&pollset->mu);
      bool found_worker = false;
      bool scan_state[MAX_NEIGHBORHOODS];
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        if (gpr_mu_trylock(&neighborhood->mu)) {
          found_worker = check_neighborhood_for_available_poller(neighborhood);
          gpr_mu_unlock(&neighborhood->mu);
          scan_state[i] = true;
        } else {
          scan_state[i] = false;
        }
      }
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        if (scan_state[i]) continue;
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        gpr_mu_lock(&neighborhood->mu);
        found_worker = check_neighborhood_for_available_poller(neighborhood);
        gpr_mu_unlock(&neighborhood->mu);
      }
      grpc_core::ExecCtx::Get()->Flush();
      gpr_mu_lock(&pollset->mu);
    }
  } else if (grpc_core::ExecCtx::Get()->HasWork()) {
    gpr_mu_unlock(&pollset->mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(&pollset->mu);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, " .. remove worker");
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    pollset_maybe_finish_shutdown(pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
  GPR_TIMER_END("end_worker", 0);
}

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock and
   ensure that it is held by the time the function returns */
static grpc_error* pollset_work(grpc_pollset* ps,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  grpc_pollset_worker worker;
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_work";
  GPR_TIMER_BEGIN("pollset_work", 0);
  if (ps->kicked_without_poller) {
    ps->kicked_without_poller = false;
    GPR_TIMER_END("pollset_work", 0);
    return GRPC_ERROR_NONE;
  }

  if (begin_worker(ps, &worker, worker_hdl, deadline)) {
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!ps->shutting_down);
    GPR_ASSERT(!ps->seen_inactive);

    gpr_mu_unlock(&ps->mu); /* unlock */
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700961 /* This is the designated polling thread at this point and should ideally do
962 polling. However, if there are unprocessed events left from a previous
963 call to do_epoll_wait(), skip calling epoll_wait() in this iteration and
964 process the pending epoll events.
965
966 The reason for decoupling do_epoll_wait and process_epoll_events is to
967 better distrubute the work (i.e handling epoll events) across multiple
968 threads
969
970 process_epoll_events() returns very quickly: It just queues the work on
971 exec_ctx but does not execute it (the actual execution, or more
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800972 accurately grpc_core::ExecCtx::Get()->Flush() happens in end_worker()
973 AFTER selecting a designated poller). So we are not waiting long periods
974 without a designated poller */
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700975 if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
976 gpr_atm_acq_load(&g_epoll_set.num_events)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800977 append_error(&error, do_epoll_wait(ps, deadline), err_desc);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700978 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800979 append_error(&error, process_epoll_events(ps), err_desc);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700980
981 gpr_mu_lock(&ps->mu); /* lock */
982
Craig Tiller4509c472017-04-27 19:05:13 +0000983 gpr_tls_set(&g_current_thread_worker, 0);
Craig Tiller830e82a2017-05-31 16:26:27 -0700984 } else {
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700985 gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
Craig Tiller4509c472017-04-27 19:05:13 +0000986 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800987 end_worker(ps, &worker, worker_hdl);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700988
Craig Tiller8502ecb2017-04-28 14:22:01 -0700989 gpr_tls_set(&g_current_thread_pollset, 0);
yang-gdf92a642017-08-21 22:38:45 -0700990 GPR_TIMER_END("pollset_work", 0);
Craig Tiller4509c472017-04-27 19:05:13 +0000991 return error;
992}
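
/* Minimal illustrative sketch of the fill-then-drain pattern described in
   the comment inside pollset_work() above. It is NOT the real g_epoll_set:
   the names example_event_cache, example_fill and example_drain_some are
   hypothetical, the fields are plain ints instead of the gpr_atm
   acquire/release pairs the engine uses, and no locking is shown. The point
   is only that one designated thread fills a shared cache with a single
   epoll_wait() call while workers drain it a few events at a time. */
typedef struct {
  int epfd;                                    /* epoll set being polled */
  struct epoll_event events[MAX_EPOLL_EVENTS]; /* cached events */
  int num_events;                              /* valid entries in events[] */
  int cursor;                                  /* next entry to hand out */
} example_event_cache;

/* Designated poller: refill only once the previous batch is fully drained. */
static int example_fill(example_event_cache* c, int timeout_ms) {
  if (c->cursor != c->num_events) return c->num_events - c->cursor;
  int n;
  do {
    n = epoll_wait(c->epfd, c->events, MAX_EPOLL_EVENTS, timeout_ms);
  } while (n < 0 && errno == EINTR);
  c->cursor = 0;
  c->num_events = n < 0 ? 0 : n;
  return c->num_events;
}

/* Any worker: take a small batch (mirroring
   MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) and queue the resulting work. */
static void example_drain_some(example_event_cache* c) {
  for (int i = 0; i < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION &&
                  c->cursor < c->num_events;
       i++) {
    struct epoll_event* ev = &c->events[c->cursor++];
    (void)ev; /* the engine would notify the fd's readers/writers here */
  }
}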
993
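/* Illustrative usage sketch only (not part of the engine): how a caller
   might drive pollset_work() while honoring the locking contract documented
   above it -- ps->mu is held on entry, may be dropped internally, and is
   guaranteed to be held again when pollset_work() returns. The name
   example_drive_pollset and the error tag are hypothetical. */
static void example_drive_pollset(grpc_pollset* ps, grpc_millis deadline) {
  grpc_pollset_worker* worker_hdl = nullptr;
  gpr_mu_lock(&ps->mu); /* contract: mu must be held before the call */
  grpc_error* err = pollset_work(ps, &worker_hdl, deadline);
  /* pollset_work() may have released and re-acquired ps->mu, but it is held
     again here, so this unlock is balanced. */
  gpr_mu_unlock(&ps->mu);
  GRPC_LOG_IF_ERROR("example_drive_pollset", err);
}
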
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800994static grpc_error* pollset_kick(grpc_pollset* pollset,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700995 grpc_pollset_worker* specific_worker) {
yang-gdf92a642017-08-21 22:38:45 -0700996 GPR_TIMER_BEGIN("pollset_kick", 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800997 GRPC_STATS_INC_POLLSET_KICK();
Craig Tillerbaa14a92017-11-03 09:09:36 -0700998 grpc_error* ret_err = GRPC_ERROR_NONE;
ncteisen3cffe1f2017-11-10 13:56:23 -0800999 if (grpc_polling_trace.enabled()) {
Craig Tillerb89bac02017-05-26 15:20:32 +00001000 gpr_strvec log;
1001 gpr_strvec_init(&log);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001002 char* tmp;
1003 gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
1004 specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
1005 (void*)gpr_tls_get(&g_current_thread_worker),
1006 pollset->root_worker);
Craig Tillerb89bac02017-05-26 15:20:32 +00001007 gpr_strvec_add(&log, tmp);
Craig Tiller4782d922017-11-10 09:53:21 -08001008 if (pollset->root_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001009 gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001010 kick_state_string(pollset->root_worker->state),
Craig Tiller830e82a2017-05-31 16:26:27 -07001011 pollset->root_worker->next,
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001012 kick_state_string(pollset->root_worker->next->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001013 gpr_strvec_add(&log, tmp);
1014 }
Craig Tiller4782d922017-11-10 09:53:21 -08001015 if (specific_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001016 gpr_asprintf(&tmp, " worker_kick_state=%s",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001017 kick_state_string(specific_worker->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001018 gpr_strvec_add(&log, tmp);
1019 }
Craig Tiller4782d922017-11-10 09:53:21 -08001020 tmp = gpr_strvec_flatten(&log, nullptr);
Craig Tillerb89bac02017-05-26 15:20:32 +00001021 gpr_strvec_destroy(&log);
Craig Tiller830e82a2017-05-31 16:26:27 -07001022 gpr_log(GPR_ERROR, "%s", tmp);
Craig Tillerb89bac02017-05-26 15:20:32 +00001023 gpr_free(tmp);
1024 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001025
Craig Tiller4782d922017-11-10 09:53:21 -08001026 if (specific_worker == nullptr) {
Craig Tiller4509c472017-04-27 19:05:13 +00001027 if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001028 grpc_pollset_worker* root_worker = pollset->root_worker;
Craig Tiller4782d922017-11-10 09:53:21 -08001029 if (root_worker == nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001030 GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
Craig Tiller4509c472017-04-27 19:05:13 +00001031 pollset->kicked_without_poller = true;
ncteisen3cffe1f2017-11-10 13:56:23 -08001032 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001033 gpr_log(GPR_ERROR, " .. kicked_without_poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001034 }
yang-gdf92a642017-08-21 22:38:45 -07001035 goto done;
Craig Tiller375eb252017-04-27 23:29:12 +00001036 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001037 grpc_pollset_worker* next_worker = root_worker->next;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001038 if (root_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001039 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001040 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001041 gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
1042 }
1043 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001044 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001045 } else if (next_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001046 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001047 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001048 gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
1049 }
1050 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001051 goto done;
Craig Tiller830e82a2017-05-31 16:26:27 -07001052 } else if (root_worker ==
1053 next_worker && // only try to wake up a poller if
1054 // there is no other worker
Craig Tillerbaa14a92017-11-03 09:09:36 -07001055 root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
Craig Tiller830e82a2017-05-31 16:26:27 -07001056 &g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001057 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001058 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001059 gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001060 }
Craig Tiller55624a32017-05-26 08:14:44 -07001061 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001062 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1063 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001064 } else if (next_worker->state == UNKICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001065 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001066 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001067 gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001068 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001069 GPR_ASSERT(next_worker->initialized_cv);
Craig Tiller55624a32017-05-26 08:14:44 -07001070 SET_KICK_STATE(next_worker, KICKED);
Craig Tiller375eb252017-04-27 23:29:12 +00001071 gpr_cv_signal(&next_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001072 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001073 } else if (next_worker->state == DESIGNATED_POLLER) {
1074 if (root_worker->state != DESIGNATED_POLLER) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001075 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001076 gpr_log(
1077 GPR_ERROR,
1078 " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
1079 root_worker, root_worker->initialized_cv, next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001080 }
Craig Tiller55624a32017-05-26 08:14:44 -07001081 SET_KICK_STATE(root_worker, KICKED);
1082 if (root_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001083 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
Craig Tiller55624a32017-05-26 08:14:44 -07001084 gpr_cv_signal(&root_worker->cv);
1085 }
yang-gdf92a642017-08-21 22:38:45 -07001086 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001087 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001088 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001089 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001090 gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
Craig Tiller75aef7f2017-05-26 08:26:08 -07001091 root_worker);
1092 }
Craig Tiller55624a32017-05-26 08:14:44 -07001093 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001094 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1095 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001096 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001097 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001098 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001099 GPR_ASSERT(next_worker->state == KICKED);
Craig Tiller55624a32017-05-26 08:14:44 -07001100 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001101 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001102 }
1103 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001104 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001105 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001106 gpr_log(GPR_ERROR, " .. kicked while waking up");
1107 }
yang-gdf92a642017-08-21 22:38:45 -07001108 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001109 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001110
1111 GPR_UNREACHABLE_CODE(goto done);
1112 }
1113
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001114 if (specific_worker->state == KICKED) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001115 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001116 gpr_log(GPR_ERROR, " .. specific worker already kicked");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001117 }
yang-gdf92a642017-08-21 22:38:45 -07001118 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001119 } else if (gpr_tls_get(&g_current_thread_worker) ==
1120 (intptr_t)specific_worker) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001121 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001122 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001123 gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001124 }
Craig Tiller55624a32017-05-26 08:14:44 -07001125 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001126 goto done;
Craig Tiller32f90ee2017-04-28 12:46:41 -07001127 } else if (specific_worker ==
Craig Tillerbaa14a92017-11-03 09:09:36 -07001128 (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001129 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001130 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001131 gpr_log(GPR_ERROR, " .. kick active poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001132 }
Craig Tiller55624a32017-05-26 08:14:44 -07001133 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001134 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1135 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001136 } else if (specific_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001137 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001138 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001139 gpr_log(GPR_ERROR, " .. kick waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001140 }
Craig Tiller55624a32017-05-26 08:14:44 -07001141 SET_KICK_STATE(specific_worker, KICKED);
Craig Tiller4509c472017-04-27 19:05:13 +00001142 gpr_cv_signal(&specific_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001143 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001144 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001145 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001146 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001147 gpr_log(GPR_ERROR, " .. kick non-waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001148 }
Craig Tiller55624a32017-05-26 08:14:44 -07001149 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001150 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001151 }
yang-gdf92a642017-08-21 22:38:45 -07001152done:
1153 GPR_TIMER_END("pollset_kick", 0);
1154 return ret_err;
Craig Tiller4509c472017-04-27 19:05:13 +00001155}
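
/* Illustrative sketch only of the wakeup-fd mechanism the kick paths above
   rely on (grpc_wakeup_fd_wakeup / global_wakeup_fd): a thread blocked in
   epoll_wait() is woken by writing to an fd registered in the same epoll
   set. The names example_wakeup_pipe, example_kick and example_consume_kick
   are hypothetical; the real engine uses its wakeup_fd_posix abstraction
   (eventfd or pipe) rather than this struct. */
typedef struct {
  int read_fd;  /* registered in the epoll set with EPOLLIN */
  int write_fd; /* written to by kickers */
} example_wakeup_pipe;

static bool example_wakeup_pipe_init(example_wakeup_pipe* wp, int epfd) {
  int fds[2];
  if (pipe(fds) != 0) return false;
  fcntl(fds[0], F_SETFL, O_NONBLOCK); /* so draining below never blocks */
  wp->read_fd = fds[0];
  wp->write_fd = fds[1];
  struct epoll_event ev;
  ev.events = EPOLLIN;
  ev.data.ptr = wp; /* the poller recognizes this pointer as "a kick" */
  return epoll_ctl(epfd, EPOLL_CTL_ADD, wp->read_fd, &ev) == 0;
}

/* Kicker side: any thread can interrupt the poller's epoll_wait(). */
static void example_kick(example_wakeup_pipe* wp) {
  char byte = 0;
  while (write(wp->write_fd, &byte, 1) < 0 && errno == EINTR) {
  }
}

/* Poller side: drain the pipe so the next epoll_wait() can block again. */
static void example_consume_kick(example_wakeup_pipe* wp) {
  char buf[64];
  while (read(wp->read_fd, buf, sizeof(buf)) > 0) {
  }
}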
1156
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001157static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
Craig Tiller4509c472017-04-27 19:05:13 +00001158
Craig Tiller4509c472017-04-27 19:05:13 +00001159/*******************************************************************************
Craig Tillerc67cc992017-04-27 10:15:51 -07001160 * Pollset-set Definitions
1161 */
1162
Craig Tillerbaa14a92017-11-03 09:09:36 -07001163static grpc_pollset_set* pollset_set_create(void) {
1164 return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
Craig Tillerc67cc992017-04-27 10:15:51 -07001165}
1166
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001167static void pollset_set_destroy(grpc_pollset_set* pss) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001168
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001169static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001170
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001171static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001172
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001173static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001174
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001175static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001176
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001177static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001178 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001179
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001180static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001181 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001182
1183/*******************************************************************************
1184 * Event engine binding
1185 */
1186
1187static void shutdown_engine(void) {
1188 fd_global_shutdown();
1189 pollset_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001190 epoll_set_shutdown();
Craig Tillerc67cc992017-04-27 10:15:51 -07001191}
1192
1193static const grpc_event_engine_vtable vtable = {
Yash Tibrewal533d1182017-09-18 10:48:22 -07001194 sizeof(grpc_pollset),
Craig Tillerc67cc992017-04-27 10:15:51 -07001195
Yash Tibrewal533d1182017-09-18 10:48:22 -07001196 fd_create,
1197 fd_wrapped_fd,
1198 fd_orphan,
1199 fd_shutdown,
1200 fd_notify_on_read,
1201 fd_notify_on_write,
1202 fd_is_shutdown,
1203 fd_get_read_notifier_pollset,
Craig Tillerc67cc992017-04-27 10:15:51 -07001204
Yash Tibrewal533d1182017-09-18 10:48:22 -07001205 pollset_init,
1206 pollset_shutdown,
1207 pollset_destroy,
1208 pollset_work,
1209 pollset_kick,
1210 pollset_add_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001211
Yash Tibrewal533d1182017-09-18 10:48:22 -07001212 pollset_set_create,
1213 pollset_set_destroy,
1214 pollset_set_add_pollset,
1215 pollset_set_del_pollset,
1216 pollset_set_add_pollset_set,
1217 pollset_set_del_pollset_set,
1218 pollset_set_add_fd,
1219 pollset_set_del_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001220
Yash Tibrewal533d1182017-09-18 10:48:22 -07001221 shutdown_engine,
Craig Tillerc67cc992017-04-27 10:15:51 -07001222};
1223
1224/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001225 * Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll
1226 * support is available */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001227const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Craig Tillerc67cc992017-04-27 10:15:51 -07001228 if (!grpc_has_wakeup_fd()) {
yang-g30101b02017-11-06 14:35:30 -08001229 gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
Craig Tiller4782d922017-11-10 09:53:21 -08001230 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001231 }
1232
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001233 if (!epoll_set_init()) {
Craig Tiller4782d922017-11-10 09:53:21 -08001234 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001235 }
1236
Craig Tillerc67cc992017-04-27 10:15:51 -07001237 fd_global_init();
1238
1239 if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
Craig Tiller4509c472017-04-27 19:05:13 +00001240 fd_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001241 epoll_set_shutdown();
Craig Tiller4782d922017-11-10 09:53:21 -08001242 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001243 }
1244
1245 return &vtable;
1246}
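
/* Minimal sketch, not the real epoll_set_init(): one way to probe at runtime
   whether the kernel actually supports epoll, per the comment above
   grpc_init_epoll1_linux(). The name example_probe_epoll_support is
   hypothetical. */
static bool example_probe_epoll_support(void) {
  int fd = epoll_create1(EPOLL_CLOEXEC);
  if (fd < 0) {
    /* epoll_create1() may be unavailable on older kernels; fall back to
       epoll_create(), whose size hint is ignored by modern kernels. */
    fd = epoll_create(1);
  }
  if (fd < 0) return false; /* glibc has the wrappers, kernel lacks support */
  close(fd);
  return true;
}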
1247
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001248#else /* defined(GRPC_LINUX_EPOLL) */
Craig Tillerc67cc992017-04-27 10:15:51 -07001249#if defined(GRPC_POSIX_SOCKET)
Yash Tibrewal1cac2232017-09-26 11:31:11 -07001250#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -07001251/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
1252 * NULL */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001253const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001254 return nullptr;
Craig Tiller9ddb3152017-04-27 21:32:56 +00001255}
Craig Tillerc67cc992017-04-27 10:15:51 -07001256#endif /* defined(GRPC_POSIX_SOCKET) */
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001257#endif /* !defined(GRPC_LINUX_EPOLL) */