blob: 86a0243d2efbffebdbbb8bf19e0a4295ce1999bd [file] [log] [blame]
Craig Tillerc67cc992017-04-27 10:15:51 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2017 gRPC authors.
Craig Tillerc67cc992017-04-27 10:15:51 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
Craig Tillerc67cc992017-04-27 10:15:51 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
Craig Tillerc67cc992017-04-27 10:15:51 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
Craig Tillerc67cc992017-04-27 10:15:51 -070016 *
17 */
18
Alexander Polcyndb3e8982018-02-21 16:59:24 -080019#include <grpc/support/port_platform.h>
20
Craig Tillerc67cc992017-04-27 10:15:51 -070021#include "src/core/lib/iomgr/port.h"
22
yang-gceb24752017-11-07 12:06:37 -080023#include <grpc/support/log.h>
24
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -080025/* This polling engine is only relevant on linux kernels supporting epoll
26 epoll_create() or epoll_create1() */
Mehrdad Afsharifb669002018-01-17 15:37:56 -080027#ifdef GRPC_LINUX_EPOLL
Craig Tiller4509c472017-04-27 19:05:13 +000028#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070029
30#include <assert.h>
31#include <errno.h>
Mehrdad Afshari1957fd02018-01-16 17:22:01 -080032#include <fcntl.h>
Craig Tiller20397792017-07-18 11:35:27 -070033#include <limits.h>
Craig Tillerc67cc992017-04-27 10:15:51 -070034#include <poll.h>
35#include <pthread.h>
36#include <string.h>
37#include <sys/epoll.h>
38#include <sys/socket.h>
39#include <unistd.h>
40
41#include <grpc/support/alloc.h>
Craig Tiller6de05932017-04-28 09:17:38 -070042#include <grpc/support/cpu.h>
Craig Tillerc67cc992017-04-27 10:15:51 -070043#include <grpc/support/string_util.h>
Craig Tillerc67cc992017-04-27 10:15:51 -070044
Craig Tillerb4bb1cd2017-07-20 14:18:17 -070045#include "src/core/lib/debug/stats.h"
Mark D. Rothdbdf4952018-01-18 11:21:12 -080046#include "src/core/lib/gpr/string.h"
Vijay Paib6cf1232018-01-25 21:02:26 -080047#include "src/core/lib/gpr/tls.h"
Vijay Paid4d0a302018-01-25 13:24:03 -080048#include "src/core/lib/gpr/useful.h"
Mark D. Roth4f2b0fd2018-01-19 12:12:23 -080049#include "src/core/lib/gprpp/manual_constructor.h"
Craig Tiller6b7c1fb2017-07-19 15:45:03 -070050#include "src/core/lib/iomgr/block_annotate.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070051#include "src/core/lib/iomgr/ev_posix.h"
52#include "src/core/lib/iomgr/iomgr_internal.h"
53#include "src/core/lib/iomgr/lockfree_event.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070054#include "src/core/lib/iomgr/wakeup_fd_posix.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070055#include "src/core/lib/profiling/timers.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070056
Craig Tillerc67cc992017-04-27 10:15:51 -070057static grpc_wakeup_fd global_wakeup_fd;
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -070058
59/*******************************************************************************
60 * Singleton epoll set related fields
61 */
62
#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1

/* NOTE ON SYNCHRONIZATION:
 * - Fields in this struct are only modified by the designated poller. Hence
 * there is no need for any locks to protect the struct.
 * - num_events and cursor fields have to be of atomic type to provide memory
 * visibility guarantees only. i.e In case of multiple pollers, the designated
 * polling thread keeps changing; the thread that wrote these values may be
 * different from the thread reading the values
 */
typedef struct epoll_set {
  int epfd; /* the epoll instance's fd; -1 after epoll_set_shutdown() */

  /* The epoll_events after the last call to epoll_wait() */
  struct epoll_event events[MAX_EPOLL_EVENTS];

  /* The number of epoll_events after the last call to epoll_wait() */
  gpr_atm num_events;

  /* Index of the first event in epoll_events that has to be processed. This
   * field is only valid if num_events > 0 */
  gpr_atm cursor;
} epoll_set;

/* The global singleton epoll set */
static epoll_set g_epoll_set;
90
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -080091static int epoll_create_and_cloexec() {
Mehrdad Afshari1957fd02018-01-16 17:22:01 -080092#ifdef GRPC_LINUX_EPOLL_CREATE1
93 int fd = epoll_create1(EPOLL_CLOEXEC);
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -080094 if (fd < 0) {
95 gpr_log(GPR_ERROR, "epoll_create1 unavailable");
Mehrdad Afshari1957fd02018-01-16 17:22:01 -080096 }
Mehrdad Afshari1957fd02018-01-16 17:22:01 -080097#else
98 int fd = epoll_create(MAX_EPOLL_EVENTS);
99 if (fd < 0) {
100 gpr_log(GPR_ERROR, "epoll_create unavailable");
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -0800101 } else if (fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) {
102 gpr_log(GPR_ERROR, "fcntl following epoll_create failed");
Mehrdad Afshari1957fd02018-01-16 17:22:01 -0800103 return -1;
104 }
Mehrdad Afshari1957fd02018-01-16 17:22:01 -0800105#endif
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -0800106 return fd;
Mehrdad Afshari1957fd02018-01-16 17:22:01 -0800107}
108
/* Must be called *only* once. Creates the singleton epoll fd and resets the
 * event bookkeeping. Returns false if the epoll instance could not be
 * created (the epoll1 engine is then unusable). */
static bool epoll_set_init() {
  g_epoll_set.epfd = epoll_create_and_cloexec();
  if (g_epoll_set.epfd < 0) {
    return false;
  }

  gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
  /* No-barrier stores suffice: initialization happens before any poller
   * thread can observe these fields. */
  gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
  gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
  return true;
}
121
122/* epoll_set_init() MUST be called before calling this. */
123static void epoll_set_shutdown() {
124 if (g_epoll_set.epfd >= 0) {
125 close(g_epoll_set.epfd);
126 g_epoll_set.epfd = -1;
127 }
128}
Craig Tillerc67cc992017-04-27 10:15:51 -0700129
130/*******************************************************************************
131 * Fd Declarations
132 */
133
struct grpc_fd {
  int fd; /* the underlying OS file descriptor */

  /* Lock-free events that rendezvous epoll notifications with the closures
   * registered via fd_notify_on_{read,write,error}. */
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> error_closure;

  /* Intrusive link for the fd freelist (see the freelist rationale below). */
  struct grpc_fd* freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};
149
150static void fd_global_init(void);
151static void fd_global_shutdown(void);
152
153/*******************************************************************************
154 * Pollset Declarations
155 */
156
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -0700157typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;
Craig Tillerc67cc992017-04-27 10:15:51 -0700158
Craig Tillerbaa14a92017-11-03 09:09:36 -0700159static const char* kick_state_string(kick_state st) {
Craig Tiller830e82a2017-05-31 16:26:27 -0700160 switch (st) {
161 case UNKICKED:
162 return "UNKICKED";
163 case KICKED:
164 return "KICKED";
165 case DESIGNATED_POLLER:
166 return "DESIGNATED_POLLER";
167 }
168 GPR_UNREACHABLE_CODE(return "UNKNOWN");
169}
170
struct grpc_pollset_worker {
  kick_state state;
  int kick_state_mutator; // which line of code last changed kick state
  bool initialized_cv;    // was 'cv' initialized (and so needs signalling)?
  /* Intrusive links for the pollset's circular doubly-linked worker list. */
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
  gpr_cv cv; // condvar that non-designated-poller workers block on
  grpc_closure_list schedule_on_end_work;
};

/* Sets a worker's kick state and records the __LINE__ of the mutation site
 * (kick_state_mutator), which helps debug kick-related races. */
#define SET_KICK_STATE(worker, kick_state)   \
  do {                                       \
    (worker)->state = (kick_state);          \
    (worker)->kick_state_mutator = __LINE__; \
  } while (false)
186
#define MAX_NEIGHBORHOODS 1024

/* A neighborhood is one shard of the set of active pollsets; sharding (one
 * per CPU, see pollset_global_init) reduces contention on 'mu'. */
typedef struct pollset_neighborhood {
  gpr_mu mu;
  grpc_pollset* active_root; /* circular list of active pollsets, or null */
  char pad[GPR_CACHELINE_SIZE]; /* pad to a cacheline boundary */
} pollset_neighborhood;

struct grpc_pollset {
  gpr_mu mu;
  pollset_neighborhood* neighborhood; /* shard this pollset belongs to */
  bool reassigning_neighborhood;
  grpc_pollset_worker* root_worker; /* circular worker list, or null */
  bool kicked_without_poller;

  /* Set to true if the pollset is observed to have no workers available to
     poll */
  bool seen_inactive;
  bool shutting_down;             /* Is the pollset shutting down ? */
  grpc_closure* shutdown_closure; /* Called after after shutdown is complete */

  /* Number of workers who are *about-to* attach themselves to the pollset
   * worker list */
  int begin_refs;

  /* Links in the neighborhood's circular active-pollset list. */
  grpc_pollset* next;
  grpc_pollset* prev;
};
215
216/*******************************************************************************
217 * Pollset-set Declarations
218 */
Craig Tiller6de05932017-04-28 09:17:38 -0700219
/* Pollset-sets carry no state in this engine (there is a single global
 * epoll set); the member only keeps the struct non-empty. */
struct grpc_pollset_set {
  char unused;
};
Craig Tillerc67cc992017-04-27 10:15:51 -0700223
224/*******************************************************************************
225 * Common helpers
226 */
227
Craig Tillerbaa14a92017-11-03 09:09:36 -0700228static bool append_error(grpc_error** composite, grpc_error* error,
229 const char* desc) {
Craig Tillerc67cc992017-04-27 10:15:51 -0700230 if (error == GRPC_ERROR_NONE) return true;
231 if (*composite == GRPC_ERROR_NONE) {
232 *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
233 }
234 *composite = grpc_error_add_child(*composite, error);
235 return false;
236}
237
238/*******************************************************************************
239 * Fd Definitions
240 */
241
242/* We need to keep a freelist not because of any concerns of malloc performance
243 * but instead so that implementations with multiple threads in (for example)
244 * epoll_wait deal with the race between pollset removal and incoming poll
245 * notifications.
246 *
247 * The problem is that the poller ultimately holds a reference to this
248 * object, so it is very difficult to know when is safe to free it, at least
249 * without some expensive synchronization.
250 *
251 * If we keep the object freelisted, in the worst case losing this race just
252 * becomes a spurious read notification on a reused fd.
253 */
254
255/* The alarm system needs to be able to wakeup 'some poller' sometimes
256 * (specifically when a new alarm needs to be triggered earlier than the next
257 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
258 * case occurs. */
259
/* Freelist of orphaned grpc_fd structs, guarded by fd_freelist_mu. */
static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;
262
/* Initializes the mutex guarding the fd freelist. */
static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
264
265static void fd_global_shutdown(void) {
266 gpr_mu_lock(&fd_freelist_mu);
267 gpr_mu_unlock(&fd_freelist_mu);
Craig Tiller4782d922017-11-10 09:53:21 -0800268 while (fd_freelist != nullptr) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700269 grpc_fd* fd = fd_freelist;
Craig Tillerc67cc992017-04-27 10:15:51 -0700270 fd_freelist = fd_freelist->freelist_next;
Craig Tillerc67cc992017-04-27 10:15:51 -0700271 gpr_free(fd);
272 }
273 gpr_mu_destroy(&fd_freelist_mu);
274}
275
/* Creates (or reuses from the freelist) a grpc_fd wrapping 'fd' and adds it
 * to the singleton epoll set in edge-triggered mode. 'track_err' requests
 * error-event delivery; it is encoded in the low bit of the epoll user-data
 * pointer (see comment below). */
static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
  grpc_fd* new_fd = nullptr;

  /* Reuse a previously orphaned grpc_fd when one is available. */
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
    /* ManualConstructor Init() only runs for freshly malloc'd structs;
     * freelisted ones were already constructed. */
    new_fd->read_closure.Init();
    new_fd->write_closure.Init();
    new_fd->error_closure.Init();
  }
  new_fd->fd = fd;
  new_fd->read_closure->InitEvent();
  new_fd->write_closure->InitEvent();
  new_fd->error_closure->InitEvent();
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = nullptr;

  char* fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);

  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLOUT | EPOLLET);
  /* Use the least significant bit of ev.data.ptr to store track_err. We expect
   * the addresses to be word aligned. We need to store track_err to avoid
   * synchronization issues when accessing it after receiving an event.
   * Accessing fd would be a data race there because the fd might have been
   * returned to the free list at that point. */
  ev.data.ptr = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(new_fd) |
                                        (track_err ? 1 : 0));
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    /* Registration failure is logged but not fatal here. */
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}
325
/* Returns the underlying OS file descriptor. */
static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }
Craig Tillerc67cc992017-04-27 10:15:51 -0700327
/* if 'releasing_fd' is true, it means that we are going to detach the internal
 * fd from grpc_fd structure (i.e which means we should not be calling
 * shutdown() syscall on that fd) */
static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why,
                                 bool releasing_fd) {
  /* Only the call that transitions read_closure to shutdown performs the
   * syscall and shuts down the other events, so repeat calls are benign. */
  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
    if (!releasing_fd) {
      shutdown(fd->fd, SHUT_RDWR);
    }
    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
    fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why); /* release the caller's ref on 'why' */
}
342
/* Might be called multiple times; shuts down both the grpc_fd state and the
 * underlying OS fd (SHUT_RDWR). */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  fd_shutdown_internal(fd, why, false);
}
347
/* Releases a grpc_fd. If 'release_fd' is non-NULL the caller keeps ownership
 * of the OS fd (it is written to *release_fd and not closed); otherwise the
 * OS fd is closed. The grpc_fd struct is returned to the freelist, never
 * freed (see the freelist rationale above). 'on_done' is scheduled when the
 * orphan completes. */
static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      const char* reason) {
  grpc_error* error = GRPC_ERROR_NONE;
  bool is_release_fd = (release_fd != nullptr);

  /* Ensure the fd is shut down before tearing it down (idempotent). */
  if (!fd->read_closure->IsShutdown()) {
    fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                         is_release_fd);
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (is_release_fd) {
    *release_fd = fd->fd;
  } else {
    close(fd->fd);
  }

  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  fd->read_closure->DestroyEvent();
  fd->write_closure->DestroyEvent();
  fd->error_closure->DestroyEvent();

  /* Recycle the struct for a later fd_create(). */
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}
378
/* Returns the pollset that last observed this fd as readable; the acquire
 * load pairs with the release store in fd_become_readable. */
static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset*)notifier;
}

/* The read event's shutdown flag stands in for whole-fd shutdown state. */
static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure->IsShutdown();
}

/* Registers 'closure' to run when the fd becomes readable. */
static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure->NotifyOn(closure);
}

/* Registers 'closure' to run when the fd becomes writable. */
static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure->NotifyOn(closure);
}

/* Registers 'closure' to run when the fd reports an error event. */
static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
  fd->error_closure->NotifyOn(closure);
}

/* Marks the fd readable and records which pollset noticed it. */
static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
  fd->read_closure->SetReady();
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

/* Marks the fd writable, firing any pending write closure. */
static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }

/* Marks the fd as having an error, firing any pending error closure. */
static void fd_has_errors(grpc_fd* fd) { fd->error_closure->SetReady(); }
409
Craig Tillerc67cc992017-04-27 10:15:51 -0700410/*******************************************************************************
411 * Pollset Definitions
412 */
413
/* Per-thread records of the pollset/worker the thread is operating on. */
GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* The designated poller */
static gpr_atm g_active_poller;

/* Neighborhood shards; sized in pollset_global_init (one per core, capped). */
static pollset_neighborhood* g_neighborhoods;
static size_t g_num_neighborhoods;
Craig Tiller6de05932017-04-28 09:17:38 -0700422
Craig Tillerc67cc992017-04-27 10:15:51 -0700423/* Return true if first in list */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700424static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) {
Craig Tiller4782d922017-11-10 09:53:21 -0800425 if (pollset->root_worker == nullptr) {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700426 pollset->root_worker = worker;
427 worker->next = worker->prev = worker;
Craig Tillerc67cc992017-04-27 10:15:51 -0700428 return true;
429 } else {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700430 worker->next = pollset->root_worker;
431 worker->prev = worker->next->prev;
432 worker->next->prev = worker;
433 worker->prev->next = worker;
Craig Tillerc67cc992017-04-27 10:15:51 -0700434 return false;
435 }
436}
437
438/* Return true if last in list */
439typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;
440
Craig Tillerbaa14a92017-11-03 09:09:36 -0700441static worker_remove_result worker_remove(grpc_pollset* pollset,
442 grpc_pollset_worker* worker) {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700443 if (worker == pollset->root_worker) {
444 if (worker == worker->next) {
Craig Tiller4782d922017-11-10 09:53:21 -0800445 pollset->root_worker = nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -0700446 return EMPTIED;
447 } else {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700448 pollset->root_worker = worker->next;
449 worker->prev->next = worker->next;
450 worker->next->prev = worker->prev;
Craig Tillerc67cc992017-04-27 10:15:51 -0700451 return NEW_ROOT;
452 }
453 } else {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700454 worker->prev->next = worker->next;
455 worker->next->prev = worker->prev;
Craig Tillerc67cc992017-04-27 10:15:51 -0700456 return REMOVED;
457 }
458}
459
Vijay Pai4b7ef4d2017-09-11 23:09:22 -0700460static size_t choose_neighborhood(void) {
Noah Eisenbe82e642018-02-09 09:16:55 -0800461 return static_cast<size_t>(gpr_cpu_current_cpu()) % g_num_neighborhoods;
Craig Tillerba550da2017-05-01 14:26:31 +0000462}
463
/* One-time pollset subsystem init: thread-local slots, the global wakeup fd
 * (registered with the epoll set, edge-triggered), and the neighborhood
 * shards. Returns a grpc_error on wakeup-fd or epoll_ctl failure. */
static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  global_wakeup_fd.read_fd = -1;
  grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
  /* data.ptr is the wakeup fd itself, letting pollers distinguish wakeups
   * from ordinary fd events. */
  ev.data.ptr = &global_wakeup_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
                &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  /* One neighborhood shard per core, capped at MAX_NEIGHBORHOODS. */
  g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
  g_neighborhoods = static_cast<pollset_neighborhood*>(
      gpr_zalloc(sizeof(*g_neighborhoods) * g_num_neighborhoods));
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_init(&g_neighborhoods[i].mu);
  }
  return GRPC_ERROR_NONE;
}
486
/* Tears down everything created by pollset_global_init(). */
static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  /* read_fd == -1 means the wakeup fd was never successfully initialized. */
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_destroy(&g_neighborhoods[i].mu);
  }
  gpr_free(g_neighborhoods);
}
496
/* Initializes a pollset and returns (via 'mu') the mutex callers must hold
 * when invoking pollset operations. */
static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
  pollset->reassigning_neighborhood = false;
  pollset->root_worker = nullptr;
  pollset->kicked_without_poller = false;
  /* New pollsets start inactive (not on any neighborhood's active list). */
  pollset->seen_inactive = true;
  pollset->shutting_down = false;
  pollset->shutdown_closure = nullptr;
  pollset->begin_refs = 0;
  pollset->next = pollset->prev = nullptr;
}
510
/* Destroys a pollset, first unlinking it from its neighborhood's active
 * list if needed. Lock order is neighborhood->mu before pollset->mu, and
 * the pollset may migrate neighborhoods while we reacquire locks — hence
 * the re-check and retry loop. */
static void pollset_destroy(grpc_pollset* pollset) {
  gpr_mu_lock(&pollset->mu);
  if (!pollset->seen_inactive) {
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (!pollset->seen_inactive) {
      if (pollset->neighborhood != neighborhood) {
        /* The pollset moved while we were acquiring locks; chase it. */
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }
      /* Unlink from the neighborhood's circular active list. */
      pollset->prev->next = pollset->next;
      pollset->next->prev = pollset->prev;
      if (pollset == pollset->neighborhood->active_root) {
        pollset->neighborhood->active_root =
            pollset->next == pollset ? nullptr : pollset->next;
      }
    }
    gpr_mu_unlock(&pollset->neighborhood->mu);
  }
  gpr_mu_unlock(&pollset->mu);
  gpr_mu_destroy(&pollset->mu);
}
538
/* Kicks every worker attached to 'pollset': condvar-waiters are signalled,
 * and the designated poller is woken via the global wakeup fd.
 * NOTE(review): the worker list is walked with no extra locking here —
 * presumably pollset->mu is held by the caller; confirm at call sites. */
static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->root_worker != nullptr) {
    grpc_pollset_worker* worker = pollset->root_worker;
    do {
      GRPC_STATS_INC_POLLSET_KICK();
      switch (worker->state) {
        case KICKED:
          /* Already kicked — nothing more to do for this worker. */
          GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
          break;
        case UNKICKED:
          SET_KICK_STATE(worker, KICKED);
          if (worker->initialized_cv) {
            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
            gpr_cv_signal(&worker->cv);
          }
          break;
        case DESIGNATED_POLLER:
          /* Wake the poller out of epoll_wait via the wakeup fd. */
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
          SET_KICK_STATE(worker, KICKED);
          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                       "pollset_kick_all");
          break;
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  // TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
  // in the else case
  return error;
}
572
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800573static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
Craig Tiller4782d922017-11-10 09:53:21 -0800574 if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
Craig Tillerba550da2017-05-01 14:26:31 +0000575 pollset->begin_refs == 0) {
yang-gdf92a642017-08-21 22:38:45 -0700576 GPR_TIMER_MARK("pollset_finish_shutdown", 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800577 GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
Craig Tiller4782d922017-11-10 09:53:21 -0800578 pollset->shutdown_closure = nullptr;
Craig Tiller4509c472017-04-27 19:05:13 +0000579 }
580}
581
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800582static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
yang-gce1cfea2018-01-31 15:59:50 -0800583 GPR_TIMER_SCOPE("pollset_shutdown", 0);
Craig Tiller4782d922017-11-10 09:53:21 -0800584 GPR_ASSERT(pollset->shutdown_closure == nullptr);
Craig Tillerc81512a2017-05-26 09:53:58 -0700585 GPR_ASSERT(!pollset->shutting_down);
Craig Tiller4509c472017-04-27 19:05:13 +0000586 pollset->shutdown_closure = closure;
Craig Tillerc81512a2017-05-26 09:53:58 -0700587 pollset->shutting_down = true;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800588 GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
589 pollset_maybe_finish_shutdown(pollset);
Craig Tiller4509c472017-04-27 19:05:13 +0000590}
591
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800592static int poll_deadline_to_millis_timeout(grpc_millis millis) {
Craig Tiller20397792017-07-18 11:35:27 -0700593 if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800594 grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
Craig Tillerd9b82bd2017-08-29 12:16:56 -0700595 if (delta > INT_MAX) {
Craig Tiller20397792017-07-18 11:35:27 -0700596 return INT_MAX;
Craig Tillerd9b82bd2017-08-29 12:16:56 -0700597 } else if (delta < 0) {
Craig Tiller4509c472017-04-27 19:05:13 +0000598 return 0;
Craig Tillerd9b82bd2017-08-29 12:16:56 -0700599 } else {
Noah Eisenbe82e642018-02-09 09:16:55 -0800600 return static_cast<int>(delta);
Craig Tiller4509c472017-04-27 19:05:13 +0000601 }
Craig Tiller4509c472017-04-27 19:05:13 +0000602}
603
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700604/* Process the epoll events found by do_epoll_wait() function.
605 - g_epoll_set.cursor points to the index of the first event to be processed
606 - This function then processes up-to MAX_EPOLL_EVENTS_PER_ITERATION and
607 updates the g_epoll_set.cursor
Craig Tiller4509c472017-04-27 19:05:13 +0000608
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700609 NOTE ON SYNCRHONIZATION: Similar to do_epoll_wait(), this function is only
610 called by g_active_poller thread. So there is no need for synchronization
611 when accessing fields in g_epoll_set */
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800612static grpc_error* process_epoll_events(grpc_pollset* pollset) {
yang-gce1cfea2018-01-31 15:59:50 -0800613 GPR_TIMER_SCOPE("process_epoll_events", 0);
614
Craig Tillerbaa14a92017-11-03 09:09:36 -0700615 static const char* err_desc = "process_events";
616 grpc_error* error = GRPC_ERROR_NONE;
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700617 long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
618 long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
619 for (int idx = 0;
620 (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700621 idx++) {
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700622 long c = cursor++;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700623 struct epoll_event* ev = &g_epoll_set.events[c];
624 void* data_ptr = ev->data.ptr;
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700625
Craig Tiller4509c472017-04-27 19:05:13 +0000626 if (data_ptr == &global_wakeup_fd) {
Craig Tiller4509c472017-04-27 19:05:13 +0000627 append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
628 err_desc);
629 } else {
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700630 grpc_fd* fd = reinterpret_cast<grpc_fd*>(
631 reinterpret_cast<intptr_t>(data_ptr) & ~static_cast<intptr_t>(1));
632 bool track_err =
Yash Tibrewal21e36032018-06-05 10:55:13 -0700633 reinterpret_cast<intptr_t>(data_ptr) & static_cast<intptr_t>(1);
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700634 bool cancel = (ev->events & EPOLLHUP) != 0;
635 bool error = (ev->events & EPOLLERR) != 0;
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700636 bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
637 bool write_ev = (ev->events & EPOLLOUT) != 0;
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700638 bool err_fallback = error && !track_err;
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700639
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700640 if (error && !err_fallback) {
641 fd_has_errors(fd);
642 }
643
644 if (read_ev || cancel || err_fallback) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800645 fd_become_readable(fd, pollset);
Craig Tiller4509c472017-04-27 19:05:13 +0000646 }
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700647
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700648 if (write_ev || cancel || err_fallback) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800649 fd_become_writable(fd);
Craig Tiller4509c472017-04-27 19:05:13 +0000650 }
651 }
652 }
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700653 gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
Craig Tiller4509c472017-04-27 19:05:13 +0000654 return error;
655}
656
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700657/* Do epoll_wait and store the events in g_epoll_set.events field. This does not
658 "process" any of the events yet; that is done in process_epoll_events().
659 *See process_epoll_events() function for more details.
660
661 NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
662 (i.e the designated poller thread) will be calling this function. So there is
663 no need for any synchronization when accesing fields in g_epoll_set */
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800664static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
yang-gce1cfea2018-01-31 15:59:50 -0800665 GPR_TIMER_SCOPE("do_epoll_wait", 0);
Craig Tiller4509c472017-04-27 19:05:13 +0000666
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700667 int r;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800668 int timeout = poll_deadline_to_millis_timeout(deadline);
Craig Tiller4509c472017-04-27 19:05:13 +0000669 if (timeout != 0) {
670 GRPC_SCHEDULING_START_BLOCKING_REGION;
671 }
Craig Tiller4509c472017-04-27 19:05:13 +0000672 do {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800673 GRPC_STATS_INC_SYSCALL_POLL();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700674 r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
675 timeout);
Craig Tiller4509c472017-04-27 19:05:13 +0000676 } while (r < 0 && errno == EINTR);
677 if (timeout != 0) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800678 GRPC_SCHEDULING_END_BLOCKING_REGION;
Craig Tiller4509c472017-04-27 19:05:13 +0000679 }
680
681 if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
682
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800683 GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
Craig Tiller0ff222a2017-09-01 09:41:43 -0700684
ncteisen3cffe1f2017-11-10 13:56:23 -0800685 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -0700686 gpr_log(GPR_INFO, "ps: %p poll got %d events", ps, r);
Craig Tiller4509c472017-04-27 19:05:13 +0000687 }
688
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700689 gpr_atm_rel_store(&g_epoll_set.num_events, r);
690 gpr_atm_rel_store(&g_epoll_set.cursor, 0);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700691
692 return GRPC_ERROR_NONE;
Craig Tiller4509c472017-04-27 19:05:13 +0000693}
694
/* Attach \a worker to \a pollset and decide whether the calling thread should
   poll. Called with pollset->mu held; may briefly drop and re-acquire it (see
   the comment near the end). Returns true iff this worker ended up as the
   DESIGNATED_POLLER and the pollset is not shutting down — i.e. the caller
   should run the epoll loop. Returns false if the worker was kicked, the
   pollset is shutting down, or the kick arrived with no poller present. */
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_SCOPE("begin_worker", 0);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  SET_KICK_STATE(worker, UNKICKED);
  worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
  /* begin_refs keeps pollset_maybe_finish_shutdown() from completing while we
     are in the middle of attaching. */
  pollset->begin_refs++;

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PS:%p BEGIN_STARTS:%p", pollset, worker);
  }

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move back to the
    // active list
    bool is_reassigning = false;
    if (!pollset->reassigning_neighborhood) {
      is_reassigning = true;
      pollset->reassigning_neighborhood = true;
      pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
    }
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  // pollset unlocked: state may change (even worker->kick_state)
  retry_lock_neighborhood:
    /* Lock order is neighborhood->mu before pollset->mu, hence the
       unlock/relock dance above. */
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_INFO, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
              pollset, worker, kick_state_string(worker->state),
              is_reassigning);
    }
    if (pollset->seen_inactive) {
      if (neighborhood != pollset->neighborhood) {
        /* Another thread reassigned the pollset while we were unlocked;
           chase the new neighborhood. */
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }

      /* In the brief time we released the pollset locks above, the worker MAY
         have been kicked. In this case, the worker should get out of this
         pollset ASAP and hence this should neither add the pollset to
         neighborhood nor mark the pollset as active.

         On a side note, the only way a worker's kick state could have changed
         at this point is if it were "kicked specifically". Since the worker has
         not added itself to the pollset yet (by calling worker_insert()), it is
         not visible in the "kick any" path yet */
      if (worker->state == UNKICKED) {
        pollset->seen_inactive = false;
        if (neighborhood->active_root == nullptr) {
          /* Empty neighborhood: this pollset becomes the sole node of the
             circular active list. */
          neighborhood->active_root = pollset->next = pollset->prev = pollset;
          /* Make this the designated poller if there isn't one already */
          if (worker->state == UNKICKED &&
              gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
            SET_KICK_STATE(worker, DESIGNATED_POLLER);
          }
        } else {
          /* Splice the pollset into the existing circular active list. */
          pollset->next = neighborhood->active_root;
          pollset->prev = pollset->next->prev;
          pollset->next->prev = pollset->prev->next = pollset;
        }
      }
    }
    if (is_reassigning) {
      GPR_ASSERT(pollset->reassigning_neighborhood);
      pollset->reassigning_neighborhood = false;
    }
    gpr_mu_unlock(&neighborhood->mu);
  }

  worker_insert(pollset, worker);
  pollset->begin_refs--;
  if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
    /* Not the designated poller and not kicked: wait on the worker's condvar
       until kicked, shut down, or the deadline passes. */
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (worker->state == UNKICKED && !pollset->shutting_down) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
                pollset, worker, kick_state_string(worker->state),
                pollset->shutting_down);
      }

      if (gpr_cv_wait(&worker->cv, &pollset->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
          worker->state == UNKICKED) {
        /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
           received a kick */
        SET_KICK_STATE(worker, KICKED);
      }
    }
    /* We may have slept for a while; force a clock re-read. */
    grpc_core::ExecCtx::Get()->InvalidateNow();
  }

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO,
            "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
            "kicked_without_poller: %d",
            pollset, worker, kick_state_string(worker->state),
            pollset->shutting_down, pollset->kicked_without_poller);
  }

  /* We release pollset lock in this function at a couple of places:
   * 1. Briefly when assigning pollset to a neighborhood
   * 2. When doing gpr_cv_wait()
   * It is possible that 'kicked_without_poller' was set to true during (1) and
   * 'shutting_down' is set to true during (1) or (2). If either of them is
   * true, this worker cannot do polling */
  /* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller
   * case; especially when the worker is the DESIGNATED_POLLER */

  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    return false;
  }

  return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}
817
/* Scan \a neighborhood's circular list of active pollsets for a worker that
   can take over as the designated poller. Called with neighborhood->mu held.
   For each pollset inspected: if an UNKICKED worker is found, try to CAS it
   into g_active_poller and wake it; if no usable worker exists, the pollset
   is marked seen_inactive and unlinked from the active list. Returns true
   once a poller has been found (by us or concurrently by someone else) —
   note the only exits from the outer loop are found_worker or an empty
   active list. */
static bool check_neighborhood_for_available_poller(
    pollset_neighborhood* neighborhood) {
  GPR_TIMER_SCOPE("check_neighborhood_for_available_poller", 0);
  bool found_worker = false;
  do {
    grpc_pollset* inspect = neighborhood->active_root;
    if (inspect == nullptr) {
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker* inspect_worker = inspect->root_worker;
    if (inspect_worker != nullptr) {
      /* Walk the pollset's circular worker list looking for a candidate. */
      do {
        switch (inspect_worker->state) {
          case UNKICKED:
            /* CAS from 0: only succeeds if there is currently no designated
               poller. */
            if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                       (gpr_atm)inspect_worker)) {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_INFO, " .. choose next poller to be %p",
                        inspect_worker);
              }
              SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
              if (inspect_worker->initialized_cv) {
                GPR_TIMER_MARK("signal worker", 0);
                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
                gpr_cv_signal(&inspect_worker->cv);
              }
            } else {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_INFO, " .. beaten to choose next poller");
              }
            }
            // even if we didn't win the cas, there's a worker, we can stop
            found_worker = true;
            break;
          case KICKED:
            break;
          case DESIGNATED_POLLER:
            found_worker = true;  // ok, so someone else found the worker, but
                                  // we'll accept that
            break;
        }
        inspect_worker = inspect_worker->next;
      } while (!found_worker && inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      /* No usable worker on this pollset: retire it from the active list so
         future scans skip it (begin_worker() re-activates it later). */
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO, " .. mark pollset %p inactive", inspect);
      }
      inspect->seen_inactive = true;
      if (inspect == neighborhood->active_root) {
        neighborhood->active_root =
            inspect->next == inspect ? nullptr : inspect->next;
      }
      /* Unlink from the circular doubly-linked list. */
      inspect->next->prev = inspect->prev;
      inspect->prev->next = inspect->next;
      inspect->next = inspect->prev = nullptr;
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  return found_worker;
}
881
/* Detach \a worker from \a pollset at the end of pollset_work(). Called with
   pollset->mu held; may temporarily drop it while flushing the exec ctx or
   scanning neighborhoods. If this worker was the designated poller, hand the
   role to the next worker on this pollset, or else search all neighborhoods
   (trylock pass first, then a blocking pass) for a replacement. Finally
   removes the worker and completes pollset shutdown if it was the last one. */
static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_SCOPE("end_worker", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PS:%p END_WORKER:%p", pollset, worker);
  }
  if (worker_hdl != nullptr) *worker_hdl = nullptr;
  /* Make sure we appear kicked */
  SET_KICK_STATE(worker, KICKED);
  /* Hand off closures queued on this worker to the exec ctx for execution. */
  grpc_closure_list_move(&worker->schedule_on_end_work,
                         grpc_core::ExecCtx::Get()->closure_list());
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    if (worker->next != worker && worker->next->state == UNKICKED) {
      /* A peer worker on this pollset can take over directly. */
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO, " .. choose next poller to be peer %p", worker);
      }
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
      gpr_cv_signal(&worker->next->cv);
      if (grpc_core::ExecCtx::Get()->HasWork()) {
        gpr_mu_unlock(&pollset->mu);
        grpc_core::ExecCtx::Get()->Flush();
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      /* No local successor: clear the designated poller and scan other
         pollsets for one, starting from this pollset's own neighborhood. */
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      size_t poller_neighborhood_idx =
          static_cast<size_t>(pollset->neighborhood - g_neighborhoods);
      gpr_mu_unlock(&pollset->mu);
      bool found_worker = false;
      bool scan_state[MAX_NEIGHBORHOODS];
      /* Pass 1: opportunistic trylock so we never block on a contended
         neighborhood; remember which ones we actually covered. */
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        if (gpr_mu_trylock(&neighborhood->mu)) {
          found_worker = check_neighborhood_for_available_poller(neighborhood);
          gpr_mu_unlock(&neighborhood->mu);
          scan_state[i] = true;
        } else {
          scan_state[i] = false;
        }
      }
      /* Pass 2: blocking lock on the neighborhoods pass 1 skipped. */
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        if (scan_state[i]) continue;
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        gpr_mu_lock(&neighborhood->mu);
        found_worker = check_neighborhood_for_available_poller(neighborhood);
        gpr_mu_unlock(&neighborhood->mu);
      }
      grpc_core::ExecCtx::Get()->Flush();
      gpr_mu_lock(&pollset->mu);
    }
  } else if (grpc_core::ExecCtx::Get()->HasWork()) {
    gpr_mu_unlock(&pollset->mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(&pollset->mu);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, " .. remove worker");
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    /* Last worker gone: shutdown (if requested) can now complete. */
    pollset_maybe_finish_shutdown(pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
}
955
/* pollset->po.mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->po.mu)
   during the course of its execution but it will always re-acquire the lock and
   ensure that it is held by the time the function returns.

   Returns the errors (if any) accumulated from epoll_wait() / event
   processing; a kick that arrived with no poller present returns
   GRPC_ERROR_NONE immediately. */
static grpc_error* pollset_work(grpc_pollset* ps,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollset_work", 0);
  grpc_pollset_worker worker;
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_work";
  if (ps->kicked_without_poller) {
    /* Consume a kick that was delivered before any worker arrived. */
    ps->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }

  if (begin_worker(ps, &worker, worker_hdl, deadline)) {
    /* begin_worker() returned true: we are the designated poller. */
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!ps->shutting_down);
    GPR_ASSERT(!ps->seen_inactive);

    gpr_mu_unlock(&ps->mu); /* unlock */
    /* This is the designated polling thread at this point and should ideally
       do polling. However, if there are unprocessed events left from a
       previous call to do_epoll_wait(), skip calling epoll_wait() in this
       iteration and process the pending epoll events.

       The reason for decoupling do_epoll_wait and process_epoll_events is to
       better distribute the work (i.e. handling epoll events) across multiple
       threads.

       process_epoll_events() returns very quickly: It just queues the work on
       exec_ctx but does not execute it (the actual execution or more
       accurately grpc_core::ExecCtx::Get()->Flush() happens in end_worker()
       AFTER selecting a designated poller). So we are not waiting long periods
       without a designated poller */
    if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
        gpr_atm_acq_load(&g_epoll_set.num_events)) {
      append_error(&error, do_epoll_wait(ps, deadline), err_desc);
    }
    append_error(&error, process_epoll_events(ps), err_desc);

    gpr_mu_lock(&ps->mu); /* lock */

    gpr_tls_set(&g_current_thread_worker, 0);
  } else {
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
  }
  end_worker(ps, &worker, worker_hdl);

  gpr_tls_set(&g_current_thread_pollset, 0);
  return error;
}
1010
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001011static grpc_error* pollset_kick(grpc_pollset* pollset,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001012 grpc_pollset_worker* specific_worker) {
yang-gce1cfea2018-01-31 15:59:50 -08001013 GPR_TIMER_SCOPE("pollset_kick", 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001014 GRPC_STATS_INC_POLLSET_KICK();
Craig Tillerbaa14a92017-11-03 09:09:36 -07001015 grpc_error* ret_err = GRPC_ERROR_NONE;
ncteisen3cffe1f2017-11-10 13:56:23 -08001016 if (grpc_polling_trace.enabled()) {
Craig Tillerb89bac02017-05-26 15:20:32 +00001017 gpr_strvec log;
1018 gpr_strvec_init(&log);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001019 char* tmp;
1020 gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
1021 specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
1022 (void*)gpr_tls_get(&g_current_thread_worker),
1023 pollset->root_worker);
Craig Tillerb89bac02017-05-26 15:20:32 +00001024 gpr_strvec_add(&log, tmp);
Craig Tiller4782d922017-11-10 09:53:21 -08001025 if (pollset->root_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001026 gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001027 kick_state_string(pollset->root_worker->state),
Craig Tiller830e82a2017-05-31 16:26:27 -07001028 pollset->root_worker->next,
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001029 kick_state_string(pollset->root_worker->next->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001030 gpr_strvec_add(&log, tmp);
1031 }
Craig Tiller4782d922017-11-10 09:53:21 -08001032 if (specific_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001033 gpr_asprintf(&tmp, " worker_kick_state=%s",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001034 kick_state_string(specific_worker->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001035 gpr_strvec_add(&log, tmp);
1036 }
Craig Tiller4782d922017-11-10 09:53:21 -08001037 tmp = gpr_strvec_flatten(&log, nullptr);
Craig Tillerb89bac02017-05-26 15:20:32 +00001038 gpr_strvec_destroy(&log);
yang-g69b4e4c2018-01-24 14:36:20 -08001039 gpr_log(GPR_DEBUG, "%s", tmp);
Craig Tillerb89bac02017-05-26 15:20:32 +00001040 gpr_free(tmp);
1041 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001042
Craig Tiller4782d922017-11-10 09:53:21 -08001043 if (specific_worker == nullptr) {
Craig Tiller4509c472017-04-27 19:05:13 +00001044 if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001045 grpc_pollset_worker* root_worker = pollset->root_worker;
Craig Tiller4782d922017-11-10 09:53:21 -08001046 if (root_worker == nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001047 GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
Craig Tiller4509c472017-04-27 19:05:13 +00001048 pollset->kicked_without_poller = true;
ncteisen3cffe1f2017-11-10 13:56:23 -08001049 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001050 gpr_log(GPR_INFO, " .. kicked_without_poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001051 }
yang-gdf92a642017-08-21 22:38:45 -07001052 goto done;
Craig Tiller375eb252017-04-27 23:29:12 +00001053 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001054 grpc_pollset_worker* next_worker = root_worker->next;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001055 if (root_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001056 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001057 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001058 gpr_log(GPR_INFO, " .. already kicked %p", root_worker);
Craig Tiller830e82a2017-05-31 16:26:27 -07001059 }
1060 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001061 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001062 } else if (next_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001063 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001064 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001065 gpr_log(GPR_INFO, " .. already kicked %p", next_worker);
Craig Tiller830e82a2017-05-31 16:26:27 -07001066 }
1067 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001068 goto done;
Craig Tiller830e82a2017-05-31 16:26:27 -07001069 } else if (root_worker ==
1070 next_worker && // only try and wake up a poller if
1071 // there is no next worker
Craig Tillerbaa14a92017-11-03 09:09:36 -07001072 root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
Craig Tiller830e82a2017-05-31 16:26:27 -07001073 &g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001074 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001075 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001076 gpr_log(GPR_INFO, " .. kicked %p", root_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001077 }
Craig Tiller55624a32017-05-26 08:14:44 -07001078 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001079 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1080 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001081 } else if (next_worker->state == UNKICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001082 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001083 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001084 gpr_log(GPR_INFO, " .. kicked %p", next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001085 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001086 GPR_ASSERT(next_worker->initialized_cv);
Craig Tiller55624a32017-05-26 08:14:44 -07001087 SET_KICK_STATE(next_worker, KICKED);
Craig Tiller375eb252017-04-27 23:29:12 +00001088 gpr_cv_signal(&next_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001089 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001090 } else if (next_worker->state == DESIGNATED_POLLER) {
1091 if (root_worker->state != DESIGNATED_POLLER) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001092 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001093 gpr_log(
Mark D. Roth48854d22018-04-25 13:05:26 -07001094 GPR_INFO,
Craig Tiller830e82a2017-05-31 16:26:27 -07001095 " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
1096 root_worker, root_worker->initialized_cv, next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001097 }
Craig Tiller55624a32017-05-26 08:14:44 -07001098 SET_KICK_STATE(root_worker, KICKED);
1099 if (root_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001100 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
Craig Tiller55624a32017-05-26 08:14:44 -07001101 gpr_cv_signal(&root_worker->cv);
1102 }
yang-gdf92a642017-08-21 22:38:45 -07001103 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001104 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001105 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001106 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001107 gpr_log(GPR_INFO, " .. non-root poller %p (root=%p)", next_worker,
Craig Tiller75aef7f2017-05-26 08:26:08 -07001108 root_worker);
1109 }
Craig Tiller55624a32017-05-26 08:14:44 -07001110 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001111 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1112 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001113 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001114 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001115 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001116 GPR_ASSERT(next_worker->state == KICKED);
Craig Tiller55624a32017-05-26 08:14:44 -07001117 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001118 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001119 }
1120 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001121 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001122 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001123 gpr_log(GPR_INFO, " .. kicked while waking up");
Craig Tiller830e82a2017-05-31 16:26:27 -07001124 }
yang-gdf92a642017-08-21 22:38:45 -07001125 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001126 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001127
1128 GPR_UNREACHABLE_CODE(goto done);
1129 }
1130
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001131 if (specific_worker->state == KICKED) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001132 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001133 gpr_log(GPR_INFO, " .. specific worker already kicked");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001134 }
yang-gdf92a642017-08-21 22:38:45 -07001135 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001136 } else if (gpr_tls_get(&g_current_thread_worker) ==
1137 (intptr_t)specific_worker) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001138 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001139 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001140 gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001141 }
Craig Tiller55624a32017-05-26 08:14:44 -07001142 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001143 goto done;
Craig Tiller32f90ee2017-04-28 12:46:41 -07001144 } else if (specific_worker ==
Craig Tillerbaa14a92017-11-03 09:09:36 -07001145 (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001146 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001147 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001148 gpr_log(GPR_INFO, " .. kick active poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001149 }
Craig Tiller55624a32017-05-26 08:14:44 -07001150 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001151 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1152 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001153 } else if (specific_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001154 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001155 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001156 gpr_log(GPR_INFO, " .. kick waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001157 }
Craig Tiller55624a32017-05-26 08:14:44 -07001158 SET_KICK_STATE(specific_worker, KICKED);
Craig Tiller4509c472017-04-27 19:05:13 +00001159 gpr_cv_signal(&specific_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001160 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001161 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001162 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001163 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001164 gpr_log(GPR_INFO, " .. kick non-waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001165 }
Craig Tiller55624a32017-05-26 08:14:44 -07001166 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001167 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001168 }
yang-gdf92a642017-08-21 22:38:45 -07001169done:
yang-gdf92a642017-08-21 22:38:45 -07001170 return ret_err;
Craig Tiller4509c472017-04-27 19:05:13 +00001171}
1172
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001173static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
Craig Tiller4509c472017-04-27 19:05:13 +00001174
Craig Tiller4509c472017-04-27 19:05:13 +00001175/*******************************************************************************
Craig Tillerc67cc992017-04-27 10:15:51 -07001176 * Pollset-set Definitions
1177 */
1178
Craig Tillerbaa14a92017-11-03 09:09:36 -07001179static grpc_pollset_set* pollset_set_create(void) {
Noah Eisenbe82e642018-02-09 09:16:55 -08001180 return (grpc_pollset_set*)(static_cast<intptr_t>(0xdeafbeef));
Craig Tillerc67cc992017-04-27 10:15:51 -07001181}
1182
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001183static void pollset_set_destroy(grpc_pollset_set* pss) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001184
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001185static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001186
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001187static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001188
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001189static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001190
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001191static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001192
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001193static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001194 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001195
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001196static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001197 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001198
1199/*******************************************************************************
1200 * Event engine binding
1201 */
1202
/* Tears down the engine-global state set up by grpc_init_epoll1_linux():
   fd bookkeeping, pollset globals, and finally the epoll set itself.
   NOTE(review): the order here (fd -> pollset -> epoll set) looks
   deliberate; the epoll set is released last since it was created first
   during init. */
static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
  epoll_set_shutdown();
}
1208
/* The epoll1 engine's implementation table handed to the iomgr.
   Initializers are positional: the order must match the field order of
   grpc_event_engine_vtable (declared elsewhere — field names not visible
   here). */
static const grpc_event_engine_vtable vtable = {
    sizeof(grpc_pollset), /* bytes the iomgr allocates per pollset */
    true, /* engine capability flag — see grpc_event_engine_vtable for its
             meaning */

    /* fd operations */
    fd_create,
    fd_wrapped_fd,
    fd_orphan,
    fd_shutdown,
    fd_notify_on_read,
    fd_notify_on_write,
    fd_notify_on_error,
    fd_is_shutdown,
    fd_get_read_notifier_pollset,

    /* pollset operations */
    pollset_init,
    pollset_shutdown,
    pollset_destroy,
    pollset_work,
    pollset_kick,
    pollset_add_fd,

    /* pollset_set operations (all no-ops in this engine) */
    pollset_set_create,
    pollset_set_destroy,
    pollset_set_add_pollset,
    pollset_set_del_pollset,
    pollset_set_add_pollset_set,
    pollset_set_del_pollset_set,
    pollset_set_add_fd,
    pollset_set_del_fd,

    /* engine teardown */
    shutdown_engine,
};
1241
1242/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001243 * Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll
1244 * support is available */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001245const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Craig Tillerc67cc992017-04-27 10:15:51 -07001246 if (!grpc_has_wakeup_fd()) {
yang-g30101b02017-11-06 14:35:30 -08001247 gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
Craig Tiller4782d922017-11-10 09:53:21 -08001248 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001249 }
1250
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001251 if (!epoll_set_init()) {
Craig Tiller4782d922017-11-10 09:53:21 -08001252 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001253 }
1254
Craig Tillerc67cc992017-04-27 10:15:51 -07001255 fd_global_init();
1256
1257 if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
Craig Tiller4509c472017-04-27 19:05:13 +00001258 fd_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001259 epoll_set_shutdown();
Craig Tiller4782d922017-11-10 09:53:21 -08001260 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001261 }
1262
1263 return &vtable;
1264}
1265
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001266#else /* defined(GRPC_LINUX_EPOLL) */
Muxi Yan67ff4052018-05-15 12:36:10 -07001267#if defined(GRPC_POSIX_SOCKET_EV_EPOLL1)
Yash Tibrewal1cac2232017-09-26 11:31:11 -07001268#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -07001269/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
1270 * NULL */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001271const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001272 return nullptr;
Craig Tiller9ddb3152017-04-27 21:32:56 +00001273}
Muxi Yanfe5989e2018-05-16 13:36:21 -07001274#endif /* defined(GRPC_POSIX_SOCKET_EV_EPOLL1) */
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001275#endif /* !defined(GRPC_LINUX_EPOLL) */