/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <grpc/support/port_platform.h>

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll
   (epoll_create() or epoll_create1()) */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h"

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/string_util.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/gpr/string.h"
#include "src/core/lib/gpr/tls.h"
#include "src/core/lib/gpr/useful.h"
#include "src/core/lib/gprpp/manual_constructor.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"

static grpc_wakeup_fd global_wakeup_fd;

/*******************************************************************************
 * Singleton epoll set related fields
 */

#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1

/* NOTE ON SYNCHRONIZATION:
 * - Fields in this struct are only modified by the designated poller. Hence
 *   there is no need for any locks to protect the struct.
 * - num_events and cursor have to be of atomic type to provide memory
 *   visibility guarantees only, i.e., in case of multiple pollers, the
 *   designated polling thread keeps changing; the thread that wrote these
 *   values may be different from the thread reading them.
 */
typedef struct epoll_set {
  int epfd;

  /* The epoll_events after the last call to epoll_wait() */
  struct epoll_event events[MAX_EPOLL_EVENTS];

  /* The number of epoll_events after the last call to epoll_wait() */
  gpr_atm num_events;

  /* Index of the first event in epoll_events that has to be processed. This
   * field is only valid if num_events > 0 */
  gpr_atm cursor;
} epoll_set;

/* The global singleton epoll set */
static epoll_set g_epoll_set;

static int epoll_create_and_cloexec() {
#ifdef GRPC_LINUX_EPOLL_CREATE1
  int fd = epoll_create1(EPOLL_CLOEXEC);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "epoll_create1 unavailable");
  }
#else
  int fd = epoll_create(MAX_EPOLL_EVENTS);
  if (fd < 0) {
    gpr_log(GPR_ERROR, "epoll_create unavailable");
  } else if (fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) {
    gpr_log(GPR_ERROR, "fcntl following epoll_create failed");
    return -1;
  }
#endif
  return fd;
}

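/* NOTE: epoll_create1(EPOLL_CLOEXEC) marks the fd close-on-exec atomically at
   creation time, whereas the epoll_create() fallback has to set FD_CLOEXEC via
   a separate fcntl() call, leaving a small window in which a concurrent
   fork()/exec() could inherit the epoll fd. */
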
/* Must be called *only* once */
static bool epoll_set_init() {
  g_epoll_set.epfd = epoll_create_and_cloexec();
  if (g_epoll_set.epfd < 0) {
    return false;
  }

  gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
  gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
  gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
  return true;
}

/* epoll_set_init() MUST be called before calling this. */
static void epoll_set_shutdown() {
  if (g_epoll_set.epfd >= 0) {
    close(g_epoll_set.epfd);
    g_epoll_set.epfd = -1;
  }
}

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;

  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> error_closure;

  struct grpc_fd* freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;

static const char* kick_state_string(kick_state st) {
  switch (st) {
    case UNKICKED:
      return "UNKICKED";
    case KICKED:
      return "KICKED";
    case DESIGNATED_POLLER:
      return "DESIGNATED_POLLER";
  }
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
}

struct grpc_pollset_worker {
  kick_state state;
  int kick_state_mutator;  // which line of code last changed kick state
  bool initialized_cv;
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
  gpr_cv cv;
  grpc_closure_list schedule_on_end_work;
};

#define SET_KICK_STATE(worker, kick_state)   \
  do {                                       \
    (worker)->state = (kick_state);          \
    (worker)->kick_state_mutator = __LINE__; \
  } while (false)

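/* Rough kick-state lifecycle, as implemented by begin_worker()/end_worker()
   and the kick paths below: a worker starts out UNKICKED; it becomes
   DESIGNATED_POLLER when it wins the g_active_poller CAS or is handed the role
   by the outgoing poller, and KICKED when another thread kicks it (or its cv
   wait times out). kick_state_mutator records the __LINE__ of the last
   transition purely as a debugging aid. */
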
#define MAX_NEIGHBORHOODS 1024

typedef struct pollset_neighborhood {
  gpr_mu mu;
  grpc_pollset* active_root;
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighborhood;

struct grpc_pollset {
  gpr_mu mu;
  pollset_neighborhood* neighborhood;
  bool reassigning_neighborhood;
  grpc_pollset_worker* root_worker;
  bool kicked_without_poller;

  /* Set to true if the pollset is observed to have no workers available to
     poll */
  bool seen_inactive;
  bool shutting_down;             /* Is the pollset shutting down ? */
  grpc_closure* shutdown_closure; /* Called after shutdown is complete */

  /* Number of workers who are *about-to* attach themselves to the pollset
   * worker list */
  int begin_refs;

  grpc_pollset* next;
  grpc_pollset* prev;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  char unused;
};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error** composite, grpc_error* error,
                         const char* desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != nullptr) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
  grpc_fd* new_fd = nullptr;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
    new_fd->read_closure.Init();
    new_fd->write_closure.Init();
    new_fd->error_closure.Init();
  }
  new_fd->fd = fd;
  new_fd->read_closure->InitEvent();
  new_fd->write_closure->InitEvent();
  new_fd->error_closure->InitEvent();
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = nullptr;

  char* fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);

  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLOUT | EPOLLET);
  /* Use the least significant bit of ev.data.ptr to store track_err. */
  ev.data.ptr = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(new_fd) |
                                        (track_err ? 1 : 0));
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}

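/* A minimal sketch of the ev.data.ptr tagging used above (this relies on
   grpc_fd allocations being at least 2-byte aligned, so bit 0 of the pointer
   is always free):

     intptr_t tagged = reinterpret_cast<intptr_t>(new_fd) | (track_err ? 1 : 0);
     grpc_fd* fd = reinterpret_cast<grpc_fd*>(tagged & ~static_cast<intptr_t>(1));
     bool track_err = (tagged & 1) != 0;

   process_epoll_events() performs exactly this unpacking on each event. */
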
static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }

/* if 'releasing_fd' is true, it means that we are going to detach the internal
 * fd from the grpc_fd structure (i.e., we should not be calling the shutdown()
 * syscall on that fd) */
static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why,
                                 bool releasing_fd) {
  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
    if (!releasing_fd) {
      shutdown(fd->fd, SHUT_RDWR);
    }
    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
    fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  fd_shutdown_internal(fd, why, false);
}

static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      bool already_closed, const char* reason) {
  grpc_error* error = GRPC_ERROR_NONE;
  bool is_release_fd = (release_fd != nullptr);

  if (!fd->read_closure->IsShutdown()) {
    fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                         is_release_fd);
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (is_release_fd) {
    *release_fd = fd->fd;
  } else if (!already_closed) {
    close(fd->fd);
  }

  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  fd->read_closure->DestroyEvent();
  fd->write_closure->DestroyEvent();
  fd->error_closure->DestroyEvent();

  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset*)notifier;
}

static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure->IsShutdown();
}

static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure->NotifyOn(closure);
}

static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure->NotifyOn(closure);
}

static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
  fd->error_closure->NotifyOn(closure);
}

static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
  fd->read_closure->SetReady();
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }

static void fd_has_errors(grpc_fd* fd) { fd->error_closure->SetReady(); }

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* The designated poller */
static gpr_atm g_active_poller;

static pollset_neighborhood* g_neighborhoods;
static size_t g_num_neighborhoods;

/* Return true if first in list */
static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) {
  if (pollset->root_worker == nullptr) {
    pollset->root_worker = worker;
    worker->next = worker->prev = worker;
    return true;
  } else {
    worker->next = pollset->root_worker;
    worker->prev = worker->next->prev;
    worker->next->prev = worker;
    worker->prev->next = worker;
    return false;
  }
}

/* Return true if last in list */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset* pollset,
                                          grpc_pollset_worker* worker) {
  if (worker == pollset->root_worker) {
    if (worker == worker->next) {
      pollset->root_worker = nullptr;
      return EMPTIED;
    } else {
      pollset->root_worker = worker->next;
      worker->prev->next = worker->next;
      worker->next->prev = worker->prev;
      return NEW_ROOT;
    }
  } else {
    worker->prev->next = worker->next;
    worker->next->prev = worker->prev;
    return REMOVED;
  }
}

static size_t choose_neighborhood(void) {
  return static_cast<size_t>(gpr_cpu_current_cpu()) % g_num_neighborhoods;
}

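/* Pollsets are sharded across neighborhoods keyed by the current CPU (see
   choose_neighborhood() above and pollset_global_init() below, which clamps
   the shard count to [1, MAX_NEIGHBORHOODS]); the per-neighborhood mutex plus
   cacheline padding keeps cross-CPU contention on the active pollset lists
   low. */
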
static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  global_wakeup_fd.read_fd = -1;
  grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
  ev.data.ptr = &global_wakeup_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
                &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
  g_neighborhoods = static_cast<pollset_neighborhood*>(
      gpr_zalloc(sizeof(*g_neighborhoods) * g_num_neighborhoods));
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_init(&g_neighborhoods[i].mu);
  }
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_destroy(&g_neighborhoods[i].mu);
  }
  gpr_free(g_neighborhoods);
}

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
  pollset->reassigning_neighborhood = false;
  pollset->root_worker = nullptr;
  pollset->kicked_without_poller = false;
  pollset->seen_inactive = true;
  pollset->shutting_down = false;
  pollset->shutdown_closure = nullptr;
  pollset->begin_refs = 0;
  pollset->next = pollset->prev = nullptr;
}

static void pollset_destroy(grpc_pollset* pollset) {
  gpr_mu_lock(&pollset->mu);
  if (!pollset->seen_inactive) {
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (!pollset->seen_inactive) {
      if (pollset->neighborhood != neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }
      pollset->prev->next = pollset->next;
      pollset->next->prev = pollset->prev;
      if (pollset == pollset->neighborhood->active_root) {
        pollset->neighborhood->active_root =
            pollset->next == pollset ? nullptr : pollset->next;
      }
    }
    gpr_mu_unlock(&pollset->neighborhood->mu);
  }
  gpr_mu_unlock(&pollset->mu);
  gpr_mu_destroy(&pollset->mu);
}

static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->root_worker != nullptr) {
    grpc_pollset_worker* worker = pollset->root_worker;
    do {
      GRPC_STATS_INC_POLLSET_KICK();
      switch (worker->state) {
        case KICKED:
          GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
          break;
        case UNKICKED:
          SET_KICK_STATE(worker, KICKED);
          if (worker->initialized_cv) {
            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
            gpr_cv_signal(&worker->cv);
          }
          break;
        case DESIGNATED_POLLER:
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
          SET_KICK_STATE(worker, KICKED);
          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                       "pollset_kick_all");
          break;
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  // TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
  // in the else case
  return error;
}

static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
  if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
      pollset->begin_refs == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = nullptr;
  }
}

static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
  GPR_TIMER_SCOPE("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == nullptr);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutdown_closure = closure;
  pollset->shutting_down = true;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(pollset);
}

static int poll_deadline_to_millis_timeout(grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
  if (delta > INT_MAX) {
    return INT_MAX;
  } else if (delta < 0) {
    return 0;
  } else {
    return static_cast<int>(delta);
  }
}

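/* Worked examples for the conversion above: a deadline 250ms in the future
   maps to an epoll_wait() timeout of 250; an already-expired deadline maps to
   0 (poll without blocking); GRPC_MILLIS_INF_FUTURE maps to -1 (block
   indefinitely); anything beyond INT_MAX is clamped because epoll_wait()
   takes an int timeout in milliseconds. */
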
/* Process the epoll events found by the do_epoll_wait() function.
   - g_epoll_set.cursor points to the index of the first event to be processed
   - This function then processes up to MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION
     events and updates g_epoll_set.cursor

   NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
   called by the g_active_poller thread. So there is no need for
   synchronization when accessing fields in g_epoll_set */
static grpc_error* process_epoll_events(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("process_epoll_events", 0);

  static const char* err_desc = "process_events";
  grpc_error* error = GRPC_ERROR_NONE;
  long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
  long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
  for (int idx = 0;
       (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
       idx++) {
    long c = cursor++;
    struct epoll_event* ev = &g_epoll_set.events[c];
    void* data_ptr = ev->data.ptr;

    if (data_ptr == &global_wakeup_fd) {
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd* fd = reinterpret_cast<grpc_fd*>(
          reinterpret_cast<intptr_t>(data_ptr) & ~static_cast<intptr_t>(1));
      bool track_err =
          reinterpret_cast<intptr_t>(data_ptr) & static_cast<intptr_t>(1);
      bool cancel = (ev->events & EPOLLHUP) != 0;
      bool error = (ev->events & EPOLLERR) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;
      bool err_fallback = error && !track_err;

      if (error && !err_fallback) {
        fd_has_errors(fd);
      }

      if (read_ev || cancel || err_fallback) {
        fd_become_readable(fd, pollset);
      }

      if (write_ev || cancel || err_fallback) {
        fd_become_writable(fd);
      }
    }
  }
  gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
  return error;
}

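/* Note on the cursor/num_events handoff: do_epoll_wait() publishes num_events
   and resets cursor with release stores, and process_epoll_events() reloads
   both with acquire loads, handles at most
   MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION events per call, and then publishes
   the advanced cursor. Since the designated poller can change between calls,
   these acquire/release pairs are what make leftover events visible to
   whichever thread polls next. */
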
/* Do epoll_wait and store the events in the g_epoll_set.events field. This
   does not "process" any of the events yet; that is done in
   process_epoll_events(). See the process_epoll_events() function for more
   details.

   NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
   (i.e., the designated poller thread) will be calling this function. So there
   is no need for any synchronization when accessing fields in g_epoll_set */
static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
  GPR_TIMER_SCOPE("do_epoll_wait", 0);

  int r;
  int timeout = poll_deadline_to_millis_timeout(deadline);
  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  do {
    GRPC_STATS_INC_SYSCALL_POLL();
    r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
                   timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
  }

  gpr_atm_rel_store(&g_epoll_set.num_events, r);
  gpr_atm_rel_store(&g_epoll_set.cursor, 0);

  return GRPC_ERROR_NONE;
}

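/* The epoll_wait() call above is retried on EINTR so that stray signals do not
   surface as spurious poll errors; only genuinely failing calls are converted
   into a GRPC_OS_ERROR. The blocking-region markers are skipped for a zero
   timeout because that call cannot block. */
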
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_SCOPE("begin_worker", 0);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  SET_KICK_STATE(worker, UNKICKED);
  worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
  pollset->begin_refs++;

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "PS:%p BEGIN_STARTS:%p", pollset, worker);
  }

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move back to the
    // active list
    bool is_reassigning = false;
    if (!pollset->reassigning_neighborhood) {
      is_reassigning = true;
      pollset->reassigning_neighborhood = true;
      pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
    }
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  // pollset unlocked: state may change (even worker->kick_state)
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_DEBUG, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
              pollset, worker, kick_state_string(worker->state),
              is_reassigning);
    }
    if (pollset->seen_inactive) {
      if (neighborhood != pollset->neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }

      /* In the brief time we released the pollset locks above, the worker MAY
         have been kicked. In this case, the worker should get out of this
         pollset ASAP and hence this should neither add the pollset to the
         neighborhood nor mark the pollset as active.

         On a side note, the only way a worker's kick state could have changed
         at this point is if it were "kicked specifically". Since the worker has
         not added itself to the pollset yet (by calling worker_insert()), it is
         not visible in the "kick any" path yet */
      if (worker->state == UNKICKED) {
        pollset->seen_inactive = false;
        if (neighborhood->active_root == nullptr) {
          neighborhood->active_root = pollset->next = pollset->prev = pollset;
          /* Make this the designated poller if there isn't one already */
          if (worker->state == UNKICKED &&
              gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
            SET_KICK_STATE(worker, DESIGNATED_POLLER);
          }
        } else {
          pollset->next = neighborhood->active_root;
          pollset->prev = pollset->next->prev;
          pollset->next->prev = pollset->prev->next = pollset;
        }
      }
    }
    if (is_reassigning) {
      GPR_ASSERT(pollset->reassigning_neighborhood);
      pollset->reassigning_neighborhood = false;
    }
    gpr_mu_unlock(&neighborhood->mu);
  }

  worker_insert(pollset, worker);
  pollset->begin_refs--;
  if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (worker->state == UNKICKED && !pollset->shutting_down) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
                pollset, worker, kick_state_string(worker->state),
                pollset->shutting_down);
      }

      if (gpr_cv_wait(&worker->cv, &pollset->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
          worker->state == UNKICKED) {
        /* If gpr_cv_wait returns true (i.e., a timeout), pretend that the
           worker received a kick */
        SET_KICK_STATE(worker, KICKED);
      }
    }
    grpc_core::ExecCtx::Get()->InvalidateNow();
  }

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG,
            "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
            "kicked_without_poller: %d",
            pollset, worker, kick_state_string(worker->state),
            pollset->shutting_down, pollset->kicked_without_poller);
  }

  /* We release pollset lock in this function at a couple of places:
   *   1. Briefly when assigning pollset to a neighborhood
   *   2. When doing gpr_cv_wait()
   * It is possible that 'kicked_without_poller' was set to true during (1) and
   * 'shutting_down' is set to true during (1) or (2). If either of them is
   * true, this worker cannot do polling */
  /* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller
   * case; especially when the worker is the DESIGNATED_POLLER */

  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    return false;
  }

  return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}

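/* begin_worker() returns true only when this worker ended up as the
   DESIGNATED_POLLER for a pollset that is not shutting down; pollset_work()
   uses that result to decide whether to call do_epoll_wait() /
   process_epoll_events() or to simply fall through to end_worker(). */
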
static bool check_neighborhood_for_available_poller(
    pollset_neighborhood* neighborhood) {
  GPR_TIMER_SCOPE("check_neighborhood_for_available_poller", 0);
  bool found_worker = false;
  do {
    grpc_pollset* inspect = neighborhood->active_root;
    if (inspect == nullptr) {
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker* inspect_worker = inspect->root_worker;
    if (inspect_worker != nullptr) {
      do {
        switch (inspect_worker->state) {
          case UNKICKED:
            if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                       (gpr_atm)inspect_worker)) {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
                        inspect_worker);
              }
              SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
              if (inspect_worker->initialized_cv) {
                GPR_TIMER_MARK("signal worker", 0);
                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
                gpr_cv_signal(&inspect_worker->cv);
              }
            } else {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
              }
            }
            // even if we didn't win the cas, there's a worker, we can stop
            found_worker = true;
            break;
          case KICKED:
            break;
          case DESIGNATED_POLLER:
            found_worker = true;  // ok, so someone else found the worker, but
                                  // we'll accept that
            break;
        }
        inspect_worker = inspect_worker->next;
      } while (!found_worker && inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
      }
      inspect->seen_inactive = true;
      if (inspect == neighborhood->active_root) {
        neighborhood->active_root =
            inspect->next == inspect ? nullptr : inspect->next;
      }
      inspect->next->prev = inspect->prev;
      inspect->prev->next = inspect->next;
      inspect->next = inspect->prev = nullptr;
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  return found_worker;
}

static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_SCOPE("end_worker", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
  }
  if (worker_hdl != nullptr) *worker_hdl = nullptr;
  /* Make sure we appear kicked */
  SET_KICK_STATE(worker, KICKED);
  grpc_closure_list_move(&worker->schedule_on_end_work,
                         grpc_core::ExecCtx::Get()->closure_list());
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    if (worker->next != worker && worker->next->state == UNKICKED) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
      }
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
      gpr_cv_signal(&worker->next->cv);
      if (grpc_core::ExecCtx::Get()->HasWork()) {
        gpr_mu_unlock(&pollset->mu);
        grpc_core::ExecCtx::Get()->Flush();
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      size_t poller_neighborhood_idx =
          static_cast<size_t>(pollset->neighborhood - g_neighborhoods);
      gpr_mu_unlock(&pollset->mu);
      bool found_worker = false;
      bool scan_state[MAX_NEIGHBORHOODS];
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        if (gpr_mu_trylock(&neighborhood->mu)) {
          found_worker = check_neighborhood_for_available_poller(neighborhood);
          gpr_mu_unlock(&neighborhood->mu);
          scan_state[i] = true;
        } else {
          scan_state[i] = false;
        }
      }
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        if (scan_state[i]) continue;
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        gpr_mu_lock(&neighborhood->mu);
        found_worker = check_neighborhood_for_available_poller(neighborhood);
        gpr_mu_unlock(&neighborhood->mu);
      }
      grpc_core::ExecCtx::Get()->Flush();
      gpr_mu_lock(&pollset->mu);
    }
  } else if (grpc_core::ExecCtx::Get()->HasWork()) {
    gpr_mu_unlock(&pollset->mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(&pollset->mu);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, " .. remove worker");
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    pollset_maybe_finish_shutdown(pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
}

952/* pollset->mu lock must be held by the caller before calling this.
953 The function pollset_work() may temporarily release the lock (pollset->mu)
954 during its execution, but it will always re-acquire the lock and ensure that
955 it is held by the time the function returns (see the caller sketch below). */
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800956static grpc_error* pollset_work(grpc_pollset* ps,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700957 grpc_pollset_worker** worker_hdl,
Craig Tiller20397792017-07-18 11:35:27 -0700958 grpc_millis deadline) {
yang-gce1cfea2018-01-31 15:59:50 -0800959 GPR_TIMER_SCOPE("pollset_work", 0);
Craig Tiller4509c472017-04-27 19:05:13 +0000960 grpc_pollset_worker worker;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700961 grpc_error* error = GRPC_ERROR_NONE;
962 static const char* err_desc = "pollset_work";
Sree Kuchibhotlab154cd12017-08-25 10:33:41 -0700963 if (ps->kicked_without_poller) {
964 ps->kicked_without_poller = false;
Craig Tiller4509c472017-04-27 19:05:13 +0000965 return GRPC_ERROR_NONE;
966 }
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700967
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800968 if (begin_worker(ps, &worker, worker_hdl, deadline)) {
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700969 gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
Craig Tiller4509c472017-04-27 19:05:13 +0000970 gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700971 GPR_ASSERT(!ps->shutting_down);
972 GPR_ASSERT(!ps->seen_inactive);
973
974 gpr_mu_unlock(&ps->mu); /* unlock */
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700975 /* This is the designated polling thread at this point and should ideally do
976 polling. However, if there are unprocessed events left from a previous
977 call to do_epoll_wait(), skip calling epoll_wait() in this iteration and
978 process the pending epoll events.
979
980 The reason for decoupling do_epoll_wait and process_epoll_events is to
981 better distribute the work (i.e. handling epoll events) across multiple
982 threads.
983
984 process_epoll_events() returns very quickly: it just queues the work on
985 the exec_ctx but does not execute it (the actual execution, or more
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800986 accurately grpc_core::ExecCtx::Get()->Flush(), happens in end_worker()
987 AFTER selecting a designated poller). So we are not left waiting for long
988 periods without a designated poller. */
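    /* cursor == num_events means there are no unprocessed events left over from
       a previous do_epoll_wait(), so it is safe to poll for a new batch. */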
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700989 if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
990 gpr_atm_acq_load(&g_epoll_set.num_events)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800991 append_error(&error, do_epoll_wait(ps, deadline), err_desc);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700992 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800993 append_error(&error, process_epoll_events(ps), err_desc);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700994
995 gpr_mu_lock(&ps->mu); /* lock */
996
Craig Tiller4509c472017-04-27 19:05:13 +0000997 gpr_tls_set(&g_current_thread_worker, 0);
Craig Tiller830e82a2017-05-31 16:26:27 -0700998 } else {
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700999 gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
Craig Tiller4509c472017-04-27 19:05:13 +00001000 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001001 end_worker(ps, &worker, worker_hdl);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001002
Craig Tiller8502ecb2017-04-28 14:22:01 -07001003 gpr_tls_set(&g_current_thread_pollset, 0);
Craig Tiller4509c472017-04-27 19:05:13 +00001004 return error;
1005}
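/* A minimal caller sketch (illustrative only, not part of this engine): 'ps',
   'deadline' and error handling are assumed to be set up elsewhere. It shows
   the locking contract documented above: the mutex handed out by
   pollset_init() is held around the call, and pollset_work() re-acquires it
   before returning even though it may drop it internally.

     gpr_mu* mu;
     pollset_init(ps, &mu);
     grpc_pollset_worker* worker = nullptr;
     gpr_mu_lock(mu);
     GRPC_LOG_IF_ERROR("pollset_work", pollset_work(ps, &worker, deadline));
     gpr_mu_unlock(mu);
*/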
1006
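/* Kick the pollset so that some worker notices pending work. Roughly: if
   'specific_worker' is non-null, that worker is marked KICKED and woken (via
   its condition variable or the global wakeup fd, as appropriate); if it is
   null, some worker of the pollset is kicked instead, or the kick is recorded
   in 'kicked_without_poller' when no worker is present. */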
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001007static grpc_error* pollset_kick(grpc_pollset* pollset,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001008 grpc_pollset_worker* specific_worker) {
yang-gce1cfea2018-01-31 15:59:50 -08001009 GPR_TIMER_SCOPE("pollset_kick", 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001010 GRPC_STATS_INC_POLLSET_KICK();
Craig Tillerbaa14a92017-11-03 09:09:36 -07001011 grpc_error* ret_err = GRPC_ERROR_NONE;
ncteisen3cffe1f2017-11-10 13:56:23 -08001012 if (grpc_polling_trace.enabled()) {
Craig Tillerb89bac02017-05-26 15:20:32 +00001013 gpr_strvec log;
1014 gpr_strvec_init(&log);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001015 char* tmp;
1016 gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
1017 specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
1018 (void*)gpr_tls_get(&g_current_thread_worker),
1019 pollset->root_worker);
Craig Tillerb89bac02017-05-26 15:20:32 +00001020 gpr_strvec_add(&log, tmp);
Craig Tiller4782d922017-11-10 09:53:21 -08001021 if (pollset->root_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001022 gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001023 kick_state_string(pollset->root_worker->state),
Craig Tiller830e82a2017-05-31 16:26:27 -07001024 pollset->root_worker->next,
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001025 kick_state_string(pollset->root_worker->next->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001026 gpr_strvec_add(&log, tmp);
1027 }
Craig Tiller4782d922017-11-10 09:53:21 -08001028 if (specific_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001029 gpr_asprintf(&tmp, " worker_kick_state=%s",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001030 kick_state_string(specific_worker->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001031 gpr_strvec_add(&log, tmp);
1032 }
Craig Tiller4782d922017-11-10 09:53:21 -08001033 tmp = gpr_strvec_flatten(&log, nullptr);
Craig Tillerb89bac02017-05-26 15:20:32 +00001034 gpr_strvec_destroy(&log);
yang-g69b4e4c2018-01-24 14:36:20 -08001035 gpr_log(GPR_DEBUG, "%s", tmp);
Craig Tillerb89bac02017-05-26 15:20:32 +00001036 gpr_free(tmp);
1037 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001038
Craig Tiller4782d922017-11-10 09:53:21 -08001039 if (specific_worker == nullptr) {
Craig Tiller4509c472017-04-27 19:05:13 +00001040 if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001041 grpc_pollset_worker* root_worker = pollset->root_worker;
Craig Tiller4782d922017-11-10 09:53:21 -08001042 if (root_worker == nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001043 GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
Craig Tiller4509c472017-04-27 19:05:13 +00001044 pollset->kicked_without_poller = true;
ncteisen3cffe1f2017-11-10 13:56:23 -08001045 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001046 gpr_log(GPR_DEBUG, " .. kicked_without_poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001047 }
yang-gdf92a642017-08-21 22:38:45 -07001048 goto done;
Craig Tiller375eb252017-04-27 23:29:12 +00001049 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001050 grpc_pollset_worker* next_worker = root_worker->next;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001051 if (root_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001052 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001053 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001054 gpr_log(GPR_DEBUG, " .. already kicked %p", root_worker);
Craig Tiller830e82a2017-05-31 16:26:27 -07001055 }
1056 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001057 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001058 } else if (next_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001059 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001060 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001061 gpr_log(GPR_DEBUG, " .. already kicked %p", next_worker);
Craig Tiller830e82a2017-05-31 16:26:27 -07001062 }
1063 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001064 goto done;
Craig Tiller830e82a2017-05-31 16:26:27 -07001065 } else if (root_worker ==
1066 next_worker && // only try and wake up a poller if
1067 // there is no next worker
Craig Tillerbaa14a92017-11-03 09:09:36 -07001068 root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
Craig Tiller830e82a2017-05-31 16:26:27 -07001069 &g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001070 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001071 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001072 gpr_log(GPR_DEBUG, " .. kicked %p", root_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001073 }
Craig Tiller55624a32017-05-26 08:14:44 -07001074 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001075 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1076 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001077 } else if (next_worker->state == UNKICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001078 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001079 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001080 gpr_log(GPR_DEBUG, " .. kicked %p", next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001081 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001082 GPR_ASSERT(next_worker->initialized_cv);
Craig Tiller55624a32017-05-26 08:14:44 -07001083 SET_KICK_STATE(next_worker, KICKED);
Craig Tiller375eb252017-04-27 23:29:12 +00001084 gpr_cv_signal(&next_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001085 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001086 } else if (next_worker->state == DESIGNATED_POLLER) {
1087 if (root_worker->state != DESIGNATED_POLLER) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001088 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001089 gpr_log(
yang-g69b4e4c2018-01-24 14:36:20 -08001090 GPR_DEBUG,
Craig Tiller830e82a2017-05-31 16:26:27 -07001091 " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
1092 root_worker, root_worker->initialized_cv, next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001093 }
Craig Tiller55624a32017-05-26 08:14:44 -07001094 SET_KICK_STATE(root_worker, KICKED);
1095 if (root_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001096 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
Craig Tiller55624a32017-05-26 08:14:44 -07001097 gpr_cv_signal(&root_worker->cv);
1098 }
yang-gdf92a642017-08-21 22:38:45 -07001099 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001100 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001101 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001102 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001103 gpr_log(GPR_DEBUG, " .. non-root poller %p (root=%p)", next_worker,
Craig Tiller75aef7f2017-05-26 08:26:08 -07001104 root_worker);
1105 }
Craig Tiller55624a32017-05-26 08:14:44 -07001106 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001107 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1108 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001109 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001110 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001111 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001112 GPR_ASSERT(next_worker->state == KICKED);
Craig Tiller55624a32017-05-26 08:14:44 -07001113 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001114 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001115 }
1116 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001117 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001118 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001119 gpr_log(GPR_DEBUG, " .. kicked while waking up");
Craig Tiller830e82a2017-05-31 16:26:27 -07001120 }
yang-gdf92a642017-08-21 22:38:45 -07001121 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001122 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001123
1124 GPR_UNREACHABLE_CODE(goto done);
1125 }
1126
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001127 if (specific_worker->state == KICKED) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001128 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001129 gpr_log(GPR_DEBUG, " .. specific worker already kicked");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001130 }
yang-gdf92a642017-08-21 22:38:45 -07001131 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001132 } else if (gpr_tls_get(&g_current_thread_worker) ==
1133 (intptr_t)specific_worker) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001134 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001135 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001136 gpr_log(GPR_DEBUG, " .. mark %p kicked", specific_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001137 }
Craig Tiller55624a32017-05-26 08:14:44 -07001138 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001139 goto done;
Craig Tiller32f90ee2017-04-28 12:46:41 -07001140 } else if (specific_worker ==
Craig Tillerbaa14a92017-11-03 09:09:36 -07001141 (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001142 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001143 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001144 gpr_log(GPR_DEBUG, " .. kick active poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001145 }
Craig Tiller55624a32017-05-26 08:14:44 -07001146 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001147 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1148 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001149 } else if (specific_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001150 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001151 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001152 gpr_log(GPR_DEBUG, " .. kick waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001153 }
Craig Tiller55624a32017-05-26 08:14:44 -07001154 SET_KICK_STATE(specific_worker, KICKED);
Craig Tiller4509c472017-04-27 19:05:13 +00001155 gpr_cv_signal(&specific_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001156 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001157 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001158 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001159 if (grpc_polling_trace.enabled()) {
yang-g69b4e4c2018-01-24 14:36:20 -08001160 gpr_log(GPR_DEBUG, " .. kick non-waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001161 }
Craig Tiller55624a32017-05-26 08:14:44 -07001162 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001163 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001164 }
yang-gdf92a642017-08-21 22:38:45 -07001165done:
yang-gdf92a642017-08-21 22:38:45 -07001166 return ret_err;
Craig Tiller4509c472017-04-27 19:05:13 +00001167}
1168
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001169static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
Craig Tiller4509c472017-04-27 19:05:13 +00001170
Craig Tiller4509c472017-04-27 19:05:13 +00001171/*******************************************************************************
Craig Tillerc67cc992017-04-27 10:15:51 -07001172 * Pollset-set Definitions
1173 */
1174
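/* This engine multiplexes everything onto the single global epoll set, so
   pollset_sets have no state to maintain: pollset_set_create() just returns a
   dummy non-null pointer and the remaining operations are no-ops. */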
Craig Tillerbaa14a92017-11-03 09:09:36 -07001175static grpc_pollset_set* pollset_set_create(void) {
Noah Eisenbe82e642018-02-09 09:16:55 -08001176 return (grpc_pollset_set*)(static_cast<intptr_t>(0xdeafbeef));
Craig Tillerc67cc992017-04-27 10:15:51 -07001177}
1178
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001179static void pollset_set_destroy(grpc_pollset_set* pss) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001180
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001181static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001182
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001183static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001184
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001185static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001186
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001187static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001188
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001189static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001190 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001191
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001192static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001193 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001194
1195/*******************************************************************************
1196 * Event engine binding
1197 */
1198
1199static void shutdown_engine(void) {
1200 fd_global_shutdown();
1201 pollset_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001202 epoll_set_shutdown();
Craig Tillerc67cc992017-04-27 10:15:51 -07001203}
1204
1205static const grpc_event_engine_vtable vtable = {
Yash Tibrewal533d1182017-09-18 10:48:22 -07001206 sizeof(grpc_pollset),
Yash Tibrewaladc733f2018-04-02 18:32:06 -07001207 true,
Craig Tillerc67cc992017-04-27 10:15:51 -07001208
Yash Tibrewal533d1182017-09-18 10:48:22 -07001209 fd_create,
1210 fd_wrapped_fd,
1211 fd_orphan,
1212 fd_shutdown,
1213 fd_notify_on_read,
1214 fd_notify_on_write,
Yash Tibrewaladc733f2018-04-02 18:32:06 -07001215 fd_notify_on_error,
Yash Tibrewal533d1182017-09-18 10:48:22 -07001216 fd_is_shutdown,
1217 fd_get_read_notifier_pollset,
Craig Tillerc67cc992017-04-27 10:15:51 -07001218
Yash Tibrewal533d1182017-09-18 10:48:22 -07001219 pollset_init,
1220 pollset_shutdown,
1221 pollset_destroy,
1222 pollset_work,
1223 pollset_kick,
1224 pollset_add_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001225
Yash Tibrewal533d1182017-09-18 10:48:22 -07001226 pollset_set_create,
1227 pollset_set_destroy,
1228 pollset_set_add_pollset,
1229 pollset_set_del_pollset,
1230 pollset_set_add_pollset_set,
1231 pollset_set_del_pollset_set,
1232 pollset_set_add_fd,
1233 pollset_set_del_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001234
Yash Tibrewal533d1182017-09-18 10:48:22 -07001235 shutdown_engine,
Craig Tillerc67cc992017-04-27 10:15:51 -07001236};
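/* Illustrative only (the engine selection itself happens in the ev_posix
   layer, not here): a caller is expected to probe for epoll1 support and fall
   back to another polling engine when this initializer returns nullptr.

     const grpc_event_engine_vtable* engine =
         grpc_init_epoll1_linux(/*explicit_request=*/false);
     if (engine == nullptr) {
       // epoll1 is unavailable on this system; try the next engine
     }
*/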
1237
1238/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001239 * Create the epoll fd (epoll_set_init() takes care of that) to make sure epoll
1240 * support is actually available at runtime (see the probe sketch below). */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001241const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Craig Tillerc67cc992017-04-27 10:15:51 -07001242 if (!grpc_has_wakeup_fd()) {
yang-g30101b02017-11-06 14:35:30 -08001243 gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
Craig Tiller4782d922017-11-10 09:53:21 -08001244 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001245 }
1246
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001247 if (!epoll_set_init()) {
Craig Tiller4782d922017-11-10 09:53:21 -08001248 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001249 }
1250
Craig Tillerc67cc992017-04-27 10:15:51 -07001251 fd_global_init();
1252
1253 if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
Craig Tiller4509c472017-04-27 19:05:13 +00001254 fd_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001255 epoll_set_shutdown();
Craig Tiller4782d922017-11-10 09:53:21 -08001256 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001257 }
1258
1259 return &vtable;
1260}
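/* A minimal sketch of the kind of runtime probe epoll_set_init() is relied on
   to perform (illustrative only; 'kernel_supports_epoll' is a hypothetical
   helper, and the real initializer also sets up the event buffer):

     static bool kernel_supports_epoll(void) {
       int fd = epoll_create1(EPOLL_CLOEXEC);
       if (fd < 0) fd = epoll_create(1);  // fall back for older kernels
       if (fd < 0) return false;          // GLIBC has epoll, the kernel does not
       close(fd);
       return true;
     }
*/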
1261
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001262#else /* defined(GRPC_LINUX_EPOLL) */
Craig Tillerc67cc992017-04-27 10:15:51 -07001263#if defined(GRPC_POSIX_SOCKET)
Yash Tibrewal1cac2232017-09-26 11:31:11 -07001264#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -07001265/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
1266 * NULL */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001267const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001268 return nullptr;
Craig Tiller9ddb3152017-04-27 21:32:56 +00001269}
Craig Tillerc67cc992017-04-27 10:15:51 -07001270#endif /* defined(GRPC_POSIX_SOCKET) */
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001271#endif /* !defined(GRPC_LINUX_EPOLL) */