blob: 1c495b47bcc0924685b5194abae38d78518d6a6b [file] [log] [blame]
Craig Tillerc67cc992017-04-27 10:15:51 -07001/*
2 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02003 * Copyright 2017 gRPC authors.
Craig Tillerc67cc992017-04-27 10:15:51 -07004 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
Craig Tillerc67cc992017-04-27 10:15:51 -07008 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +02009 * http://www.apache.org/licenses/LICENSE-2.0
Craig Tillerc67cc992017-04-27 10:15:51 -070010 *
Jan Tattermusch7897ae92017-06-07 22:57:36 +020011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
Craig Tillerc67cc992017-04-27 10:15:51 -070016 *
17 */
18
Alexander Polcyndb3e8982018-02-21 16:59:24 -080019#include <grpc/support/port_platform.h>
20
Craig Tillerc67cc992017-04-27 10:15:51 -070021#include "src/core/lib/iomgr/port.h"
22
yang-gceb24752017-11-07 12:06:37 -080023#include <grpc/support/log.h>
24
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -080025/* This polling engine is only relevant on linux kernels supporting epoll
26 epoll_create() or epoll_create1() */
Mehrdad Afsharifb669002018-01-17 15:37:56 -080027#ifdef GRPC_LINUX_EPOLL
Craig Tiller4509c472017-04-27 19:05:13 +000028#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070029
30#include <assert.h>
31#include <errno.h>
Mehrdad Afshari1957fd02018-01-16 17:22:01 -080032#include <fcntl.h>
Craig Tiller20397792017-07-18 11:35:27 -070033#include <limits.h>
Craig Tillerc67cc992017-04-27 10:15:51 -070034#include <poll.h>
35#include <pthread.h>
36#include <string.h>
37#include <sys/epoll.h>
38#include <sys/socket.h>
39#include <unistd.h>
40
41#include <grpc/support/alloc.h>
Craig Tiller6de05932017-04-28 09:17:38 -070042#include <grpc/support/cpu.h>
Craig Tillerc67cc992017-04-27 10:15:51 -070043#include <grpc/support/string_util.h>
Craig Tillerc67cc992017-04-27 10:15:51 -070044
Craig Tillerb4bb1cd2017-07-20 14:18:17 -070045#include "src/core/lib/debug/stats.h"
Mark D. Rothdbdf4952018-01-18 11:21:12 -080046#include "src/core/lib/gpr/string.h"
Vijay Paib6cf1232018-01-25 21:02:26 -080047#include "src/core/lib/gpr/tls.h"
Vijay Paid4d0a302018-01-25 13:24:03 -080048#include "src/core/lib/gpr/useful.h"
Mark D. Roth4f2b0fd2018-01-19 12:12:23 -080049#include "src/core/lib/gprpp/manual_constructor.h"
Craig Tiller6b7c1fb2017-07-19 15:45:03 -070050#include "src/core/lib/iomgr/block_annotate.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070051#include "src/core/lib/iomgr/ev_posix.h"
52#include "src/core/lib/iomgr/iomgr_internal.h"
53#include "src/core/lib/iomgr/lockfree_event.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070054#include "src/core/lib/iomgr/wakeup_fd_posix.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070055#include "src/core/lib/profiling/timers.h"
Craig Tillerc67cc992017-04-27 10:15:51 -070056
/* Wakeup fd used by the timer system to rouse "some poller" when an alarm
 * fires earlier than the current epoll_wait() deadline. */
static grpc_wakeup_fd global_wakeup_fd;

/*******************************************************************************
 * Singleton epoll set related fields
 */

/* Capacity of the event buffer handed to epoll_wait(). */
#define MAX_EPOLL_EVENTS 100
/* How many buffered events a single call into the poller processes. */
#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1

/* NOTE ON SYNCHRONIZATION:
 * - Fields in this struct are only modified by the designated poller. Hence
 *   there is no need for any locks to protect the struct.
 * - num_events and cursor fields have to be of atomic type to provide memory
 *   visibility guarantees only. i.e In case of multiple pollers, the
 *   designated polling thread keeps changing; the thread that wrote these
 *   values may be different from the thread reading the values */
typedef struct epoll_set {
  int epfd;

  /* The epoll_events after the last call to epoll_wait() */
  struct epoll_event events[MAX_EPOLL_EVENTS];

  /* The number of epoll_events after the last call to epoll_wait() */
  gpr_atm num_events;

  /* Index of the first event in epoll_events that has to be processed. This
   * field is only valid if num_events > 0 */
  gpr_atm cursor;
} epoll_set;

/* The global singleton epoll set */
static epoll_set g_epoll_set;
90
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -080091static int epoll_create_and_cloexec() {
Mehrdad Afshari1957fd02018-01-16 17:22:01 -080092#ifdef GRPC_LINUX_EPOLL_CREATE1
93 int fd = epoll_create1(EPOLL_CLOEXEC);
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -080094 if (fd < 0) {
95 gpr_log(GPR_ERROR, "epoll_create1 unavailable");
Mehrdad Afshari1957fd02018-01-16 17:22:01 -080096 }
Mehrdad Afshari1957fd02018-01-16 17:22:01 -080097#else
98 int fd = epoll_create(MAX_EPOLL_EVENTS);
99 if (fd < 0) {
100 gpr_log(GPR_ERROR, "epoll_create unavailable");
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -0800101 } else if (fcntl(fd, F_SETFD, FD_CLOEXEC) != 0) {
102 gpr_log(GPR_ERROR, "fcntl following epoll_create failed");
Mehrdad Afshari1957fd02018-01-16 17:22:01 -0800103 return -1;
104 }
Mehrdad Afshari1957fd02018-01-16 17:22:01 -0800105#endif
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -0800106 return fd;
Mehrdad Afshari1957fd02018-01-16 17:22:01 -0800107}
108
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700109/* Must be called *only* once */
110static bool epoll_set_init() {
Mehrdad Afshari8b0e9fb2018-01-17 13:42:26 -0800111 g_epoll_set.epfd = epoll_create_and_cloexec();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700112 if (g_epoll_set.epfd < 0) {
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700113 return false;
114 }
115
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700116 gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
117 gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
118 gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700119 return true;
120}
121
122/* epoll_set_init() MUST be called before calling this. */
123static void epoll_set_shutdown() {
124 if (g_epoll_set.epfd >= 0) {
125 close(g_epoll_set.epfd);
126 g_epoll_set.epfd = -1;
127 }
128}
Craig Tillerc67cc992017-04-27 10:15:51 -0700129
130/*******************************************************************************
131 * Fd Declarations
132 */
133
/* Wrapper around an OS file descriptor registered with the epoll set. */
struct grpc_fd {
  int fd;  /* the underlying OS file descriptor */

  /* Lock-free notification events for readability / writability / errors. */
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> error_closure;

  /* Intrusive link used when this struct sits on the fd freelist. */
  struct grpc_fd* freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);
152
153/*******************************************************************************
154 * Pollset Declarations
155 */
156
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -0700157typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;
Craig Tillerc67cc992017-04-27 10:15:51 -0700158
Craig Tillerbaa14a92017-11-03 09:09:36 -0700159static const char* kick_state_string(kick_state st) {
Craig Tiller830e82a2017-05-31 16:26:27 -0700160 switch (st) {
161 case UNKICKED:
162 return "UNKICKED";
163 case KICKED:
164 return "KICKED";
165 case DESIGNATED_POLLER:
166 return "DESIGNATED_POLLER";
167 }
168 GPR_UNREACHABLE_CODE(return "UNKNOWN");
169}
170
/* Per-thread worker record; workers form a circular doubly-linked list hung
 * off their pollset's root_worker. */
struct grpc_pollset_worker {
  kick_state state;
  int kick_state_mutator;  // which line of code last changed kick state
  bool initialized_cv;     // has 'cv' below been gpr_cv_init()ed?
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
  gpr_cv cv;  // used to park/wake non-designated-poller workers
  grpc_closure_list schedule_on_end_work;
};

/* Records both the new state and (for debugging) the source line that made
 * the change. */
#define SET_KICK_STATE(worker, kick_state)   \
  do {                                       \
    (worker)->state = (kick_state);          \
    (worker)->kick_state_mutator = __LINE__; \
  } while (false)

#define MAX_NEIGHBORHOODS 1024

/* A shard of pollsets; padded to a cacheline to avoid false sharing between
 * neighborhood mutexes. */
typedef struct pollset_neighborhood {
  gpr_mu mu;
  grpc_pollset* active_root;  /* ring of active pollsets in this shard */
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighborhood;
Craig Tiller6de05932017-04-28 09:17:38 -0700194
struct grpc_pollset {
  gpr_mu mu;
  /* The neighborhood (shard) this pollset currently belongs to. */
  pollset_neighborhood* neighborhood;
  bool reassigning_neighborhood;
  grpc_pollset_worker* root_worker;  /* circular list of attached workers */
  bool kicked_without_poller;

  /* Set to true if the pollset is observed to have no workers available to
     poll */
  bool seen_inactive;
  bool shutting_down;             /* Is the pollset shutting down ? */
  grpc_closure* shutdown_closure; /* Called after after shutdown is complete */

  /* Number of workers who are *about-to* attach themselves to the pollset
   * worker list */
  int begin_refs;

  /* Links in the neighborhood's ring of active pollsets. */
  grpc_pollset* next;
  grpc_pollset* prev;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

/* This engine has no per-pollset-set state; the type exists only to satisfy
 * the ev_posix interface. */
struct grpc_pollset_set {
  char unused;
};
Craig Tillerc67cc992017-04-27 10:15:51 -0700223
224/*******************************************************************************
225 * Common helpers
226 */
227
Craig Tillerbaa14a92017-11-03 09:09:36 -0700228static bool append_error(grpc_error** composite, grpc_error* error,
229 const char* desc) {
Craig Tillerc67cc992017-04-27 10:15:51 -0700230 if (error == GRPC_ERROR_NONE) return true;
231 if (*composite == GRPC_ERROR_NONE) {
232 *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
233 }
234 *composite = grpc_error_add_child(*composite, error);
235 return false;
236}
237
238/*******************************************************************************
239 * Fd Definitions
240 */
241
/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;  /* guards fd_freelist */

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
264
265static void fd_global_shutdown(void) {
266 gpr_mu_lock(&fd_freelist_mu);
267 gpr_mu_unlock(&fd_freelist_mu);
Craig Tiller4782d922017-11-10 09:53:21 -0800268 while (fd_freelist != nullptr) {
Craig Tillerbaa14a92017-11-03 09:09:36 -0700269 grpc_fd* fd = fd_freelist;
Craig Tillerc67cc992017-04-27 10:15:51 -0700270 fd_freelist = fd_freelist->freelist_next;
Craig Tillerc67cc992017-04-27 10:15:51 -0700271 gpr_free(fd);
272 }
273 gpr_mu_destroy(&fd_freelist_mu);
274}
275
/* Wraps OS descriptor 'fd' in a grpc_fd (reusing a freelisted struct when
 * available) and registers it with the singleton epoll set for edge-triggered
 * read/write events. 'name' is used for debug bookkeeping only. */
static grpc_fd* fd_create(int fd, const char* name, bool track_err) {
  grpc_fd* new_fd = nullptr;

  /* Prefer recycling a struct from the freelist (see the freelist note
   * above: this avoids use-after-free races with in-flight pollers). */
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = static_cast<grpc_fd*>(gpr_malloc(sizeof(grpc_fd)));
    /* Construct the LockfreeEvent storage exactly once per allocation. */
    new_fd->read_closure.Init();
    new_fd->write_closure.Init();
    new_fd->error_closure.Init();
  }
  new_fd->fd = fd;
  /* (Re-)arm the events for this incarnation of the struct. */
  new_fd->read_closure->InitEvent();
  new_fd->write_closure->InitEvent();
  new_fd->error_closure->InitEvent();
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = nullptr;

  char* fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);

  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLOUT | EPOLLET);
  /* Use the least significant bit of ev.data.ptr to store track_err. We expect
   * the addresses to be word aligned. We need to store track_err to avoid
   * synchronization issues when accessing it after receiving an event. */
  ev.data.ptr = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(new_fd) |
                                        (track_err ? 1 : 0));
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    /* Registration failure is logged but not fatal here. */
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}
323
/* Returns the raw OS file descriptor wrapped by 'fd'. */
static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }
/* if 'releasing_fd' is true, it means that we are going to detach the internal
 * fd from grpc_fd structure (i.e which means we should not be calling
 * shutdown() syscall on that fd) */
static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why,
                                 bool releasing_fd) {
  /* SetShutdown returns true only on the first shutdown; subsequent calls
   * skip the body, making this safe to call multiple times. */
  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
    if (!releasing_fd) {
      shutdown(fd->fd, SHUT_RDWR);
    }
    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
    fd->error_closure->SetShutdown(GRPC_ERROR_REF(why));
  }
  /* Balance the caller's ref on 'why'. */
  GRPC_ERROR_UNREF(why);
}
340
/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  /* Public entry point: we are not detaching the fd, so allow shutdown(2). */
  fd_shutdown_internal(fd, why, false);
}
345
/* Releases 'fd'. If 'release_fd' is non-NULL the OS descriptor is handed back
 * to the caller through it (and not closed here); otherwise it is closed
 * unless 'already_closed'. 'on_done' is scheduled once cleanup completes and
 * the struct is returned to the freelist. */
static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      bool already_closed, const char* reason) {
  grpc_error* error = GRPC_ERROR_NONE;
  bool is_release_fd = (release_fd != nullptr);

  /* Ensure the fd is shut down before tearing it apart. */
  if (!fd->read_closure->IsShutdown()) {
    fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                         is_release_fd);
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (is_release_fd) {
    *release_fd = fd->fd;
  } else if (!already_closed) {
    close(fd->fd);
  }

  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  fd->read_closure->DestroyEvent();
  fd->write_closure->DestroyEvent();
  fd->error_closure->DestroyEvent();

  /* Park the struct on the freelist for reuse (never freed here — see the
   * freelist rationale above). */
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}
376
/* Returns the pollset that last observed this fd readable. Acquire load pairs
 * with the release store in fd_become_readable. */
static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset*)notifier;
}

/* An fd counts as shut down once its read event has been shut down. */
static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure->IsShutdown();
}
385
/* Registers 'closure' to run when the fd becomes readable. */
static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure->NotifyOn(closure);
}

/* Registers 'closure' to run when the fd becomes writable. */
static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure->NotifyOn(closure);
}

/* Registers 'closure' to run when the fd reports an error condition. */
static void fd_notify_on_error(grpc_fd* fd, grpc_closure* closure) {
  fd->error_closure->NotifyOn(closure);
}

/* Marks the fd readable and records which pollset noticed it. */
static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
  fd->read_closure->SetReady();
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }

static void fd_has_errors(grpc_fd* fd) { fd->error_closure->SetReady(); }
407
Craig Tillerc67cc992017-04-27 10:15:51 -0700408/*******************************************************************************
409 * Pollset Definitions
410 */
411
/* Thread-local records of the pollset/worker the current thread is running,
 * used to detect re-entrant polling. */
GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* The designated poller */
static gpr_atm g_active_poller;

/* Array of pollset shards (sized at pollset_global_init). */
static pollset_neighborhood* g_neighborhoods;
static size_t g_num_neighborhoods;
Craig Tiller6de05932017-04-28 09:17:38 -0700420
Craig Tillerc67cc992017-04-27 10:15:51 -0700421/* Return true if first in list */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700422static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) {
Craig Tiller4782d922017-11-10 09:53:21 -0800423 if (pollset->root_worker == nullptr) {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700424 pollset->root_worker = worker;
425 worker->next = worker->prev = worker;
Craig Tillerc67cc992017-04-27 10:15:51 -0700426 return true;
427 } else {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700428 worker->next = pollset->root_worker;
429 worker->prev = worker->next->prev;
430 worker->next->prev = worker;
431 worker->prev->next = worker;
Craig Tillerc67cc992017-04-27 10:15:51 -0700432 return false;
433 }
434}
435
436/* Return true if last in list */
437typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;
438
Craig Tillerbaa14a92017-11-03 09:09:36 -0700439static worker_remove_result worker_remove(grpc_pollset* pollset,
440 grpc_pollset_worker* worker) {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700441 if (worker == pollset->root_worker) {
442 if (worker == worker->next) {
Craig Tiller4782d922017-11-10 09:53:21 -0800443 pollset->root_worker = nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -0700444 return EMPTIED;
445 } else {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700446 pollset->root_worker = worker->next;
447 worker->prev->next = worker->next;
448 worker->next->prev = worker->prev;
Craig Tillerc67cc992017-04-27 10:15:51 -0700449 return NEW_ROOT;
450 }
451 } else {
Craig Tiller32f90ee2017-04-28 12:46:41 -0700452 worker->prev->next = worker->next;
453 worker->next->prev = worker->prev;
Craig Tillerc67cc992017-04-27 10:15:51 -0700454 return REMOVED;
455 }
456}
457
Vijay Pai4b7ef4d2017-09-11 23:09:22 -0700458static size_t choose_neighborhood(void) {
Noah Eisenbe82e642018-02-09 09:16:55 -0800459 return static_cast<size_t>(gpr_cpu_current_cpu()) % g_num_neighborhoods;
Craig Tillerba550da2017-05-01 14:26:31 +0000460}
461
/* Global one-time pollset setup: TLS slots, the global wakeup fd (registered
 * edge-triggered with the singleton epoll set), and the neighborhood shards
 * (one per CPU, clamped to [1, MAX_NEIGHBORHOODS]). */
static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  /* Sentinel so pollset_global_shutdown knows whether init succeeded. */
  global_wakeup_fd.read_fd = -1;
  grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev;
  ev.events = static_cast<uint32_t>(EPOLLIN | EPOLLET);
  /* data.ptr == &global_wakeup_fd lets the poll loop distinguish wakeups
   * from fd events. */
  ev.data.ptr = &global_wakeup_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
                &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
  g_neighborhoods = static_cast<pollset_neighborhood*>(
      gpr_zalloc(sizeof(*g_neighborhoods) * g_num_neighborhoods));
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_init(&g_neighborhoods[i].mu);
  }
  return GRPC_ERROR_NONE;
}
484
485static void pollset_global_shutdown(void) {
Craig Tiller4509c472017-04-27 19:05:13 +0000486 gpr_tls_destroy(&g_current_thread_pollset);
487 gpr_tls_destroy(&g_current_thread_worker);
Craig Tiller375eb252017-04-27 23:29:12 +0000488 if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
Vijay Pai4b7ef4d2017-09-11 23:09:22 -0700489 for (size_t i = 0; i < g_num_neighborhoods; i++) {
490 gpr_mu_destroy(&g_neighborhoods[i].mu);
Craig Tiller32f90ee2017-04-28 12:46:41 -0700491 }
Vijay Pai4b7ef4d2017-09-11 23:09:22 -0700492 gpr_free(g_neighborhoods);
Craig Tiller4509c472017-04-27 19:05:13 +0000493}
494
/* Initializes a pollset, assigning it to a neighborhood shard; '*mu' receives
 * the pollset's mutex for the caller to use. New pollsets start inactive. */
static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
  pollset->reassigning_neighborhood = false;
  pollset->root_worker = nullptr;
  pollset->kicked_without_poller = false;
  /* Starts inactive: not yet linked into the neighborhood's active ring. */
  pollset->seen_inactive = true;
  pollset->shutting_down = false;
  pollset->shutdown_closure = nullptr;
  pollset->begin_refs = 0;
  pollset->next = pollset->prev = nullptr;
}
508
/* Destroys a pollset, first unlinking it from its neighborhood's active ring
 * if it is still active. Lock order is neighborhood->mu before pollset->mu,
 * so we must drop the pollset lock, take the neighborhood lock, and re-check
 * — retrying if the pollset migrated to a different neighborhood meanwhile. */
static void pollset_destroy(grpc_pollset* pollset) {
  gpr_mu_lock(&pollset->mu);
  if (!pollset->seen_inactive) {
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (!pollset->seen_inactive) {
      if (pollset->neighborhood != neighborhood) {
        /* The pollset moved shards while we were unlocked; chase it. */
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }
      /* Unlink from the neighborhood's circular pollset list. */
      pollset->prev->next = pollset->next;
      pollset->next->prev = pollset->prev;
      if (pollset == pollset->neighborhood->active_root) {
        pollset->neighborhood->active_root =
            pollset->next == pollset ? nullptr : pollset->next;
      }
    }
    gpr_mu_unlock(&pollset->neighborhood->mu);
  }
  gpr_mu_unlock(&pollset->mu);
  gpr_mu_destroy(&pollset->mu);
}
536
/* Kicks every worker attached to 'pollset': already-kicked workers are left
 * alone, parked workers are signalled via their cv, and the designated poller
 * is woken through the global wakeup fd. Caller must hold pollset->mu. */
static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_SCOPE("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->root_worker != nullptr) {
    grpc_pollset_worker* worker = pollset->root_worker;
    do {
      GRPC_STATS_INC_POLLSET_KICK();
      switch (worker->state) {
        case KICKED:
          /* Already kicked: nothing further to do. */
          GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
          break;
        case UNKICKED:
          SET_KICK_STATE(worker, KICKED);
          if (worker->initialized_cv) {
            /* Worker is parked on its condvar; wake it. */
            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
            gpr_cv_signal(&worker->cv);
          }
          break;
        case DESIGNATED_POLLER:
          /* The poller is blocked in epoll_wait; poke the wakeup fd. */
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
          SET_KICK_STATE(worker, KICKED);
          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                       "pollset_kick_all");
          break;
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  // TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
  // in the else case
  return error;
}
570
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800571static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
Craig Tiller4782d922017-11-10 09:53:21 -0800572 if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
Craig Tillerba550da2017-05-01 14:26:31 +0000573 pollset->begin_refs == 0) {
yang-gdf92a642017-08-21 22:38:45 -0700574 GPR_TIMER_MARK("pollset_finish_shutdown", 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800575 GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
Craig Tiller4782d922017-11-10 09:53:21 -0800576 pollset->shutdown_closure = nullptr;
Craig Tiller4509c472017-04-27 19:05:13 +0000577 }
578}
579
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800580static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
yang-gce1cfea2018-01-31 15:59:50 -0800581 GPR_TIMER_SCOPE("pollset_shutdown", 0);
Craig Tiller4782d922017-11-10 09:53:21 -0800582 GPR_ASSERT(pollset->shutdown_closure == nullptr);
Craig Tillerc81512a2017-05-26 09:53:58 -0700583 GPR_ASSERT(!pollset->shutting_down);
Craig Tiller4509c472017-04-27 19:05:13 +0000584 pollset->shutdown_closure = closure;
Craig Tillerc81512a2017-05-26 09:53:58 -0700585 pollset->shutting_down = true;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800586 GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
587 pollset_maybe_finish_shutdown(pollset);
Craig Tiller4509c472017-04-27 19:05:13 +0000588}
589
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800590static int poll_deadline_to_millis_timeout(grpc_millis millis) {
Craig Tiller20397792017-07-18 11:35:27 -0700591 if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800592 grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
Craig Tillerd9b82bd2017-08-29 12:16:56 -0700593 if (delta > INT_MAX) {
Craig Tiller20397792017-07-18 11:35:27 -0700594 return INT_MAX;
Craig Tillerd9b82bd2017-08-29 12:16:56 -0700595 } else if (delta < 0) {
Craig Tiller4509c472017-04-27 19:05:13 +0000596 return 0;
Craig Tillerd9b82bd2017-08-29 12:16:56 -0700597 } else {
Noah Eisenbe82e642018-02-09 09:16:55 -0800598 return static_cast<int>(delta);
Craig Tiller4509c472017-04-27 19:05:13 +0000599 }
Craig Tiller4509c472017-04-27 19:05:13 +0000600}
601
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700602/* Process the epoll events found by do_epoll_wait() function.
603 - g_epoll_set.cursor points to the index of the first event to be processed
604 - This function then processes up-to MAX_EPOLL_EVENTS_PER_ITERATION and
605 updates the g_epoll_set.cursor
Craig Tiller4509c472017-04-27 19:05:13 +0000606
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700607 NOTE ON SYNCRHONIZATION: Similar to do_epoll_wait(), this function is only
608 called by g_active_poller thread. So there is no need for synchronization
609 when accessing fields in g_epoll_set */
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800610static grpc_error* process_epoll_events(grpc_pollset* pollset) {
yang-gce1cfea2018-01-31 15:59:50 -0800611 GPR_TIMER_SCOPE("process_epoll_events", 0);
612
Craig Tillerbaa14a92017-11-03 09:09:36 -0700613 static const char* err_desc = "process_events";
614 grpc_error* error = GRPC_ERROR_NONE;
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700615 long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
616 long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
617 for (int idx = 0;
618 (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700619 idx++) {
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700620 long c = cursor++;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700621 struct epoll_event* ev = &g_epoll_set.events[c];
622 void* data_ptr = ev->data.ptr;
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700623
Craig Tiller4509c472017-04-27 19:05:13 +0000624 if (data_ptr == &global_wakeup_fd) {
Craig Tiller4509c472017-04-27 19:05:13 +0000625 append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
626 err_desc);
627 } else {
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700628 grpc_fd* fd = reinterpret_cast<grpc_fd*>(
629 reinterpret_cast<intptr_t>(data_ptr) & ~static_cast<intptr_t>(1));
630 bool track_err =
Yash Tibrewal21e36032018-06-05 10:55:13 -0700631 reinterpret_cast<intptr_t>(data_ptr) & static_cast<intptr_t>(1);
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700632 bool cancel = (ev->events & EPOLLHUP) != 0;
633 bool error = (ev->events & EPOLLERR) != 0;
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700634 bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
635 bool write_ev = (ev->events & EPOLLOUT) != 0;
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700636 bool err_fallback = error && !track_err;
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700637
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700638 if (error && !err_fallback) {
639 fd_has_errors(fd);
640 }
641
642 if (read_ev || cancel || err_fallback) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800643 fd_become_readable(fd, pollset);
Craig Tiller4509c472017-04-27 19:05:13 +0000644 }
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700645
Yash Tibrewaladc733f2018-04-02 18:32:06 -0700646 if (write_ev || cancel || err_fallback) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800647 fd_become_writable(fd);
Craig Tiller4509c472017-04-27 19:05:13 +0000648 }
649 }
650 }
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700651 gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
Craig Tiller4509c472017-04-27 19:05:13 +0000652 return error;
653}
654
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700655/* Do epoll_wait and store the events in g_epoll_set.events field. This does not
656 "process" any of the events yet; that is done in process_epoll_events().
657 *See process_epoll_events() function for more details.
658
659 NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
660 (i.e the designated poller thread) will be calling this function. So there is
661 no need for any synchronization when accesing fields in g_epoll_set */
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800662static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
yang-gce1cfea2018-01-31 15:59:50 -0800663 GPR_TIMER_SCOPE("do_epoll_wait", 0);
Craig Tiller4509c472017-04-27 19:05:13 +0000664
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700665 int r;
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800666 int timeout = poll_deadline_to_millis_timeout(deadline);
Craig Tiller4509c472017-04-27 19:05:13 +0000667 if (timeout != 0) {
668 GRPC_SCHEDULING_START_BLOCKING_REGION;
669 }
Craig Tiller4509c472017-04-27 19:05:13 +0000670 do {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800671 GRPC_STATS_INC_SYSCALL_POLL();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700672 r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
673 timeout);
Craig Tiller4509c472017-04-27 19:05:13 +0000674 } while (r < 0 && errno == EINTR);
675 if (timeout != 0) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800676 GRPC_SCHEDULING_END_BLOCKING_REGION;
Craig Tiller4509c472017-04-27 19:05:13 +0000677 }
678
679 if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");
680
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800681 GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);
Craig Tiller0ff222a2017-09-01 09:41:43 -0700682
ncteisen3cffe1f2017-11-10 13:56:23 -0800683 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -0700684 gpr_log(GPR_INFO, "ps: %p poll got %d events", ps, r);
Craig Tiller4509c472017-04-27 19:05:13 +0000685 }
686
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700687 gpr_atm_rel_store(&g_epoll_set.num_events, r);
688 gpr_atm_rel_store(&g_epoll_set.cursor, 0);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700689
690 return GRPC_ERROR_NONE;
Craig Tiller4509c472017-04-27 19:05:13 +0000691}
692
/* Register |worker| with |pollset| and decide whether it should poll.
   Returns true iff this worker became (or already is) the DESIGNATED_POLLER
   and the pollset is not shutting down; otherwise the worker either slept on
   its condvar until kicked / deadline, or bails out immediately.
   Called with pollset->mu held; the lock is briefly dropped while taking the
   neighborhood lock and while blocked in gpr_cv_wait (see comment near the
   bottom). */
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_SCOPE("begin_worker", 0);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  SET_KICK_STATE(worker, UNKICKED);
  worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
  /* begin_refs keeps pollset_maybe_finish_shutdown from firing while this
     worker is still being set up */
  pollset->begin_refs++;

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PS:%p BEGIN_STARTS:%p", pollset, worker);
  }

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move back to the
    // active list
    bool is_reassigning = false;
    if (!pollset->reassigning_neighborhood) {
      is_reassigning = true;
      pollset->reassigning_neighborhood = true;
      pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
    }
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  // pollset unlocked: state may change (even worker->kick_state)
  retry_lock_neighborhood:
    /* lock order is neighborhood->mu before pollset->mu, hence the
       unlock/relock dance above */
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_INFO, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
              pollset, worker, kick_state_string(worker->state),
              is_reassigning);
    }
    if (pollset->seen_inactive) {
      /* another thread may have moved the pollset to a different
         neighborhood while we were unlocked; chase it */
      if (neighborhood != pollset->neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }

      /* In the brief time we released the pollset locks above, the worker MAY
         have been kicked. In this case, the worker should get out of this
         pollset ASAP and hence this should neither add the pollset to
         neighborhood nor mark the pollset as active.

         On a side note, the only way a worker's kick state could have changed
         at this point is if it were "kicked specifically". Since the worker has
         not added itself to the pollset yet (by calling worker_insert()), it is
         not visible in the "kick any" path yet */
      if (worker->state == UNKICKED) {
        pollset->seen_inactive = false;
        if (neighborhood->active_root == nullptr) {
          /* first active pollset in this neighborhood: make it the root of a
             one-element circular list */
          neighborhood->active_root = pollset->next = pollset->prev = pollset;
          /* Make this the designated poller if there isn't one already */
          if (worker->state == UNKICKED &&
              gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
            SET_KICK_STATE(worker, DESIGNATED_POLLER);
          }
        } else {
          /* splice pollset into the neighborhood's circular active list */
          pollset->next = neighborhood->active_root;
          pollset->prev = pollset->next->prev;
          pollset->next->prev = pollset->prev->next = pollset;
        }
      }
    }
    if (is_reassigning) {
      GPR_ASSERT(pollset->reassigning_neighborhood);
      pollset->reassigning_neighborhood = false;
    }
    gpr_mu_unlock(&neighborhood->mu);
  }

  worker_insert(pollset, worker);
  pollset->begin_refs--;
  if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
    /* not the designated poller: sleep until kicked, shut down, or the
       deadline expires */
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (worker->state == UNKICKED && !pollset->shutting_down) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
                pollset, worker, kick_state_string(worker->state),
                pollset->shutting_down);
      }

      if (gpr_cv_wait(&worker->cv, &pollset->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
          worker->state == UNKICKED) {
        /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
           received a kick */
        SET_KICK_STATE(worker, KICKED);
      }
    }
    /* we may have slept for a while; the cached clock is stale */
    grpc_core::ExecCtx::Get()->InvalidateNow();
  }

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO,
            "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
            "kicked_without_poller: %d",
            pollset, worker, kick_state_string(worker->state),
            pollset->shutting_down, pollset->kicked_without_poller);
  }

  /* We release pollset lock in this function at a couple of places:
   * 1. Briefly when assigning pollset to a neighborhood
   * 2. When doing gpr_cv_wait()
   * It is possible that 'kicked_without_poller' was set to true during (1) and
   * 'shutting_down' is set to true during (1) or (2). If either of them is
   * true, this worker cannot do polling */
  /* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller
   * case; especially when the worker is the DESIGNATED_POLLER */

  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    return false;
  }

  return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}
815
/* Scan |neighborhood|'s active pollset list for a worker that can take over
   as the designated poller. Returns true iff some worker was promoted (or
   was found to already be the DESIGNATED_POLLER). Pollsets on which no
   usable worker is found are marked inactive and unlinked from the
   neighborhood's circular active list.
   Called with neighborhood->mu held (see end_worker). */
static bool check_neighborhood_for_available_poller(
    pollset_neighborhood* neighborhood) {
  GPR_TIMER_SCOPE("check_neighborhood_for_available_poller", 0);
  bool found_worker = false;
  do {
    grpc_pollset* inspect = neighborhood->active_root;
    if (inspect == nullptr) {
      /* neighborhood has no active pollsets left */
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker* inspect_worker = inspect->root_worker;
    if (inspect_worker != nullptr) {
      /* walk this pollset's circular worker list looking for a candidate */
      do {
        switch (inspect_worker->state) {
          case UNKICKED:
            /* try to claim the designated-poller slot for this worker */
            if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                       (gpr_atm)inspect_worker)) {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_INFO, " .. choose next poller to be %p",
                        inspect_worker);
              }
              SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
              if (inspect_worker->initialized_cv) {
                GPR_TIMER_MARK("signal worker", 0);
                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
                gpr_cv_signal(&inspect_worker->cv);
              }
            } else {
              /* another thread installed a designated poller first */
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_INFO, " .. beaten to choose next poller");
              }
            }
            // even if we didn't win the cas, there's a worker, we can stop
            found_worker = true;
            break;
          case KICKED:
            /* already kicked: not a candidate, keep scanning */
            break;
          case DESIGNATED_POLLER:
            found_worker = true;  // ok, so someone else found the worker, but
                                  // we'll accept that
            break;
        }
        inspect_worker = inspect_worker->next;
      } while (!found_worker && inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      /* no usable worker on this pollset: retire it from the active list */
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO, " .. mark pollset %p inactive", inspect);
      }
      inspect->seen_inactive = true;
      if (inspect == neighborhood->active_root) {
        /* advance the root; nullptr if this was the last active pollset */
        neighborhood->active_root =
            inspect->next == inspect ? nullptr : inspect->next;
      }
      /* unlink from the circular doubly-linked list */
      inspect->next->prev = inspect->prev;
      inspect->prev->next = inspect->next;
      inspect->next = inspect->prev = nullptr;
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  return found_worker;
}
879
/* Tear down |worker|'s participation in |pollset|. If this worker was the
   designated poller, hand the role to a peer on the same pollset when
   possible, otherwise scan the neighborhoods for a replacement
   (check_neighborhood_for_available_poller). Also flushes any closures the
   worker queued, destroys its condvar, and completes a pending pollset
   shutdown if this was the last worker.
   Called with pollset->mu held; the lock is dropped and re-acquired around
   ExecCtx flushes and the neighborhood scan. */
static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_SCOPE("end_worker", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, "PS:%p END_WORKER:%p", pollset, worker);
  }
  if (worker_hdl != nullptr) *worker_hdl = nullptr;
  /* Make sure we appear kicked */
  SET_KICK_STATE(worker, KICKED);
  /* hand off any work this worker accumulated to the current exec_ctx */
  grpc_closure_list_move(&worker->schedule_on_end_work,
                         grpc_core::ExecCtx::Get()->closure_list());
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    /* we were the designated poller: find a successor */
    if (worker->next != worker && worker->next->state == UNKICKED) {
      /* fast path: promote the next worker on this same pollset */
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_INFO, " .. choose next poller to be peer %p", worker);
      }
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
      gpr_cv_signal(&worker->next->cv);
      if (grpc_core::ExecCtx::Get()->HasWork()) {
        gpr_mu_unlock(&pollset->mu);
        grpc_core::ExecCtx::Get()->Flush();
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      /* slow path: no local successor; search the neighborhoods, starting
         from this pollset's own neighborhood */
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      size_t poller_neighborhood_idx =
          static_cast<size_t>(pollset->neighborhood - g_neighborhoods);
      gpr_mu_unlock(&pollset->mu);
      bool found_worker = false;
      bool scan_state[MAX_NEIGHBORHOODS];
      /* first pass: only trylock, to avoid stalling behind busy
         neighborhoods; remember which ones we actually covered */
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        if (gpr_mu_trylock(&neighborhood->mu)) {
          found_worker = check_neighborhood_for_available_poller(neighborhood);
          gpr_mu_unlock(&neighborhood->mu);
          scan_state[i] = true;
        } else {
          scan_state[i] = false;
        }
      }
      /* second pass: block on the neighborhoods the trylock pass skipped */
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        if (scan_state[i]) continue;
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        gpr_mu_lock(&neighborhood->mu);
        found_worker = check_neighborhood_for_available_poller(neighborhood);
        gpr_mu_unlock(&neighborhood->mu);
      }
      grpc_core::ExecCtx::Get()->Flush();
      gpr_mu_lock(&pollset->mu);
    }
  } else if (grpc_core::ExecCtx::Get()->HasWork()) {
    /* not the designated poller, but there is queued work: run it without
       holding the pollset lock */
    gpr_mu_unlock(&pollset->mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(&pollset->mu);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_INFO, " .. remove worker");
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    /* last worker gone: a pending shutdown can now complete */
    pollset_maybe_finish_shutdown(pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
}
953
/* pollset->po.mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->po.mu)
   during the course of its execution but it will always re-acquire the lock and
   ensure that it is held by the time the function returns.

   Runs one iteration of polling work for |ps| on behalf of a stack-allocated
   worker: if begin_worker() elects this thread as the designated poller it
   performs (or continues) an epoll cycle; otherwise it returns after
   end_worker() has cleaned up. Returns the accumulated error, if any. */
static grpc_error* pollset_work(grpc_pollset* ps,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  GPR_TIMER_SCOPE("pollset_work", 0);
  grpc_pollset_worker worker;
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_work";
  if (ps->kicked_without_poller) {
    /* a kick arrived while no worker was present: consume it and return */
    ps->kicked_without_poller = false;
    return GRPC_ERROR_NONE;
  }

  if (begin_worker(ps, &worker, worker_hdl, deadline)) {
    /* this thread is the designated poller */
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!ps->shutting_down);
    GPR_ASSERT(!ps->seen_inactive);

    gpr_mu_unlock(&ps->mu); /* unlock */
    /* This is the designated polling thread at this point and should ideally do
       polling. However, if there are unprocessed events left from a previous
       call to do_epoll_wait(), skip calling epoll_wait() in this iteration and
       process the pending epoll events.

       The reason for decoupling do_epoll_wait and process_epoll_events is to
       better distribute the work (i.e handling epoll events) across multiple
       threads

       process_epoll_events() returns very quickly: It just queues the work on
       exec_ctx but does not execute it (the actual execution or more
       accurately grpc_core::ExecCtx::Get()->Flush() happens in end_worker()
       AFTER selecting a designated poller). So we are not waiting long periods
       without a designated poller */
    if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
        gpr_atm_acq_load(&g_epoll_set.num_events)) {
      append_error(&error, do_epoll_wait(ps, deadline), err_desc);
    }
    append_error(&error, process_epoll_events(ps), err_desc);

    gpr_mu_lock(&ps->mu); /* lock */

    gpr_tls_set(&g_current_thread_worker, 0);
  } else {
    /* not the poller; still record the pollset for kick bookkeeping */
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
  }
  end_worker(ps, &worker, worker_hdl);

  gpr_tls_set(&g_current_thread_pollset, 0);
  return error;
}
1008
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001009static grpc_error* pollset_kick(grpc_pollset* pollset,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001010 grpc_pollset_worker* specific_worker) {
yang-gce1cfea2018-01-31 15:59:50 -08001011 GPR_TIMER_SCOPE("pollset_kick", 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001012 GRPC_STATS_INC_POLLSET_KICK();
Craig Tillerbaa14a92017-11-03 09:09:36 -07001013 grpc_error* ret_err = GRPC_ERROR_NONE;
ncteisen3cffe1f2017-11-10 13:56:23 -08001014 if (grpc_polling_trace.enabled()) {
Craig Tillerb89bac02017-05-26 15:20:32 +00001015 gpr_strvec log;
1016 gpr_strvec_init(&log);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001017 char* tmp;
1018 gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
1019 specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
1020 (void*)gpr_tls_get(&g_current_thread_worker),
1021 pollset->root_worker);
Craig Tillerb89bac02017-05-26 15:20:32 +00001022 gpr_strvec_add(&log, tmp);
Craig Tiller4782d922017-11-10 09:53:21 -08001023 if (pollset->root_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001024 gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001025 kick_state_string(pollset->root_worker->state),
Craig Tiller830e82a2017-05-31 16:26:27 -07001026 pollset->root_worker->next,
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001027 kick_state_string(pollset->root_worker->next->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001028 gpr_strvec_add(&log, tmp);
1029 }
Craig Tiller4782d922017-11-10 09:53:21 -08001030 if (specific_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001031 gpr_asprintf(&tmp, " worker_kick_state=%s",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001032 kick_state_string(specific_worker->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001033 gpr_strvec_add(&log, tmp);
1034 }
Craig Tiller4782d922017-11-10 09:53:21 -08001035 tmp = gpr_strvec_flatten(&log, nullptr);
Craig Tillerb89bac02017-05-26 15:20:32 +00001036 gpr_strvec_destroy(&log);
yang-g69b4e4c2018-01-24 14:36:20 -08001037 gpr_log(GPR_DEBUG, "%s", tmp);
Craig Tillerb89bac02017-05-26 15:20:32 +00001038 gpr_free(tmp);
1039 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001040
Craig Tiller4782d922017-11-10 09:53:21 -08001041 if (specific_worker == nullptr) {
Craig Tiller4509c472017-04-27 19:05:13 +00001042 if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001043 grpc_pollset_worker* root_worker = pollset->root_worker;
Craig Tiller4782d922017-11-10 09:53:21 -08001044 if (root_worker == nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001045 GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
Craig Tiller4509c472017-04-27 19:05:13 +00001046 pollset->kicked_without_poller = true;
ncteisen3cffe1f2017-11-10 13:56:23 -08001047 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001048 gpr_log(GPR_INFO, " .. kicked_without_poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001049 }
yang-gdf92a642017-08-21 22:38:45 -07001050 goto done;
Craig Tiller375eb252017-04-27 23:29:12 +00001051 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001052 grpc_pollset_worker* next_worker = root_worker->next;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001053 if (root_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001054 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001055 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001056 gpr_log(GPR_INFO, " .. already kicked %p", root_worker);
Craig Tiller830e82a2017-05-31 16:26:27 -07001057 }
1058 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001059 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001060 } else if (next_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001061 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001062 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001063 gpr_log(GPR_INFO, " .. already kicked %p", next_worker);
Craig Tiller830e82a2017-05-31 16:26:27 -07001064 }
1065 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001066 goto done;
Craig Tiller830e82a2017-05-31 16:26:27 -07001067 } else if (root_worker ==
1068 next_worker && // only try and wake up a poller if
1069 // there is no next worker
Craig Tillerbaa14a92017-11-03 09:09:36 -07001070 root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
Craig Tiller830e82a2017-05-31 16:26:27 -07001071 &g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001072 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001073 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001074 gpr_log(GPR_INFO, " .. kicked %p", root_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001075 }
Craig Tiller55624a32017-05-26 08:14:44 -07001076 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001077 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1078 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001079 } else if (next_worker->state == UNKICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001080 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001081 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001082 gpr_log(GPR_INFO, " .. kicked %p", next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001083 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001084 GPR_ASSERT(next_worker->initialized_cv);
Craig Tiller55624a32017-05-26 08:14:44 -07001085 SET_KICK_STATE(next_worker, KICKED);
Craig Tiller375eb252017-04-27 23:29:12 +00001086 gpr_cv_signal(&next_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001087 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001088 } else if (next_worker->state == DESIGNATED_POLLER) {
1089 if (root_worker->state != DESIGNATED_POLLER) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001090 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001091 gpr_log(
Mark D. Roth48854d22018-04-25 13:05:26 -07001092 GPR_INFO,
Craig Tiller830e82a2017-05-31 16:26:27 -07001093 " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
1094 root_worker, root_worker->initialized_cv, next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001095 }
Craig Tiller55624a32017-05-26 08:14:44 -07001096 SET_KICK_STATE(root_worker, KICKED);
1097 if (root_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001098 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
Craig Tiller55624a32017-05-26 08:14:44 -07001099 gpr_cv_signal(&root_worker->cv);
1100 }
yang-gdf92a642017-08-21 22:38:45 -07001101 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001102 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001103 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001104 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001105 gpr_log(GPR_INFO, " .. non-root poller %p (root=%p)", next_worker,
Craig Tiller75aef7f2017-05-26 08:26:08 -07001106 root_worker);
1107 }
Craig Tiller55624a32017-05-26 08:14:44 -07001108 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001109 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1110 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001111 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001112 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001113 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001114 GPR_ASSERT(next_worker->state == KICKED);
Craig Tiller55624a32017-05-26 08:14:44 -07001115 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001116 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001117 }
1118 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001119 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001120 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001121 gpr_log(GPR_INFO, " .. kicked while waking up");
Craig Tiller830e82a2017-05-31 16:26:27 -07001122 }
yang-gdf92a642017-08-21 22:38:45 -07001123 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001124 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001125
1126 GPR_UNREACHABLE_CODE(goto done);
1127 }
1128
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001129 if (specific_worker->state == KICKED) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001130 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001131 gpr_log(GPR_INFO, " .. specific worker already kicked");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001132 }
yang-gdf92a642017-08-21 22:38:45 -07001133 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001134 } else if (gpr_tls_get(&g_current_thread_worker) ==
1135 (intptr_t)specific_worker) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001136 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001137 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001138 gpr_log(GPR_INFO, " .. mark %p kicked", specific_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001139 }
Craig Tiller55624a32017-05-26 08:14:44 -07001140 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001141 goto done;
Craig Tiller32f90ee2017-04-28 12:46:41 -07001142 } else if (specific_worker ==
Craig Tillerbaa14a92017-11-03 09:09:36 -07001143 (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001144 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001145 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001146 gpr_log(GPR_INFO, " .. kick active poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001147 }
Craig Tiller55624a32017-05-26 08:14:44 -07001148 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001149 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1150 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001151 } else if (specific_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001152 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001153 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001154 gpr_log(GPR_INFO, " .. kick waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001155 }
Craig Tiller55624a32017-05-26 08:14:44 -07001156 SET_KICK_STATE(specific_worker, KICKED);
Craig Tiller4509c472017-04-27 19:05:13 +00001157 gpr_cv_signal(&specific_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001158 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001159 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001160 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001161 if (grpc_polling_trace.enabled()) {
Mark D. Roth48854d22018-04-25 13:05:26 -07001162 gpr_log(GPR_INFO, " .. kick non-waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001163 }
Craig Tiller55624a32017-05-26 08:14:44 -07001164 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001165 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001166 }
yang-gdf92a642017-08-21 22:38:45 -07001167done:
yang-gdf92a642017-08-21 22:38:45 -07001168 return ret_err;
Craig Tiller4509c472017-04-27 19:05:13 +00001169}
1170
/* No-op: this engine keeps no per-pollset fd list (presumably fds are
   registered with a single global epoll set elsewhere — confirm in
   fd_create/epoll_set_init above). */
static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
Craig Tiller4509c472017-04-27 19:05:13 +00001172
Craig Tiller4509c472017-04-27 19:05:13 +00001173/*******************************************************************************
Craig Tillerc67cc992017-04-27 10:15:51 -07001174 * Pollset-set Definitions
1175 */
1176
Craig Tillerbaa14a92017-11-03 09:09:36 -07001177static grpc_pollset_set* pollset_set_create(void) {
Noah Eisenbe82e642018-02-09 09:16:55 -08001178 return (grpc_pollset_set*)(static_cast<intptr_t>(0xdeafbeef));
Craig Tillerc67cc992017-04-27 10:15:51 -07001179}
1180
/* No-op: there is no pollset_set state to release (see pollset_set_create,
   which returns a stateless sentinel). */
static void pollset_set_destroy(grpc_pollset_set* pss) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001182
/* No-op: pollset_set membership is not tracked by this engine. */
static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001184
/* No-op: pollset_set membership is not tracked by this engine. */
static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001186
/* No-op: pollset_set membership is not tracked by this engine. */
static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001188
/* No-op: pollset_set membership is not tracked by this engine. */
static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001190
/* No-op: nested pollset_sets are not tracked by this engine. */
static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
                                        grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001193
/* No-op: nested pollset_sets are not tracked by this engine. */
static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
                                        grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001196
1197/*******************************************************************************
1198 * Event engine binding
1199 */
1200
/* Releases the module-global state acquired in grpc_init_epoll1_linux()
   below: fd bookkeeping, pollset globals, and the epoll set itself.
   NOTE(review): the teardown order here is deliberate — keep it in sync
   with the init path before changing it. */
static void shutdown_engine(void) {
  fd_global_shutdown();
  pollset_global_shutdown();
  epoll_set_shutdown();
}
1206
/* Function table through which the iomgr layer drives this polling engine.
   Initializers are positional — order must match the grpc_event_engine_vtable
   declaration; the groups below mirror the fd / pollset / pollset_set APIs
   defined earlier in this file. */
static const grpc_event_engine_vtable vtable = {
    sizeof(grpc_pollset), /* per-pollset allocation size */
    true, /* NOTE(review): presumably the error-notification capability flag
             (cf. fd_notify_on_error) — confirm against the vtable decl. */

    /* fd operations */
    fd_create,
    fd_wrapped_fd,
    fd_orphan,
    fd_shutdown,
    fd_notify_on_read,
    fd_notify_on_write,
    fd_notify_on_error,
    fd_is_shutdown,
    fd_get_read_notifier_pollset,

    /* pollset operations */
    pollset_init,
    pollset_shutdown,
    pollset_destroy,
    pollset_work,
    pollset_kick,
    pollset_add_fd,

    /* pollset_set operations (no-ops in this engine) */
    pollset_set_create,
    pollset_set_destroy,
    pollset_set_add_pollset,
    pollset_set_del_pollset,
    pollset_set_add_pollset_set,
    pollset_set_del_pollset_set,
    pollset_set_add_fd,
    pollset_set_del_fd,

    /* engine teardown */
    shutdown_engine,
};
1239
1240/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001241 * Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll
1242 * support is available */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001243const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Craig Tillerc67cc992017-04-27 10:15:51 -07001244 if (!grpc_has_wakeup_fd()) {
yang-g30101b02017-11-06 14:35:30 -08001245 gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
Craig Tiller4782d922017-11-10 09:53:21 -08001246 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001247 }
1248
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001249 if (!epoll_set_init()) {
Craig Tiller4782d922017-11-10 09:53:21 -08001250 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001251 }
1252
Craig Tillerc67cc992017-04-27 10:15:51 -07001253 fd_global_init();
1254
1255 if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
Craig Tiller4509c472017-04-27 19:05:13 +00001256 fd_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001257 epoll_set_shutdown();
Craig Tiller4782d922017-11-10 09:53:21 -08001258 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001259 }
1260
1261 return &vtable;
1262}
1263
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001264#else /* defined(GRPC_LINUX_EPOLL) */
Craig Tillerc67cc992017-04-27 10:15:51 -07001265#if defined(GRPC_POSIX_SOCKET)
Yash Tibrewal1cac2232017-09-26 11:31:11 -07001266#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -07001267/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
1268 * NULL */
/* Stub for builds without GRPC_LINUX_EPOLL: epoll is unavailable, so this
   engine can never be selected; always report "not supported". */
const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
  return nullptr;
}
Craig Tillerc67cc992017-04-27 10:15:51 -07001272#endif /* defined(GRPC_POSIX_SOCKET) */
Mehrdad Afsharifb669002018-01-17 15:37:56 -08001273#endif /* !defined(GRPC_LINUX_EPOLL) */