/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h"

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/support/string.h"

static grpc_wakeup_fd global_wakeup_fd;

/*******************************************************************************
 * Singleton epoll set related fields
 */

#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1
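
/* MAX_EPOLL_EVENTS sizes the buffer handed to epoll_wait();
   MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION caps how many of the returned events
   process_epoll_events() consumes per pollset_work() iteration, so that the
   remaining events can be picked up by whichever thread becomes the designated
   poller next. */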

/* NOTE ON SYNCHRONIZATION:
 * - Fields in this struct are only modified by the designated poller. Hence
 *   there is no need for any locks to protect the struct.
 * - num_events and cursor fields have to be of atomic type to provide memory
 *   visibility guarantees only. i.e In case of multiple pollers, the designated
 *   polling thread keeps changing; the thread that wrote these values may be
 *   different from the thread reading the values
 */
typedef struct epoll_set {
  int epfd;

  /* The epoll_events after the last call to epoll_wait() */
  struct epoll_event events[MAX_EPOLL_EVENTS];

  /* The number of epoll_events after the last call to epoll_wait() */
  gpr_atm num_events;

  /* Index of the first event in epoll_events that has to be processed. This
   * field is only valid if num_events > 0 */
  gpr_atm cursor;
} epoll_set;

/* The global singleton epoll set */
static epoll_set g_epoll_set;

/* Must be called *only* once */
static bool epoll_set_init() {
  g_epoll_set.epfd = epoll_create1(EPOLL_CLOEXEC);
  if (g_epoll_set.epfd < 0) {
    gpr_log(GPR_ERROR, "epoll unavailable");
    return false;
  }

  gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
  gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
  gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
  return true;
}

/* epoll_set_init() MUST be called before calling this. */
static void epoll_set_shutdown() {
  if (g_epoll_set.epfd >= 0) {
    close(g_epoll_set.epfd);
    g_epoll_set.epfd = -1;
  }
}

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;

  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;

  struct grpc_fd* freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;

static const char* kick_state_string(kick_state st) {
  switch (st) {
    case UNKICKED:
      return "UNKICKED";
    case KICKED:
      return "KICKED";
    case DESIGNATED_POLLER:
      return "DESIGNATED_POLLER";
  }
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
}

struct grpc_pollset_worker {
  kick_state state;
  int kick_state_mutator;  // which line of code last changed kick state
  bool initialized_cv;
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
  gpr_cv cv;
  grpc_closure_list schedule_on_end_work;
};

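/* All kick state transitions go through SET_KICK_STATE so that
   kick_state_mutator records the source line of the last transition (see the
   field comment above). */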
#define SET_KICK_STATE(worker, kick_state)   \
  do {                                       \
    (worker)->state = (kick_state);          \
    (worker)->kick_state_mutator = __LINE__; \
  } while (false)

#define MAX_NEIGHBORHOODS 1024

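/* Pollsets are sharded into "neighborhoods", indexed by the CPU the calling
   thread happens to run on (see choose_neighborhood()). Each neighborhood
   tracks a circular list of active pollsets; the trailing pad is presumably
   there to keep neighborhoods on separate cache lines and limit false sharing
   on their mutexes. */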
typedef struct pollset_neighborhood {
  gpr_mu mu;
  grpc_pollset* active_root;
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighborhood;

struct grpc_pollset {
  gpr_mu mu;
  pollset_neighborhood* neighborhood;
  bool reassigning_neighborhood;
  grpc_pollset_worker* root_worker;
  bool kicked_without_poller;

  /* Set to true if the pollset is observed to have no workers available to
     poll */
  bool seen_inactive;
  bool shutting_down;             /* Is the pollset shutting down? */
  grpc_closure* shutdown_closure; /* Called after shutdown is complete */

  /* Number of workers who are *about-to* attach themselves to the pollset
   * worker list */
  int begin_refs;

  grpc_pollset* next;
  grpc_pollset* prev;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

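/* Pollset_sets are effectively a no-op for this engine: every fd is added
   directly to the single global epoll set in fd_create(), so the type only
   needs to exist for interface compatibility. */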
struct grpc_pollset_set {
  char unused;
};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error** composite, grpc_error* error,
                         const char* desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != nullptr) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd* fd_create(int fd, const char* name) {
  grpc_fd* new_fd = nullptr;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
    new_fd->read_closure.Init();
    new_fd->write_closure.Init();
  }

  new_fd->fd = fd;
  new_fd->read_closure->InitEvent();
  new_fd->write_closure->InitEvent();
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = nullptr;

  char* fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);

  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
  ev.data.ptr = new_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}

static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }

/* if 'releasing_fd' is true, it means that we are going to detach the internal
 * fd from grpc_fd structure (i.e which means we should not be calling
 * shutdown() syscall on that fd) */
static void fd_shutdown_internal(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                                 grpc_error* why, bool releasing_fd) {
  if (fd->read_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why))) {
    if (!releasing_fd) {
      shutdown(fd->fd, SHUT_RDWR);
    }
    fd->write_closure->SetShutdown(exec_ctx, GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

/* Might be called multiple times */
static void fd_shutdown(grpc_exec_ctx* exec_ctx, grpc_fd* fd, grpc_error* why) {
  fd_shutdown_internal(exec_ctx, fd, why, false);
}

static void fd_orphan(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                      grpc_closure* on_done, int* release_fd,
                      bool already_closed, const char* reason) {
  grpc_error* error = GRPC_ERROR_NONE;
  bool is_release_fd = (release_fd != nullptr);

  if (!fd->read_closure->IsShutdown()) {
    fd_shutdown_internal(exec_ctx, fd,
                         GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                         is_release_fd);
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (is_release_fd) {
    *release_fd = fd->fd;
  } else if (!already_closed) {
    close(fd->fd);
  }

  GRPC_CLOSURE_SCHED(exec_ctx, on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  fd->read_closure->DestroyEvent();
  fd->write_closure->DestroyEvent();

  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_pollset* fd_get_read_notifier_pollset(grpc_exec_ctx* exec_ctx,
                                                  grpc_fd* fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset*)notifier;
}

static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure->IsShutdown();
}

static void fd_notify_on_read(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                              grpc_closure* closure) {
  fd->read_closure->NotifyOn(exec_ctx, closure);
}

static void fd_notify_on_write(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                               grpc_closure* closure) {
  fd->write_closure->NotifyOn(exec_ctx, closure);
}

static void fd_become_readable(grpc_exec_ctx* exec_ctx, grpc_fd* fd,
                               grpc_pollset* notifier) {
  fd->read_closure->SetReady(exec_ctx);
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_exec_ctx* exec_ctx, grpc_fd* fd) {
  fd->write_closure->SetReady(exec_ctx);
}

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* The designated poller */
static gpr_atm g_active_poller;

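/* Array of pollset neighborhoods; sized from the core count (clamped to
   MAX_NEIGHBORHOODS) in pollset_global_init() */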
static pollset_neighborhood* g_neighborhoods;
static size_t g_num_neighborhoods;

/* Return true if first in list */
static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) {
  if (pollset->root_worker == nullptr) {
    pollset->root_worker = worker;
    worker->next = worker->prev = worker;
    return true;
  } else {
    worker->next = pollset->root_worker;
    worker->prev = worker->next->prev;
    worker->next->prev = worker;
    worker->prev->next = worker;
    return false;
  }
}

/* Result of worker_remove(): EMPTIED if the pollset's worker list became
   empty, NEW_ROOT if the removed worker was the root and a new root was
   chosen, REMOVED otherwise */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset* pollset,
                                          grpc_pollset_worker* worker) {
  if (worker == pollset->root_worker) {
    if (worker == worker->next) {
      pollset->root_worker = nullptr;
      return EMPTIED;
    } else {
      pollset->root_worker = worker->next;
      worker->prev->next = worker->next;
      worker->next->prev = worker->prev;
      return NEW_ROOT;
    }
  } else {
    worker->prev->next = worker->next;
    worker->next->prev = worker->prev;
    return REMOVED;
  }
}

static size_t choose_neighborhood(void) {
  return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
}

static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  global_wakeup_fd.read_fd = -1;
  grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLET);
  ev.data.ptr = &global_wakeup_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
                &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
  g_neighborhoods = (pollset_neighborhood*)gpr_zalloc(sizeof(*g_neighborhoods) *
                                                      g_num_neighborhoods);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_init(&g_neighborhoods[i].mu);
  }
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_destroy(&g_neighborhoods[i].mu);
  }
  gpr_free(g_neighborhoods);
}

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
  pollset->reassigning_neighborhood = false;
  pollset->root_worker = nullptr;
  pollset->kicked_without_poller = false;
  pollset->seen_inactive = true;
  pollset->shutting_down = false;
  pollset->shutdown_closure = nullptr;
  pollset->begin_refs = 0;
  pollset->next = pollset->prev = nullptr;
}

static void pollset_destroy(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset) {
  gpr_mu_lock(&pollset->mu);
  if (!pollset->seen_inactive) {
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (!pollset->seen_inactive) {
      if (pollset->neighborhood != neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }
      pollset->prev->next = pollset->next;
      pollset->next->prev = pollset->prev;
      if (pollset == pollset->neighborhood->active_root) {
        pollset->neighborhood->active_root =
            pollset->next == pollset ? nullptr : pollset->next;
      }
    }
    gpr_mu_unlock(&pollset->neighborhood->mu);
  }
  gpr_mu_unlock(&pollset->mu);
  gpr_mu_destroy(&pollset->mu);
}

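/* Kick every worker currently attached to 'pollset': cv-waiters are signalled
   and the designated poller is woken via the global wakeup fd. In this file it
   is called only from pollset_shutdown(), with pollset->mu held. */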
static grpc_error* pollset_kick_all(grpc_exec_ctx* exec_ctx,
                                    grpc_pollset* pollset) {
  GPR_TIMER_BEGIN("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->root_worker != nullptr) {
    grpc_pollset_worker* worker = pollset->root_worker;
    do {
      GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
      switch (worker->state) {
        case KICKED:
          GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
          break;
        case UNKICKED:
          SET_KICK_STATE(worker, KICKED);
          if (worker->initialized_cv) {
            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
            gpr_cv_signal(&worker->cv);
          }
          break;
        case DESIGNATED_POLLER:
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
          SET_KICK_STATE(worker, KICKED);
          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                       "pollset_kick_all");
          break;
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  // TODO: sreek. Check if we need to set 'kicked_without_poller' to true here
  // in the else case
  GPR_TIMER_END("pollset_kick_all", 0);
  return error;
}

static void pollset_maybe_finish_shutdown(grpc_exec_ctx* exec_ctx,
                                          grpc_pollset* pollset) {
  if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
      pollset->begin_refs == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    GRPC_CLOSURE_SCHED(exec_ctx, pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = nullptr;
  }
}

static void pollset_shutdown(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
                             grpc_closure* closure) {
  GPR_TIMER_BEGIN("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == nullptr);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutdown_closure = closure;
  pollset->shutting_down = true;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(exec_ctx, pollset));
  pollset_maybe_finish_shutdown(exec_ctx, pollset);
  GPR_TIMER_END("pollset_shutdown", 0);
}

static int poll_deadline_to_millis_timeout(grpc_exec_ctx* exec_ctx,
                                           grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_exec_ctx_now(exec_ctx);
  if (delta > INT_MAX) {
    return INT_MAX;
  } else if (delta < 0) {
    return 0;
  } else {
    return (int)delta;
  }
}

/* Process the epoll events found by do_epoll_wait() function.
   - g_epoll_set.cursor points to the index of the first event to be processed
   - This function then processes up to MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION
     events and updates the g_epoll_set.cursor

   NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
   called by the g_active_poller thread. So there is no need for synchronization
   when accessing fields in g_epoll_set */
static grpc_error* process_epoll_events(grpc_exec_ctx* exec_ctx,
                                        grpc_pollset* pollset) {
  static const char* err_desc = "process_events";
  grpc_error* error = GRPC_ERROR_NONE;

  GPR_TIMER_BEGIN("process_epoll_events", 0);
  long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
  long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
  for (int idx = 0;
       (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
       idx++) {
    long c = cursor++;
    struct epoll_event* ev = &g_epoll_set.events[c];
    void* data_ptr = ev->data.ptr;

    if (data_ptr == &global_wakeup_fd) {
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd* fd = (grpc_fd*)(data_ptr);
      bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;

      if (read_ev || cancel) {
        fd_become_readable(exec_ctx, fd, pollset);
      }

      if (write_ev || cancel) {
        fd_become_writable(exec_ctx, fd);
      }
    }
  }
  gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
  GPR_TIMER_END("process_epoll_events", 0);
  return error;
}

/* Do epoll_wait and store the events in g_epoll_set.events field. This does not
   "process" any of the events yet; that is done in process_epoll_events().
   See process_epoll_events() function for more details.

   NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
   (i.e the designated poller thread) will be calling this function. So there is
   no need for any synchronization when accessing fields in g_epoll_set */
static grpc_error* do_epoll_wait(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
                                 grpc_millis deadline) {
  GPR_TIMER_BEGIN("do_epoll_wait", 0);

  int r;
  int timeout = poll_deadline_to_millis_timeout(exec_ctx, deadline);
  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  do {
    GRPC_STATS_INC_SYSCALL_POLL(exec_ctx);
    r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
                   timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION_WITH_EXEC_CTX(exec_ctx);
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  GRPC_STATS_INC_POLL_EVENTS_RETURNED(exec_ctx, r);

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
  }

  gpr_atm_rel_store(&g_epoll_set.num_events, r);
  gpr_atm_rel_store(&g_epoll_set.cursor, 0);

  GPR_TIMER_END("do_epoll_wait", 0);
  return GRPC_ERROR_NONE;
}

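/* Attach 'worker' to 'pollset', blocking on its condition variable while it
   remains UNKICKED. Returns true iff this worker should go on to call
   epoll_wait(), i.e. it ended up as the DESIGNATED_POLLER and the pollset is
   not shutting down. */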
static bool begin_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
                         grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_BEGIN("begin_worker", 0);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  SET_KICK_STATE(worker, UNKICKED);
  worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
  pollset->begin_refs++;

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_ERROR, "PS:%p BEGIN_STARTS:%p", pollset, worker);
  }

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move back to the
    // active list
    bool is_reassigning = false;
    if (!pollset->reassigning_neighborhood) {
      is_reassigning = true;
      pollset->reassigning_neighborhood = true;
      pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
    }
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  // pollset unlocked: state may change (even worker->kick_state)
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
              pollset, worker, kick_state_string(worker->state),
              is_reassigning);
    }
    if (pollset->seen_inactive) {
      if (neighborhood != pollset->neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }

      /* In the brief time we released the pollset locks above, the worker MAY
         have been kicked. In this case, the worker should get out of this
         pollset ASAP and hence this should neither add the pollset to the
         neighborhood nor mark the pollset as active.

         On a side note, the only way a worker's kick state could have changed
         at this point is if it were "kicked specifically". Since the worker has
         not added itself to the pollset yet (by calling worker_insert()), it is
         not visible in the "kick any" path yet */
      if (worker->state == UNKICKED) {
        pollset->seen_inactive = false;
        if (neighborhood->active_root == nullptr) {
          neighborhood->active_root = pollset->next = pollset->prev = pollset;
          /* Make this the designated poller if there isn't one already */
          if (worker->state == UNKICKED &&
              gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
            SET_KICK_STATE(worker, DESIGNATED_POLLER);
          }
        } else {
          pollset->next = neighborhood->active_root;
          pollset->prev = pollset->next->prev;
          pollset->next->prev = pollset->prev->next = pollset;
        }
      }
    }
    if (is_reassigning) {
      GPR_ASSERT(pollset->reassigning_neighborhood);
      pollset->reassigning_neighborhood = false;
    }
    gpr_mu_unlock(&neighborhood->mu);
  }

  worker_insert(pollset, worker);
  pollset->begin_refs--;
  if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (worker->state == UNKICKED && !pollset->shutting_down) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
                pollset, worker, kick_state_string(worker->state),
                pollset->shutting_down);
      }

      if (gpr_cv_wait(&worker->cv, &pollset->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_MONOTONIC)) &&
          worker->state == UNKICKED) {
        /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
           received a kick */
        SET_KICK_STATE(worker, KICKED);
      }
    }
    grpc_exec_ctx_invalidate_now(exec_ctx);
  }

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_ERROR,
            "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
            "kicked_without_poller: %d",
            pollset, worker, kick_state_string(worker->state),
            pollset->shutting_down, pollset->kicked_without_poller);
  }

  /* We release pollset lock in this function at a couple of places:
   *   1. Briefly when assigning pollset to a neighborhood
   *   2. When doing gpr_cv_wait()
   * It is possible that 'kicked_without_poller' was set to true during (1) and
   * 'shutting_down' is set to true during (1) or (2). If either of them is
   * true, this worker cannot do polling */
  /* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller
   * case; especially when the worker is the DESIGNATED_POLLER */

  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    GPR_TIMER_END("begin_worker", 0);
    return false;
  }

  GPR_TIMER_END("begin_worker", 0);
  return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}

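/* Scan the active pollsets in 'neighborhood' looking for an UNKICKED worker
   that can be promoted to DESIGNATED_POLLER. Pollsets that turn out to have no
   promotable worker are marked seen_inactive and unlinked from the active
   list. Returns true once a poller has been found (or someone else already
   designated one). */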
static bool check_neighborhood_for_available_poller(
    grpc_exec_ctx* exec_ctx, pollset_neighborhood* neighborhood) {
  GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
  bool found_worker = false;
  do {
    grpc_pollset* inspect = neighborhood->active_root;
    if (inspect == nullptr) {
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker* inspect_worker = inspect->root_worker;
    if (inspect_worker != nullptr) {
      do {
        switch (inspect_worker->state) {
          case UNKICKED:
            if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                       (gpr_atm)inspect_worker)) {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
                        inspect_worker);
              }
              SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
              if (inspect_worker->initialized_cv) {
                GPR_TIMER_MARK("signal worker", 0);
                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
                gpr_cv_signal(&inspect_worker->cv);
              }
            } else {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
              }
            }
            // even if we didn't win the cas, there's a worker, we can stop
            found_worker = true;
            break;
          case KICKED:
            break;
          case DESIGNATED_POLLER:
            found_worker = true;  // ok, so someone else found the worker, but
                                  // we'll accept that
            break;
        }
        inspect_worker = inspect_worker->next;
      } while (!found_worker && inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
      }
      inspect->seen_inactive = true;
      if (inspect == neighborhood->active_root) {
        neighborhood->active_root =
            inspect->next == inspect ? nullptr : inspect->next;
      }
      inspect->next->prev = inspect->prev;
      inspect->prev->next = inspect->next;
      inspect->next = inspect->prev = nullptr;
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  GPR_TIMER_END("check_neighborhood_for_available_poller", 0);
  return found_worker;
}

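/* Tear-down path for a worker leaving pollset_work(): moves the closures the
   worker collected into exec_ctx and, if this worker was the designated
   poller, hands that role to an UNKICKED peer in the same pollset or scans the
   neighborhoods for a replacement before flushing exec_ctx. */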
static void end_worker(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
                       grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_BEGIN("end_worker", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
  }
  if (worker_hdl != nullptr) *worker_hdl = nullptr;
  /* Make sure we appear kicked */
  SET_KICK_STATE(worker, KICKED);
  grpc_closure_list_move(&worker->schedule_on_end_work,
                         &exec_ctx->closure_list);
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    if (worker->next != worker && worker->next->state == UNKICKED) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
      }
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
      gpr_cv_signal(&worker->next->cv);
      if (grpc_exec_ctx_has_work(exec_ctx)) {
        gpr_mu_unlock(&pollset->mu);
        grpc_exec_ctx_flush(exec_ctx);
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      size_t poller_neighborhood_idx =
          (size_t)(pollset->neighborhood - g_neighborhoods);
      gpr_mu_unlock(&pollset->mu);
      bool found_worker = false;
      bool scan_state[MAX_NEIGHBORHOODS];
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        if (gpr_mu_trylock(&neighborhood->mu)) {
          found_worker =
              check_neighborhood_for_available_poller(exec_ctx, neighborhood);
          gpr_mu_unlock(&neighborhood->mu);
          scan_state[i] = true;
        } else {
          scan_state[i] = false;
        }
      }
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        if (scan_state[i]) continue;
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        gpr_mu_lock(&neighborhood->mu);
        found_worker =
            check_neighborhood_for_available_poller(exec_ctx, neighborhood);
        gpr_mu_unlock(&neighborhood->mu);
      }
      grpc_exec_ctx_flush(exec_ctx);
      gpr_mu_lock(&pollset->mu);
    }
  } else if (grpc_exec_ctx_has_work(exec_ctx)) {
    gpr_mu_unlock(&pollset->mu);
    grpc_exec_ctx_flush(exec_ctx);
    gpr_mu_lock(&pollset->mu);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, " .. remove worker");
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    pollset_maybe_finish_shutdown(exec_ctx, pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
  GPR_TIMER_END("end_worker", 0);
}

936/* pollset->po.mu lock must be held by the caller before calling this.
937 The function pollset_work() may temporarily release the lock (pollset->po.mu)
938 during the course of its execution but it will always re-acquire the lock and
939 ensure that it is held by the time the function returns */
Craig Tillerbaa14a92017-11-03 09:09:36 -0700940static grpc_error* pollset_work(grpc_exec_ctx* exec_ctx, grpc_pollset* ps,
941 grpc_pollset_worker** worker_hdl,
Craig Tiller20397792017-07-18 11:35:27 -0700942 grpc_millis deadline) {
Craig Tiller4509c472017-04-27 19:05:13 +0000943 grpc_pollset_worker worker;
Craig Tillerbaa14a92017-11-03 09:09:36 -0700944 grpc_error* error = GRPC_ERROR_NONE;
945 static const char* err_desc = "pollset_work";
yang-gdf92a642017-08-21 22:38:45 -0700946 GPR_TIMER_BEGIN("pollset_work", 0);
Sree Kuchibhotlab154cd12017-08-25 10:33:41 -0700947 if (ps->kicked_without_poller) {
948 ps->kicked_without_poller = false;
yang-gdf92a642017-08-21 22:38:45 -0700949 GPR_TIMER_END("pollset_work", 0);
Craig Tiller4509c472017-04-27 19:05:13 +0000950 return GRPC_ERROR_NONE;
951 }
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700952
Craig Tillerd9b82bd2017-08-29 12:16:56 -0700953 if (begin_worker(exec_ctx, ps, &worker, worker_hdl, deadline)) {
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700954 gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
Craig Tiller4509c472017-04-27 19:05:13 +0000955 gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700956 GPR_ASSERT(!ps->shutting_down);
957 GPR_ASSERT(!ps->seen_inactive);
958
959 gpr_mu_unlock(&ps->mu); /* unlock */
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700960 /* This is the designated polling thread at this point and should ideally do
961 polling. However, if there are unprocessed events left from a previous
962 call to do_epoll_wait(), skip calling epoll_wait() in this iteration and
963 process the pending epoll events.
964
965 The reason for decoupling do_epoll_wait and process_epoll_events is to
966 better distrubute the work (i.e handling epoll events) across multiple
967 threads
968
969 process_epoll_events() returns very quickly: It just queues the work on
970 exec_ctx but does not execute it (the actual exectution or more
971 accurately grpc_exec_ctx_flush() happens in end_worker() AFTER selecting
972 a designated poller). So we are not waiting long periods without a
973 designated poller */
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700974 if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
975 gpr_atm_acq_load(&g_epoll_set.num_events)) {
Craig Tillerd9b82bd2017-08-29 12:16:56 -0700976 append_error(&error, do_epoll_wait(exec_ctx, ps, deadline), err_desc);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700977 }
978 append_error(&error, process_epoll_events(exec_ctx, ps), err_desc);
979
980 gpr_mu_lock(&ps->mu); /* lock */
981
Craig Tiller4509c472017-04-27 19:05:13 +0000982 gpr_tls_set(&g_current_thread_worker, 0);
Craig Tiller830e82a2017-05-31 16:26:27 -0700983 } else {
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700984 gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
Craig Tiller4509c472017-04-27 19:05:13 +0000985 }
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700986 end_worker(exec_ctx, ps, &worker, worker_hdl);
987
Craig Tiller8502ecb2017-04-28 14:22:01 -0700988 gpr_tls_set(&g_current_thread_pollset, 0);
yang-gdf92a642017-08-21 22:38:45 -0700989 GPR_TIMER_END("pollset_work", 0);
Craig Tiller4509c472017-04-27 19:05:13 +0000990 return error;
991}
992
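/* Summary comment (added): pollset_kick() chooses the cheapest wakeup.
   With no specific_worker and the caller off this pollset's thread, it kicks
   the root/next worker: the global wakeup fd if that worker is the designated
   poller, or gpr_cv_signal() if it is sleeping on its condition variable.
   With no specific_worker on the pollset's own thread, or a target that is
   already KICKED, there is nothing to wake. Otherwise the specific worker is
   marked KICKED and woken via the wakeup fd (active poller) or its cv. */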
Craig Tillerbaa14a92017-11-03 09:09:36 -0700993static grpc_error* pollset_kick(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
994 grpc_pollset_worker* specific_worker) {
yang-gdf92a642017-08-21 22:38:45 -0700995 GPR_TIMER_BEGIN("pollset_kick", 0);
Craig Tiller0ff222a2017-09-01 09:41:43 -0700996 GRPC_STATS_INC_POLLSET_KICK(exec_ctx);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700997 grpc_error* ret_err = GRPC_ERROR_NONE;
ncteisen3cffe1f2017-11-10 13:56:23 -0800998 if (grpc_polling_trace.enabled()) {
Craig Tillerb89bac02017-05-26 15:20:32 +0000999 gpr_strvec log;
1000 gpr_strvec_init(&log);
Craig Tillerbaa14a92017-11-03 09:09:36 -07001001 char* tmp;
1002 gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
1003 specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
1004 (void*)gpr_tls_get(&g_current_thread_worker),
1005 pollset->root_worker);
Craig Tillerb89bac02017-05-26 15:20:32 +00001006 gpr_strvec_add(&log, tmp);
Craig Tiller4782d922017-11-10 09:53:21 -08001007 if (pollset->root_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001008 gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001009 kick_state_string(pollset->root_worker->state),
Craig Tiller830e82a2017-05-31 16:26:27 -07001010 pollset->root_worker->next,
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001011 kick_state_string(pollset->root_worker->next->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001012 gpr_strvec_add(&log, tmp);
1013 }
Craig Tiller4782d922017-11-10 09:53:21 -08001014 if (specific_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001015 gpr_asprintf(&tmp, " worker_kick_state=%s",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001016 kick_state_string(specific_worker->state));
Craig Tillerb89bac02017-05-26 15:20:32 +00001017 gpr_strvec_add(&log, tmp);
1018 }
Craig Tiller4782d922017-11-10 09:53:21 -08001019 tmp = gpr_strvec_flatten(&log, nullptr);
Craig Tillerb89bac02017-05-26 15:20:32 +00001020 gpr_strvec_destroy(&log);
Craig Tiller830e82a2017-05-31 16:26:27 -07001021 gpr_log(GPR_ERROR, "%s", tmp);
Craig Tillerb89bac02017-05-26 15:20:32 +00001022 gpr_free(tmp);
1023 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001024
Craig Tiller4782d922017-11-10 09:53:21 -08001025 if (specific_worker == nullptr) {
Craig Tiller4509c472017-04-27 19:05:13 +00001026 if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001027 grpc_pollset_worker* root_worker = pollset->root_worker;
Craig Tiller4782d922017-11-10 09:53:21 -08001028 if (root_worker == nullptr) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001029 GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER(exec_ctx);
Craig Tiller4509c472017-04-27 19:05:13 +00001030 pollset->kicked_without_poller = true;
ncteisen3cffe1f2017-11-10 13:56:23 -08001031 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001032 gpr_log(GPR_ERROR, " .. kicked_without_poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001033 }
yang-gdf92a642017-08-21 22:38:45 -07001034 goto done;
Craig Tiller375eb252017-04-27 23:29:12 +00001035 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001036 grpc_pollset_worker* next_worker = root_worker->next;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001037 if (root_worker->state == KICKED) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001038 GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001039 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001040 gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
1041 }
1042 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001043 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001044 } else if (next_worker->state == KICKED) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001045 GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001046 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001047 gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
1048 }
1049 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001050 goto done;
Craig Tiller830e82a2017-05-31 16:26:27 -07001051 } else if (root_worker ==
1052 next_worker && // only try and wake up a poller if
1053 // there is no next worker
Craig Tillerbaa14a92017-11-03 09:09:36 -07001054 root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
Craig Tiller830e82a2017-05-31 16:26:27 -07001055 &g_active_poller)) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001056 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001057 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001058 gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001059 }
Craig Tiller55624a32017-05-26 08:14:44 -07001060 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001061 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1062 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001063 } else if (next_worker->state == UNKICKED) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001064 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001065 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001066 gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001067 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001068 GPR_ASSERT(next_worker->initialized_cv);
Craig Tiller55624a32017-05-26 08:14:44 -07001069 SET_KICK_STATE(next_worker, KICKED);
Craig Tiller375eb252017-04-27 23:29:12 +00001070 gpr_cv_signal(&next_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001071 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001072 } else if (next_worker->state == DESIGNATED_POLLER) {
1073 if (root_worker->state != DESIGNATED_POLLER) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001074 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001075 gpr_log(
1076 GPR_ERROR,
1077 " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
1078 root_worker, root_worker->initialized_cv, next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001079 }
Craig Tiller55624a32017-05-26 08:14:44 -07001080 SET_KICK_STATE(root_worker, KICKED);
1081 if (root_worker->initialized_cv) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001082 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
Craig Tiller55624a32017-05-26 08:14:44 -07001083 gpr_cv_signal(&root_worker->cv);
1084 }
yang-gdf92a642017-08-21 22:38:45 -07001085 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001086 } else {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001087 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001088 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001089 gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
Craig Tiller75aef7f2017-05-26 08:26:08 -07001090 root_worker);
1091 }
Craig Tiller55624a32017-05-26 08:14:44 -07001092 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001093 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1094 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001095 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001096 } else {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001097 GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001098 GPR_ASSERT(next_worker->state == KICKED);
Craig Tiller55624a32017-05-26 08:14:44 -07001099 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001100 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001101 }
1102 } else {
Craig Tiller1a012bb2017-09-13 14:29:00 -07001103 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001104 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001105 gpr_log(GPR_ERROR, " .. kicked while waking up");
1106 }
yang-gdf92a642017-08-21 22:38:45 -07001107 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001108 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001109
1110 GPR_UNREACHABLE_CODE(goto done);
1111 }
1112
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001113 if (specific_worker->state == KICKED) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001114 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001115 gpr_log(GPR_ERROR, " .. specific worker already kicked");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001116 }
yang-gdf92a642017-08-21 22:38:45 -07001117 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001118 } else if (gpr_tls_get(&g_current_thread_worker) ==
1119 (intptr_t)specific_worker) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001120 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001121 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001122 gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001123 }
Craig Tiller55624a32017-05-26 08:14:44 -07001124 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001125 goto done;
Craig Tiller32f90ee2017-04-28 12:46:41 -07001126 } else if (specific_worker ==
Craig Tillerbaa14a92017-11-03 09:09:36 -07001127 (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001128 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001129 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001130 gpr_log(GPR_ERROR, " .. kick active poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001131 }
Craig Tiller55624a32017-05-26 08:14:44 -07001132 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001133 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1134 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001135 } else if (specific_worker->initialized_cv) {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001136 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001137 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001138 gpr_log(GPR_ERROR, " .. kick waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001139 }
Craig Tiller55624a32017-05-26 08:14:44 -07001140 SET_KICK_STATE(specific_worker, KICKED);
Craig Tiller4509c472017-04-27 19:05:13 +00001141 gpr_cv_signal(&specific_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001142 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001143 } else {
Craig Tiller0ff222a2017-09-01 09:41:43 -07001144 GRPC_STATS_INC_POLLSET_KICKED_AGAIN(exec_ctx);
ncteisen3cffe1f2017-11-10 13:56:23 -08001145 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001146 gpr_log(GPR_ERROR, " .. kick non-waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001147 }
Craig Tiller55624a32017-05-26 08:14:44 -07001148 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001149 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001150 }
yang-gdf92a642017-08-21 22:38:45 -07001151done:
1152 GPR_TIMER_END("pollset_kick", 0);
1153 return ret_err;
Craig Tiller4509c472017-04-27 19:05:13 +00001154}
1155
Craig Tillerbaa14a92017-11-03 09:09:36 -07001156static void pollset_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset* pollset,
1157 grpc_fd* fd) {}
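/* Added note: this is a no-op by design. In this engine an fd is registered
   with the single global epoll set when it is created, so adding it to a
   particular pollset requires no extra work. */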
Craig Tiller4509c472017-04-27 19:05:13 +00001158
Craig Tiller4509c472017-04-27 19:05:13 +00001159/*******************************************************************************
Craig Tillerc67cc992017-04-27 10:15:51 -07001160 * Pollset-set Definitions
1161 */
1162
Craig Tillerbaa14a92017-11-03 09:09:36 -07001163static grpc_pollset_set* pollset_set_create(void) {
1164 return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
Craig Tillerc67cc992017-04-27 10:15:51 -07001165}
1166
Craig Tillerbaa14a92017-11-03 09:09:36 -07001167static void pollset_set_destroy(grpc_exec_ctx* exec_ctx,
1168 grpc_pollset_set* pss) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001169
Craig Tillerbaa14a92017-11-03 09:09:36 -07001170static void pollset_set_add_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
1171 grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001172
Craig Tillerbaa14a92017-11-03 09:09:36 -07001173static void pollset_set_del_fd(grpc_exec_ctx* exec_ctx, grpc_pollset_set* pss,
1174 grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001175
Craig Tillerbaa14a92017-11-03 09:09:36 -07001176static void pollset_set_add_pollset(grpc_exec_ctx* exec_ctx,
1177 grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001178
Craig Tillerbaa14a92017-11-03 09:09:36 -07001179static void pollset_set_del_pollset(grpc_exec_ctx* exec_ctx,
1180 grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001181
Craig Tillerbaa14a92017-11-03 09:09:36 -07001182static void pollset_set_add_pollset_set(grpc_exec_ctx* exec_ctx,
1183 grpc_pollset_set* bag,
1184 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001185
Craig Tillerbaa14a92017-11-03 09:09:36 -07001186static void pollset_set_del_pollset_set(grpc_exec_ctx* exec_ctx,
1187 grpc_pollset_set* bag,
1188 grpc_pollset_set* item) {}
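/* Added note: pollset_sets are likewise no-ops; with one global epoll set
   there is no per-set state to maintain, and pollset_set_create() only has to
   hand back a distinguishable non-NULL dummy pointer (0xdeafbeef). */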
Craig Tillerc67cc992017-04-27 10:15:51 -07001189
1190/*******************************************************************************
1191 * Event engine binding
1192 */
1193
1194static void shutdown_engine(void) {
1195 fd_global_shutdown();
1196 pollset_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001197 epoll_set_shutdown();
Craig Tillerc67cc992017-04-27 10:15:51 -07001198}
1199
1200static const grpc_event_engine_vtable vtable = {
Yash Tibrewal533d1182017-09-18 10:48:22 -07001201 sizeof(grpc_pollset),
Craig Tillerc67cc992017-04-27 10:15:51 -07001202
Yash Tibrewal533d1182017-09-18 10:48:22 -07001203 fd_create,
1204 fd_wrapped_fd,
1205 fd_orphan,
1206 fd_shutdown,
1207 fd_notify_on_read,
1208 fd_notify_on_write,
1209 fd_is_shutdown,
1210 fd_get_read_notifier_pollset,
Craig Tillerc67cc992017-04-27 10:15:51 -07001211
Yash Tibrewal533d1182017-09-18 10:48:22 -07001212 pollset_init,
1213 pollset_shutdown,
1214 pollset_destroy,
1215 pollset_work,
1216 pollset_kick,
1217 pollset_add_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001218
Yash Tibrewal533d1182017-09-18 10:48:22 -07001219 pollset_set_create,
1220 pollset_set_destroy,
1221 pollset_set_add_pollset,
1222 pollset_set_del_pollset,
1223 pollset_set_add_pollset_set,
1224 pollset_set_del_pollset_set,
1225 pollset_set_add_fd,
1226 pollset_set_del_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001227
Yash Tibrewal533d1182017-09-18 10:48:22 -07001228 shutdown_engine,
Craig Tillerc67cc992017-04-27 10:15:51 -07001229};
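/* Usage sketch (assumed, based on how posix event engines are generally wired
   up in this tree): ev_posix keeps a table of engine factories and calls
   grpc_init_epoll1_linux() when the "epoll1" polling strategy is selected,
   e.g. via the GRPC_POLL_STRATEGY environment variable; on success the vtable
   above supplies the fd/pollset/pollset_set implementation. */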
1230
1231/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001232 * Create epoll_fd (epoll_set_init() takes care of that) to make sure epoll
1233 * support is available */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001234const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Craig Tillerc67cc992017-04-27 10:15:51 -07001235 if (!grpc_has_wakeup_fd()) {
yang-g30101b02017-11-06 14:35:30 -08001236 gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
Craig Tiller4782d922017-11-10 09:53:21 -08001237 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001238 }
1239
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001240 if (!epoll_set_init()) {
Craig Tiller4782d922017-11-10 09:53:21 -08001241 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001242 }
1243
Craig Tillerc67cc992017-04-27 10:15:51 -07001244 fd_global_init();
1245
1246 if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
Craig Tiller4509c472017-04-27 19:05:13 +00001247 fd_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001248 epoll_set_shutdown();
Craig Tiller4782d922017-11-10 09:53:21 -08001249 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001250 }
1251
1252 return &vtable;
1253}
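/* Minimal caller sketch (hypothetical; actual engine selection lives in
   ev_posix):

     const grpc_event_engine_vtable* engine = grpc_init_epoll1_linux(false);
     if (engine == nullptr) {
       // epoll or the wakeup fd is unavailable; fall back to another engine.
     }
*/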
1254
1255#else /* defined(GRPC_LINUX_EPOLL) */
1256#if defined(GRPC_POSIX_SOCKET)
Yash Tibrewal1cac2232017-09-26 11:31:11 -07001257#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -07001258/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
1259 * NULL */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001260const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
yang-g30101b02017-11-06 14:35:30 -08001261 gpr_log(GPR_ERROR,
yang-g56e72572017-11-06 15:54:48 -08001262 "Skipping epoll1 because GRPC_LINUX_EPOLL is not defined.");
Craig Tiller9ddb3152017-04-27 21:32:56 +00001263 return NULL;
1264}
Craig Tillerc67cc992017-04-27 10:15:51 -07001265#endif /* defined(GRPC_POSIX_SOCKET) */
1266#endif /* !defined(GRPC_LINUX_EPOLL) */