/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include "src/core/lib/iomgr/port.h"

#include <grpc/support/log.h>

/* This polling engine is only relevant on linux kernels supporting epoll() */
#ifdef GRPC_LINUX_EPOLL
#include "src/core/lib/iomgr/ev_epoll1_linux.h"

#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/cpu.h>
#include <grpc/support/string_util.h>
#include <grpc/support/tls.h>
#include <grpc/support/useful.h>

#include "src/core/lib/debug/stats.h"
#include "src/core/lib/iomgr/block_annotate.h"
#include "src/core/lib/iomgr/ev_posix.h"
#include "src/core/lib/iomgr/iomgr_internal.h"
#include "src/core/lib/iomgr/lockfree_event.h"
#include "src/core/lib/iomgr/wakeup_fd_posix.h"
#include "src/core/lib/profiling/timers.h"
#include "src/core/lib/support/manual_constructor.h"
#include "src/core/lib/support/string.h"

static grpc_wakeup_fd global_wakeup_fd;

/*******************************************************************************
 * Singleton epoll set related fields
 */

#define MAX_EPOLL_EVENTS 100
#define MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION 1
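
/* MAX_EPOLL_EVENTS bounds how many events a single epoll_wait() call may
   return; MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION bounds how many of those
   events one call to process_epoll_events() handles, leaving any remainder
   for later iterations. */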

/* NOTE ON SYNCHRONIZATION:
 * - Fields in this struct are only modified by the designated poller. Hence
 *   there is no need for any locks to protect the struct.
 * - num_events and cursor fields have to be of atomic type to provide memory
 *   visibility guarantees only, i.e. in case of multiple pollers, the
 *   designated polling thread keeps changing; the thread that wrote these
 *   values may be different from the thread reading the values.
 */
typedef struct epoll_set {
  int epfd;

  /* The epoll_events after the last call to epoll_wait() */
  struct epoll_event events[MAX_EPOLL_EVENTS];

  /* The number of epoll_events after the last call to epoll_wait() */
  gpr_atm num_events;

  /* Index of the first event in epoll_events that has to be processed. This
   * field is only valid if num_events > 0 */
  gpr_atm cursor;
} epoll_set;

/* The global singleton epoll set */
static epoll_set g_epoll_set;

/* Must be called *only* once */
static bool epoll_set_init() {
  g_epoll_set.epfd = epoll_create1(EPOLL_CLOEXEC);
  if (g_epoll_set.epfd < 0) {
    gpr_log(GPR_ERROR, "epoll unavailable");
    return false;
  }

  gpr_log(GPR_INFO, "grpc epoll fd: %d", g_epoll_set.epfd);
  gpr_atm_no_barrier_store(&g_epoll_set.num_events, 0);
  gpr_atm_no_barrier_store(&g_epoll_set.cursor, 0);
  return true;
}

/* epoll_set_init() MUST be called before calling this. */
static void epoll_set_shutdown() {
  if (g_epoll_set.epfd >= 0) {
    close(g_epoll_set.epfd);
    g_epoll_set.epfd = -1;
  }
}

/*******************************************************************************
 * Fd Declarations
 */

struct grpc_fd {
  int fd;

  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> read_closure;
  grpc_core::ManualConstructor<grpc_core::LockfreeEvent> write_closure;

  struct grpc_fd* freelist_next;

  /* The pollset that last noticed that the fd is readable. The actual type
   * stored in this is (grpc_pollset *) */
  gpr_atm read_notifier_pollset;

  grpc_iomgr_object iomgr_object;
};

static void fd_global_init(void);
static void fd_global_shutdown(void);

/*******************************************************************************
 * Pollset Declarations
 */

typedef enum { UNKICKED, KICKED, DESIGNATED_POLLER } kick_state;

static const char* kick_state_string(kick_state st) {
  switch (st) {
    case UNKICKED:
      return "UNKICKED";
    case KICKED:
      return "KICKED";
    case DESIGNATED_POLLER:
      return "DESIGNATED_POLLER";
  }
  GPR_UNREACHABLE_CODE(return "UNKNOWN");
}

struct grpc_pollset_worker {
  kick_state state;
  int kick_state_mutator;  // which line of code last changed kick state
  bool initialized_cv;
  grpc_pollset_worker* next;
  grpc_pollset_worker* prev;
  gpr_cv cv;
  grpc_closure_list schedule_on_end_work;
};

#define SET_KICK_STATE(worker, kick_state)   \
  do {                                       \
    (worker)->state = (kick_state);          \
    (worker)->kick_state_mutator = __LINE__; \
  } while (false)
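
/* SET_KICK_STATE records __LINE__ in kick_state_mutator so that every
   kick-state transition is tagged with the source line that made it, which is
   useful when diagnosing unexpected kick-state changes. */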

#define MAX_NEIGHBORHOODS 1024

typedef struct pollset_neighborhood {
  gpr_mu mu;
  grpc_pollset* active_root;
  char pad[GPR_CACHELINE_SIZE];
} pollset_neighborhood;

struct grpc_pollset {
  gpr_mu mu;
  pollset_neighborhood* neighborhood;
  bool reassigning_neighborhood;
  grpc_pollset_worker* root_worker;
  bool kicked_without_poller;

  /* Set to true if the pollset is observed to have no workers available to
     poll */
  bool seen_inactive;
  bool shutting_down;             /* Is the pollset shutting down? */
  grpc_closure* shutdown_closure; /* Called after shutdown is complete */

  /* Number of workers who are *about-to* attach themselves to the pollset
   * worker list */
  int begin_refs;

  grpc_pollset* next;
  grpc_pollset* prev;
};

/*******************************************************************************
 * Pollset-set Declarations
 */

struct grpc_pollset_set {
  char unused;
};

/*******************************************************************************
 * Common helpers
 */

static bool append_error(grpc_error** composite, grpc_error* error,
                         const char* desc) {
  if (error == GRPC_ERROR_NONE) return true;
  if (*composite == GRPC_ERROR_NONE) {
    *composite = GRPC_ERROR_CREATE_FROM_COPIED_STRING(desc);
  }
  *composite = grpc_error_add_child(*composite, error);
  return false;
}

/*******************************************************************************
 * Fd Definitions
 */

/* We need to keep a freelist not because of any concerns about malloc
 * performance but instead so that implementations with multiple threads in
 * (for example) epoll_wait deal with the race between pollset removal and
 * incoming poll notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */

/* The alarm system needs to be able to wakeup 'some poller' sometimes
 * (specifically when a new alarm needs to be triggered earlier than the next
 * alarm 'epoch'). This wakeup_fd gives us something to alert on when such a
 * case occurs. */

static grpc_fd* fd_freelist = nullptr;
static gpr_mu fd_freelist_mu;

static void fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

static void fd_global_shutdown(void) {
  gpr_mu_lock(&fd_freelist_mu);
  gpr_mu_unlock(&fd_freelist_mu);
  while (fd_freelist != nullptr) {
    grpc_fd* fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    gpr_free(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

static grpc_fd* fd_create(int fd, const char* name) {
  grpc_fd* new_fd = nullptr;

  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != nullptr) {
    new_fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);

  if (new_fd == nullptr) {
    new_fd = (grpc_fd*)gpr_malloc(sizeof(grpc_fd));
    new_fd->read_closure.Init();
    new_fd->write_closure.Init();
  }

  new_fd->fd = fd;
  new_fd->read_closure->InitEvent();
  new_fd->write_closure->InitEvent();
  gpr_atm_no_barrier_store(&new_fd->read_notifier_pollset, (gpr_atm)NULL);

  new_fd->freelist_next = nullptr;

  char* fd_name;
  gpr_asprintf(&fd_name, "%s fd=%d", name, fd);
  grpc_iomgr_register_object(&new_fd->iomgr_object, fd_name);
#ifndef NDEBUG
  if (grpc_trace_fd_refcount.enabled()) {
    gpr_log(GPR_DEBUG, "FD %d %p create %s", fd, new_fd, fd_name);
  }
#endif
  gpr_free(fd_name);

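  /* Register the fd with the singleton epoll set exactly once, in
     edge-triggered mode (EPOLLET); readiness is then surfaced to waiting
     closures through the lockfree read/write events rather than by re-arming
     the fd. */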
  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
  ev.data.ptr = new_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, fd, &ev) != 0) {
    gpr_log(GPR_ERROR, "epoll_ctl failed: %s", strerror(errno));
  }

  return new_fd;
}

static int fd_wrapped_fd(grpc_fd* fd) { return fd->fd; }

/* if 'releasing_fd' is true, it means that we are going to detach the internal
 * fd from the grpc_fd structure (i.e. we should not call the shutdown()
 * syscall on that fd) */
static void fd_shutdown_internal(grpc_fd* fd, grpc_error* why,
                                 bool releasing_fd) {
  if (fd->read_closure->SetShutdown(GRPC_ERROR_REF(why))) {
    if (!releasing_fd) {
      shutdown(fd->fd, SHUT_RDWR);
    }
    fd->write_closure->SetShutdown(GRPC_ERROR_REF(why));
  }
  GRPC_ERROR_UNREF(why);
}

/* Might be called multiple times */
static void fd_shutdown(grpc_fd* fd, grpc_error* why) {
  fd_shutdown_internal(fd, why, false);
}

static void fd_orphan(grpc_fd* fd, grpc_closure* on_done, int* release_fd,
                      bool already_closed, const char* reason) {
  grpc_error* error = GRPC_ERROR_NONE;
  bool is_release_fd = (release_fd != nullptr);

  if (!fd->read_closure->IsShutdown()) {
    fd_shutdown_internal(fd, GRPC_ERROR_CREATE_FROM_COPIED_STRING(reason),
                         is_release_fd);
  }

  /* If release_fd is not NULL, we should be relinquishing control of the file
     descriptor fd->fd (but we still own the grpc_fd structure). */
  if (is_release_fd) {
    *release_fd = fd->fd;
  } else if (!already_closed) {
    close(fd->fd);
  }

  GRPC_CLOSURE_SCHED(on_done, GRPC_ERROR_REF(error));

  grpc_iomgr_unregister_object(&fd->iomgr_object);
  fd->read_closure->DestroyEvent();
  fd->write_closure->DestroyEvent();

  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_pollset* fd_get_read_notifier_pollset(grpc_fd* fd) {
  gpr_atm notifier = gpr_atm_acq_load(&fd->read_notifier_pollset);
  return (grpc_pollset*)notifier;
}

static bool fd_is_shutdown(grpc_fd* fd) {
  return fd->read_closure->IsShutdown();
}

static void fd_notify_on_read(grpc_fd* fd, grpc_closure* closure) {
  fd->read_closure->NotifyOn(closure);
}

static void fd_notify_on_write(grpc_fd* fd, grpc_closure* closure) {
  fd->write_closure->NotifyOn(closure);
}

static void fd_become_readable(grpc_fd* fd, grpc_pollset* notifier) {
  fd->read_closure->SetReady();
  /* Use release store to match with acquire load in fd_get_read_notifier */
  gpr_atm_rel_store(&fd->read_notifier_pollset, (gpr_atm)notifier);
}

static void fd_become_writable(grpc_fd* fd) { fd->write_closure->SetReady(); }

/*******************************************************************************
 * Pollset Definitions
 */

GPR_TLS_DECL(g_current_thread_pollset);
GPR_TLS_DECL(g_current_thread_worker);

/* The designated poller */
static gpr_atm g_active_poller;

static pollset_neighborhood* g_neighborhoods;
static size_t g_num_neighborhoods;

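/* Workers attached to a pollset form a circular doubly-linked list rooted at
   pollset->root_worker; worker_insert() appends at the tail (root->prev) and
   worker_remove() splices the worker back out. */
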
/* Return true if first in list */
static bool worker_insert(grpc_pollset* pollset, grpc_pollset_worker* worker) {
  if (pollset->root_worker == nullptr) {
    pollset->root_worker = worker;
    worker->next = worker->prev = worker;
    return true;
  } else {
    worker->next = pollset->root_worker;
    worker->prev = worker->next->prev;
    worker->next->prev = worker;
    worker->prev->next = worker;
    return false;
  }
}

/* Return true if last in list */
typedef enum { EMPTIED, NEW_ROOT, REMOVED } worker_remove_result;

static worker_remove_result worker_remove(grpc_pollset* pollset,
                                          grpc_pollset_worker* worker) {
  if (worker == pollset->root_worker) {
    if (worker == worker->next) {
      pollset->root_worker = nullptr;
      return EMPTIED;
    } else {
      pollset->root_worker = worker->next;
      worker->prev->next = worker->next;
      worker->next->prev = worker->prev;
      return NEW_ROOT;
    }
  } else {
    worker->prev->next = worker->next;
    worker->next->prev = worker->prev;
    return REMOVED;
  }
}

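/* Pollsets are sharded across 'neighborhoods' (at most MAX_NEIGHBORHOODS, by
   default one per CPU core); a pollset's neighborhood is chosen from the CPU
   the calling thread is currently running on. */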
static size_t choose_neighborhood(void) {
  return (size_t)gpr_cpu_current_cpu() % g_num_neighborhoods;
}

static grpc_error* pollset_global_init(void) {
  gpr_tls_init(&g_current_thread_pollset);
  gpr_tls_init(&g_current_thread_worker);
  gpr_atm_no_barrier_store(&g_active_poller, 0);
  global_wakeup_fd.read_fd = -1;
  grpc_error* err = grpc_wakeup_fd_init(&global_wakeup_fd);
  if (err != GRPC_ERROR_NONE) return err;
  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLET);
  ev.data.ptr = &global_wakeup_fd;
  if (epoll_ctl(g_epoll_set.epfd, EPOLL_CTL_ADD, global_wakeup_fd.read_fd,
                &ev) != 0) {
    return GRPC_OS_ERROR(errno, "epoll_ctl");
  }
  g_num_neighborhoods = GPR_CLAMP(gpr_cpu_num_cores(), 1, MAX_NEIGHBORHOODS);
  g_neighborhoods = (pollset_neighborhood*)gpr_zalloc(sizeof(*g_neighborhoods) *
                                                      g_num_neighborhoods);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_init(&g_neighborhoods[i].mu);
  }
  return GRPC_ERROR_NONE;
}

static void pollset_global_shutdown(void) {
  gpr_tls_destroy(&g_current_thread_pollset);
  gpr_tls_destroy(&g_current_thread_worker);
  if (global_wakeup_fd.read_fd != -1) grpc_wakeup_fd_destroy(&global_wakeup_fd);
  for (size_t i = 0; i < g_num_neighborhoods; i++) {
    gpr_mu_destroy(&g_neighborhoods[i].mu);
  }
  gpr_free(g_neighborhoods);
}

static void pollset_init(grpc_pollset* pollset, gpr_mu** mu) {
  gpr_mu_init(&pollset->mu);
  *mu = &pollset->mu;
  pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
  pollset->reassigning_neighborhood = false;
  pollset->root_worker = nullptr;
  pollset->kicked_without_poller = false;
  pollset->seen_inactive = true;
  pollset->shutting_down = false;
  pollset->shutdown_closure = nullptr;
  pollset->begin_refs = 0;
  pollset->next = pollset->prev = nullptr;
}

static void pollset_destroy(grpc_pollset* pollset) {
  gpr_mu_lock(&pollset->mu);
  if (!pollset->seen_inactive) {
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (!pollset->seen_inactive) {
      if (pollset->neighborhood != neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }
      pollset->prev->next = pollset->next;
      pollset->next->prev = pollset->prev;
      if (pollset == pollset->neighborhood->active_root) {
        pollset->neighborhood->active_root =
            pollset->next == pollset ? nullptr : pollset->next;
      }
    }
    gpr_mu_unlock(&pollset->neighborhood->mu);
  }
  gpr_mu_unlock(&pollset->mu);
  gpr_mu_destroy(&pollset->mu);
}

static grpc_error* pollset_kick_all(grpc_pollset* pollset) {
  GPR_TIMER_BEGIN("pollset_kick_all", 0);
  grpc_error* error = GRPC_ERROR_NONE;
  if (pollset->root_worker != nullptr) {
    grpc_pollset_worker* worker = pollset->root_worker;
    do {
      GRPC_STATS_INC_POLLSET_KICK();
      switch (worker->state) {
        case KICKED:
          GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
          break;
        case UNKICKED:
          SET_KICK_STATE(worker, KICKED);
          if (worker->initialized_cv) {
            GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
            gpr_cv_signal(&worker->cv);
          }
          break;
        case DESIGNATED_POLLER:
          GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
          SET_KICK_STATE(worker, KICKED);
          append_error(&error, grpc_wakeup_fd_wakeup(&global_wakeup_fd),
                       "pollset_kick_all");
          break;
      }

      worker = worker->next;
    } while (worker != pollset->root_worker);
  }
  // TODO: sreek.  Check if we need to set 'kicked_without_poller' to true here
  // in the else case
  GPR_TIMER_END("pollset_kick_all", 0);
  return error;
}

static void pollset_maybe_finish_shutdown(grpc_pollset* pollset) {
  if (pollset->shutdown_closure != nullptr && pollset->root_worker == nullptr &&
      pollset->begin_refs == 0) {
    GPR_TIMER_MARK("pollset_finish_shutdown", 0);
    GRPC_CLOSURE_SCHED(pollset->shutdown_closure, GRPC_ERROR_NONE);
    pollset->shutdown_closure = nullptr;
  }
}

static void pollset_shutdown(grpc_pollset* pollset, grpc_closure* closure) {
  GPR_TIMER_BEGIN("pollset_shutdown", 0);
  GPR_ASSERT(pollset->shutdown_closure == nullptr);
  GPR_ASSERT(!pollset->shutting_down);
  pollset->shutdown_closure = closure;
  pollset->shutting_down = true;
  GRPC_LOG_IF_ERROR("pollset_shutdown", pollset_kick_all(pollset));
  pollset_maybe_finish_shutdown(pollset);
  GPR_TIMER_END("pollset_shutdown", 0);
}

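/* Convert a grpc_millis deadline into an epoll_wait() timeout in milliseconds,
   clamped to [0, INT_MAX]; -1 means wait indefinitely. */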
static int poll_deadline_to_millis_timeout(grpc_millis millis) {
  if (millis == GRPC_MILLIS_INF_FUTURE) return -1;
  grpc_millis delta = millis - grpc_core::ExecCtx::Get()->Now();
  if (delta > INT_MAX) {
    return INT_MAX;
  } else if (delta < 0) {
    return 0;
  } else {
    return (int)delta;
  }
}

/* Process the epoll events found by do_epoll_wait() function.
   - g_epoll_set.cursor points to the index of the first event to be processed
   - This function then processes up to MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION
     events and updates g_epoll_set.cursor

   NOTE ON SYNCHRONIZATION: Similar to do_epoll_wait(), this function is only
   called by the g_active_poller thread. So there is no need for
   synchronization when accessing fields in g_epoll_set */
static grpc_error* process_epoll_events(grpc_pollset* pollset) {
  static const char* err_desc = "process_events";
  grpc_error* error = GRPC_ERROR_NONE;

  GPR_TIMER_BEGIN("process_epoll_events", 0);
  long num_events = gpr_atm_acq_load(&g_epoll_set.num_events);
  long cursor = gpr_atm_acq_load(&g_epoll_set.cursor);
  for (int idx = 0;
       (idx < MAX_EPOLL_EVENTS_HANDLED_PER_ITERATION) && cursor != num_events;
       idx++) {
    long c = cursor++;
    struct epoll_event* ev = &g_epoll_set.events[c];
    void* data_ptr = ev->data.ptr;

    if (data_ptr == &global_wakeup_fd) {
      append_error(&error, grpc_wakeup_fd_consume_wakeup(&global_wakeup_fd),
                   err_desc);
    } else {
      grpc_fd* fd = (grpc_fd*)(data_ptr);
      bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;

      if (read_ev || cancel) {
        fd_become_readable(fd, pollset);
      }

      if (write_ev || cancel) {
        fd_become_writable(fd);
      }
    }
  }
  gpr_atm_rel_store(&g_epoll_set.cursor, cursor);
  GPR_TIMER_END("process_epoll_events", 0);
  return error;
}

/* Do epoll_wait and store the events in the g_epoll_set.events field. This
   does not "process" any of the events yet; that is done in
   process_epoll_events(). See process_epoll_events() for more details.

   NOTE ON SYNCHRONIZATION: At any point of time, only the g_active_poller
   (i.e. the designated poller thread) will be calling this function. So there
   is no need for any synchronization when accessing fields in g_epoll_set */
static grpc_error* do_epoll_wait(grpc_pollset* ps, grpc_millis deadline) {
  GPR_TIMER_BEGIN("do_epoll_wait", 0);

  int r;
  int timeout = poll_deadline_to_millis_timeout(deadline);
  if (timeout != 0) {
    GRPC_SCHEDULING_START_BLOCKING_REGION;
  }
  do {
    GRPC_STATS_INC_SYSCALL_POLL();
    r = epoll_wait(g_epoll_set.epfd, g_epoll_set.events, MAX_EPOLL_EVENTS,
                   timeout);
  } while (r < 0 && errno == EINTR);
  if (timeout != 0) {
    GRPC_SCHEDULING_END_BLOCKING_REGION;
  }

  if (r < 0) return GRPC_OS_ERROR(errno, "epoll_wait");

  GRPC_STATS_INC_POLL_EVENTS_RETURNED(r);

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "ps: %p poll got %d events", ps, r);
  }

  gpr_atm_rel_store(&g_epoll_set.num_events, r);
  gpr_atm_rel_store(&g_epoll_set.cursor, 0);

  GPR_TIMER_END("do_epoll_wait", 0);
  return GRPC_ERROR_NONE;
}

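/* Attach 'worker' to the pollset and wait until it is either chosen as the
   designated poller or gets kicked (a cv timeout counts as a kick). Returns
   true iff this worker should proceed to poll, i.e. it ended up as the
   designated poller and the pollset is not shutting down. */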
static bool begin_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                         grpc_pollset_worker** worker_hdl,
                         grpc_millis deadline) {
  GPR_TIMER_BEGIN("begin_worker", 0);
  if (worker_hdl != nullptr) *worker_hdl = worker;
  worker->initialized_cv = false;
  SET_KICK_STATE(worker, UNKICKED);
  worker->schedule_on_end_work = (grpc_closure_list)GRPC_CLOSURE_LIST_INIT;
  pollset->begin_refs++;

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_ERROR, "PS:%p BEGIN_STARTS:%p", pollset, worker);
  }

  if (pollset->seen_inactive) {
    // pollset has been observed to be inactive, we need to move back to the
    // active list
    bool is_reassigning = false;
    if (!pollset->reassigning_neighborhood) {
      is_reassigning = true;
      pollset->reassigning_neighborhood = true;
      pollset->neighborhood = &g_neighborhoods[choose_neighborhood()];
    }
    pollset_neighborhood* neighborhood = pollset->neighborhood;
    gpr_mu_unlock(&pollset->mu);
    // pollset unlocked: state may change (even worker->kick_state)
  retry_lock_neighborhood:
    gpr_mu_lock(&neighborhood->mu);
    gpr_mu_lock(&pollset->mu);
    if (grpc_polling_trace.enabled()) {
      gpr_log(GPR_ERROR, "PS:%p BEGIN_REORG:%p kick_state=%s is_reassigning=%d",
              pollset, worker, kick_state_string(worker->state),
              is_reassigning);
    }
    if (pollset->seen_inactive) {
      if (neighborhood != pollset->neighborhood) {
        gpr_mu_unlock(&neighborhood->mu);
        neighborhood = pollset->neighborhood;
        gpr_mu_unlock(&pollset->mu);
        goto retry_lock_neighborhood;
      }

      /* In the brief time we released the pollset locks above, the worker MAY
         have been kicked. In this case, the worker should get out of this
         pollset ASAP and hence this should neither add the pollset to
         neighborhood nor mark the pollset as active.

         On a side note, the only way a worker's kick state could have changed
         at this point is if it were "kicked specifically". Since the worker has
         not added itself to the pollset yet (by calling worker_insert()), it is
         not visible in the "kick any" path yet */
      if (worker->state == UNKICKED) {
        pollset->seen_inactive = false;
        if (neighborhood->active_root == nullptr) {
          neighborhood->active_root = pollset->next = pollset->prev = pollset;
          /* Make this the designated poller if there isn't one already */
          if (worker->state == UNKICKED &&
              gpr_atm_no_barrier_cas(&g_active_poller, 0, (gpr_atm)worker)) {
            SET_KICK_STATE(worker, DESIGNATED_POLLER);
          }
        } else {
          pollset->next = neighborhood->active_root;
          pollset->prev = pollset->next->prev;
          pollset->next->prev = pollset->prev->next = pollset;
        }
      }
    }
    if (is_reassigning) {
      GPR_ASSERT(pollset->reassigning_neighborhood);
      pollset->reassigning_neighborhood = false;
    }
    gpr_mu_unlock(&neighborhood->mu);
  }

  worker_insert(pollset, worker);
  pollset->begin_refs--;
  if (worker->state == UNKICKED && !pollset->kicked_without_poller) {
    GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
    worker->initialized_cv = true;
    gpr_cv_init(&worker->cv);
    while (worker->state == UNKICKED && !pollset->shutting_down) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_ERROR, "PS:%p BEGIN_WAIT:%p kick_state=%s shutdown=%d",
                pollset, worker, kick_state_string(worker->state),
                pollset->shutting_down);
      }

      if (gpr_cv_wait(&worker->cv, &pollset->mu,
                      grpc_millis_to_timespec(deadline, GPR_CLOCK_REALTIME)) &&
          worker->state == UNKICKED) {
        /* If gpr_cv_wait returns true (i.e a timeout), pretend that the worker
           received a kick */
        SET_KICK_STATE(worker, KICKED);
      }
    }
    grpc_core::ExecCtx::Get()->InvalidateNow();
  }

  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_ERROR,
            "PS:%p BEGIN_DONE:%p kick_state=%s shutdown=%d "
            "kicked_without_poller: %d",
            pollset, worker, kick_state_string(worker->state),
            pollset->shutting_down, pollset->kicked_without_poller);
  }

  /* We release pollset lock in this function at a couple of places:
   * 1. Briefly when assigning pollset to a neighborhood
   * 2. When doing gpr_cv_wait()
   * It is possible that 'kicked_without_poller' was set to true during (1) and
   * 'shutting_down' is set to true during (1) or (2). If either of them is
   * true, this worker cannot do polling */
  /* TODO(sreek): Perhaps there is a better way to handle kicked_without_poller
   * case; especially when the worker is the DESIGNATED_POLLER */

  if (pollset->kicked_without_poller) {
    pollset->kicked_without_poller = false;
    GPR_TIMER_END("begin_worker", 0);
    return false;
  }

  GPR_TIMER_END("begin_worker", 0);
  return worker->state == DESIGNATED_POLLER && !pollset->shutting_down;
}

static bool check_neighborhood_for_available_poller(
    pollset_neighborhood* neighborhood) {
  GPR_TIMER_BEGIN("check_neighborhood_for_available_poller", 0);
  bool found_worker = false;
  do {
    grpc_pollset* inspect = neighborhood->active_root;
    if (inspect == nullptr) {
      break;
    }
    gpr_mu_lock(&inspect->mu);
    GPR_ASSERT(!inspect->seen_inactive);
    grpc_pollset_worker* inspect_worker = inspect->root_worker;
    if (inspect_worker != nullptr) {
      do {
        switch (inspect_worker->state) {
          case UNKICKED:
            if (gpr_atm_no_barrier_cas(&g_active_poller, 0,
                                       (gpr_atm)inspect_worker)) {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. choose next poller to be %p",
                        inspect_worker);
              }
              SET_KICK_STATE(inspect_worker, DESIGNATED_POLLER);
              if (inspect_worker->initialized_cv) {
                GPR_TIMER_MARK("signal worker", 0);
                GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
                gpr_cv_signal(&inspect_worker->cv);
              }
            } else {
              if (grpc_polling_trace.enabled()) {
                gpr_log(GPR_DEBUG, " .. beaten to choose next poller");
              }
            }
            // even if we didn't win the cas, there's a worker, we can stop
            found_worker = true;
            break;
          case KICKED:
            break;
          case DESIGNATED_POLLER:
            found_worker = true;  // ok, so someone else found the worker, but
                                  // we'll accept that
            break;
        }
        inspect_worker = inspect_worker->next;
      } while (!found_worker && inspect_worker != inspect->root_worker);
    }
    if (!found_worker) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. mark pollset %p inactive", inspect);
      }
      inspect->seen_inactive = true;
      if (inspect == neighborhood->active_root) {
        neighborhood->active_root =
            inspect->next == inspect ? nullptr : inspect->next;
      }
      inspect->next->prev = inspect->prev;
      inspect->prev->next = inspect->next;
      inspect->next = inspect->prev = nullptr;
    }
    gpr_mu_unlock(&inspect->mu);
  } while (!found_worker);
  GPR_TIMER_END("check_neighborhood_for_available_poller", 0);
  return found_worker;
}

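/* Detach 'worker' from the pollset. If it was the designated poller, hand the
   role off: prefer an UNKICKED peer on the same pollset, otherwise scan the
   neighborhoods (starting with this pollset's own) for another worker that can
   take over. */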
static void end_worker(grpc_pollset* pollset, grpc_pollset_worker* worker,
                       grpc_pollset_worker** worker_hdl) {
  GPR_TIMER_BEGIN("end_worker", 0);
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, "PS:%p END_WORKER:%p", pollset, worker);
  }
  if (worker_hdl != nullptr) *worker_hdl = nullptr;
  /* Make sure we appear kicked */
  SET_KICK_STATE(worker, KICKED);
  grpc_closure_list_move(&worker->schedule_on_end_work,
                         grpc_core::ExecCtx::Get()->closure_list());
  if (gpr_atm_no_barrier_load(&g_active_poller) == (gpr_atm)worker) {
    if (worker->next != worker && worker->next->state == UNKICKED) {
      if (grpc_polling_trace.enabled()) {
        gpr_log(GPR_DEBUG, " .. choose next poller to be peer %p", worker);
      }
      GPR_ASSERT(worker->next->initialized_cv);
      gpr_atm_no_barrier_store(&g_active_poller, (gpr_atm)worker->next);
      SET_KICK_STATE(worker->next, DESIGNATED_POLLER);
      GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
      gpr_cv_signal(&worker->next->cv);
      if (grpc_core::ExecCtx::Get()->HasWork()) {
        gpr_mu_unlock(&pollset->mu);
        grpc_core::ExecCtx::Get()->Flush();
        gpr_mu_lock(&pollset->mu);
      }
    } else {
      gpr_atm_no_barrier_store(&g_active_poller, 0);
      size_t poller_neighborhood_idx =
          (size_t)(pollset->neighborhood - g_neighborhoods);
      gpr_mu_unlock(&pollset->mu);
      bool found_worker = false;
      bool scan_state[MAX_NEIGHBORHOODS];
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        if (gpr_mu_trylock(&neighborhood->mu)) {
          found_worker = check_neighborhood_for_available_poller(neighborhood);
          gpr_mu_unlock(&neighborhood->mu);
          scan_state[i] = true;
        } else {
          scan_state[i] = false;
        }
      }
      for (size_t i = 0; !found_worker && i < g_num_neighborhoods; i++) {
        if (scan_state[i]) continue;
        pollset_neighborhood* neighborhood =
            &g_neighborhoods[(poller_neighborhood_idx + i) %
                             g_num_neighborhoods];
        gpr_mu_lock(&neighborhood->mu);
        found_worker = check_neighborhood_for_available_poller(neighborhood);
        gpr_mu_unlock(&neighborhood->mu);
      }
      grpc_core::ExecCtx::Get()->Flush();
      gpr_mu_lock(&pollset->mu);
    }
  } else if (grpc_core::ExecCtx::Get()->HasWork()) {
    gpr_mu_unlock(&pollset->mu);
    grpc_core::ExecCtx::Get()->Flush();
    gpr_mu_lock(&pollset->mu);
  }
  if (worker->initialized_cv) {
    gpr_cv_destroy(&worker->cv);
  }
  if (grpc_polling_trace.enabled()) {
    gpr_log(GPR_DEBUG, " .. remove worker");
  }
  if (EMPTIED == worker_remove(pollset, worker)) {
    pollset_maybe_finish_shutdown(pollset);
  }
  GPR_ASSERT(gpr_atm_no_barrier_load(&g_active_poller) != (gpr_atm)worker);
  GPR_TIMER_END("end_worker", 0);
}

/* pollset->mu lock must be held by the caller before calling this.
   The function pollset_work() may temporarily release the lock (pollset->mu)
   during the course of its execution but it will always re-acquire the lock
   and ensure that it is held by the time the function returns */
static grpc_error* pollset_work(grpc_pollset* ps,
                                grpc_pollset_worker** worker_hdl,
                                grpc_millis deadline) {
  grpc_pollset_worker worker;
  grpc_error* error = GRPC_ERROR_NONE;
  static const char* err_desc = "pollset_work";
  GPR_TIMER_BEGIN("pollset_work", 0);
  if (ps->kicked_without_poller) {
    ps->kicked_without_poller = false;
    GPR_TIMER_END("pollset_work", 0);
    return GRPC_ERROR_NONE;
  }

  if (begin_worker(ps, &worker, worker_hdl, deadline)) {
    gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
    gpr_tls_set(&g_current_thread_worker, (intptr_t)&worker);
    GPR_ASSERT(!ps->shutting_down);
    GPR_ASSERT(!ps->seen_inactive);

    gpr_mu_unlock(&ps->mu); /* unlock */
    /* This is the designated polling thread at this point and should ideally do
       polling. However, if there are unprocessed events left from a previous
       call to do_epoll_wait(), skip calling epoll_wait() in this iteration and
       process the pending epoll events.

       The reason for decoupling do_epoll_wait and process_epoll_events is to
       better distribute the work (i.e. handling epoll events) across multiple
       threads.

       process_epoll_events() returns very quickly: it just queues the work on
       exec_ctx but does not execute it (the actual execution, or more
       accurately grpc_core::ExecCtx::Get()->Flush(), happens in end_worker()
       AFTER selecting a designated poller). So we are not waiting long periods
       without a designated poller */
Sree Kuchibhotlaa92a9cc2017-08-27 14:02:15 -0700956 if (gpr_atm_acq_load(&g_epoll_set.cursor) ==
957 gpr_atm_acq_load(&g_epoll_set.num_events)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800958 append_error(&error, do_epoll_wait(ps, deadline), err_desc);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700959 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800960 append_error(&error, process_epoll_events(ps), err_desc);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700961
962 gpr_mu_lock(&ps->mu); /* lock */
963
Craig Tiller4509c472017-04-27 19:05:13 +0000964 gpr_tls_set(&g_current_thread_worker, 0);
Craig Tiller830e82a2017-05-31 16:26:27 -0700965 } else {
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700966 gpr_tls_set(&g_current_thread_pollset, (intptr_t)ps);
Craig Tiller4509c472017-04-27 19:05:13 +0000967 }
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800968 end_worker(ps, &worker, worker_hdl);
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -0700969
Craig Tiller8502ecb2017-04-28 14:22:01 -0700970 gpr_tls_set(&g_current_thread_pollset, 0);
yang-gdf92a642017-08-21 22:38:45 -0700971 GPR_TIMER_END("pollset_work", 0);
Craig Tiller4509c472017-04-27 19:05:13 +0000972 return error;
973}
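
/* Illustrative only (not part of this engine): a minimal sketch of how a
   caller drives pollset_work() under the locking contract documented above.
   The names `my_pollset` and `deadline` are hypothetical, and the sketch
   assumes pollset_init() (defined earlier in this file) returns the pollset's
   mutex through a gpr_mu** out-parameter:

     gpr_mu* mu;
     grpc_pollset* my_pollset = ...;  // sizeof(grpc_pollset) bytes of storage
     pollset_init(my_pollset, &mu);

     gpr_mu_lock(mu);  // the lock must be held on entry ...
     grpc_pollset_worker* worker = nullptr;
     GRPC_LOG_IF_ERROR("pollset_work",
                       pollset_work(my_pollset, &worker, deadline));
     gpr_mu_unlock(mu);  // ... and is held again when pollset_work() returns
*/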
974
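/* Wake up a pollset: if `specific_worker` is nullptr, pick a worker to kick
   based on the root/next workers and their kick states; otherwise kick exactly
   that worker. Depending on the target's state this simply marks it KICKED,
   signals its condition variable, or writes to the global wakeup fd so that
   the designated poller returns from epoll_wait(). */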
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800975static grpc_error* pollset_kick(grpc_pollset* pollset,
Craig Tillerbaa14a92017-11-03 09:09:36 -0700976 grpc_pollset_worker* specific_worker) {
yang-gdf92a642017-08-21 22:38:45 -0700977 GPR_TIMER_BEGIN("pollset_kick", 0);
Yash Tibrewal8cf14702017-12-06 09:47:54 -0800978 GRPC_STATS_INC_POLLSET_KICK();
Craig Tillerbaa14a92017-11-03 09:09:36 -0700979 grpc_error* ret_err = GRPC_ERROR_NONE;
ncteisen3cffe1f2017-11-10 13:56:23 -0800980 if (grpc_polling_trace.enabled()) {
Craig Tillerb89bac02017-05-26 15:20:32 +0000981 gpr_strvec log;
982 gpr_strvec_init(&log);
Craig Tillerbaa14a92017-11-03 09:09:36 -0700983 char* tmp;
984 gpr_asprintf(&tmp, "PS:%p KICK:%p curps=%p curworker=%p root=%p", pollset,
985 specific_worker, (void*)gpr_tls_get(&g_current_thread_pollset),
986 (void*)gpr_tls_get(&g_current_thread_worker),
987 pollset->root_worker);
Craig Tillerb89bac02017-05-26 15:20:32 +0000988 gpr_strvec_add(&log, tmp);
Craig Tiller4782d922017-11-10 09:53:21 -0800989 if (pollset->root_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -0700990 gpr_asprintf(&tmp, " {kick_state=%s next=%p {kick_state=%s}}",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -0700991 kick_state_string(pollset->root_worker->state),
Craig Tiller830e82a2017-05-31 16:26:27 -0700992 pollset->root_worker->next,
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -0700993 kick_state_string(pollset->root_worker->next->state));
Craig Tillerb89bac02017-05-26 15:20:32 +0000994 gpr_strvec_add(&log, tmp);
995 }
Craig Tiller4782d922017-11-10 09:53:21 -0800996 if (specific_worker != nullptr) {
Craig Tiller830e82a2017-05-31 16:26:27 -0700997 gpr_asprintf(&tmp, " worker_kick_state=%s",
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -0700998 kick_state_string(specific_worker->state));
Craig Tillerb89bac02017-05-26 15:20:32 +0000999 gpr_strvec_add(&log, tmp);
1000 }
Craig Tiller4782d922017-11-10 09:53:21 -08001001 tmp = gpr_strvec_flatten(&log, nullptr);
Craig Tillerb89bac02017-05-26 15:20:32 +00001002 gpr_strvec_destroy(&log);
Craig Tiller830e82a2017-05-31 16:26:27 -07001003 gpr_log(GPR_ERROR, "%s", tmp);
Craig Tillerb89bac02017-05-26 15:20:32 +00001004 gpr_free(tmp);
1005 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001006
Craig Tiller4782d922017-11-10 09:53:21 -08001007 if (specific_worker == nullptr) {
Craig Tiller4509c472017-04-27 19:05:13 +00001008 if (gpr_tls_get(&g_current_thread_pollset) != (intptr_t)pollset) {
Craig Tillerbaa14a92017-11-03 09:09:36 -07001009 grpc_pollset_worker* root_worker = pollset->root_worker;
Craig Tiller4782d922017-11-10 09:53:21 -08001010 if (root_worker == nullptr) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001011 GRPC_STATS_INC_POLLSET_KICKED_WITHOUT_POLLER();
Craig Tiller4509c472017-04-27 19:05:13 +00001012 pollset->kicked_without_poller = true;
ncteisen3cffe1f2017-11-10 13:56:23 -08001013 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001014 gpr_log(GPR_ERROR, " .. kicked_without_poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001015 }
yang-gdf92a642017-08-21 22:38:45 -07001016 goto done;
Craig Tiller375eb252017-04-27 23:29:12 +00001017 }
Craig Tillerbaa14a92017-11-03 09:09:36 -07001018 grpc_pollset_worker* next_worker = root_worker->next;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001019 if (root_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001020 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001021 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001022 gpr_log(GPR_ERROR, " .. already kicked %p", root_worker);
1023 }
1024 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001025 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001026 } else if (next_worker->state == KICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001027 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001028 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001029 gpr_log(GPR_ERROR, " .. already kicked %p", next_worker);
1030 }
1031 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001032 goto done;
Craig Tiller830e82a2017-05-31 16:26:27 -07001033 } else if (root_worker ==
1034                    next_worker &&  // only try to wake up a poller if
1035                                    // there is no other worker (root == next)
Craig Tillerbaa14a92017-11-03 09:09:36 -07001036 root_worker == (grpc_pollset_worker*)gpr_atm_no_barrier_load(
Craig Tiller830e82a2017-05-31 16:26:27 -07001037 &g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001038 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001039 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001040 gpr_log(GPR_ERROR, " .. kicked %p", root_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001041 }
Craig Tiller55624a32017-05-26 08:14:44 -07001042 SET_KICK_STATE(root_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001043 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1044 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001045 } else if (next_worker->state == UNKICKED) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001046 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001047 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001048 gpr_log(GPR_ERROR, " .. kicked %p", next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001049 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001050 GPR_ASSERT(next_worker->initialized_cv);
Craig Tiller55624a32017-05-26 08:14:44 -07001051 SET_KICK_STATE(next_worker, KICKED);
Craig Tiller375eb252017-04-27 23:29:12 +00001052 gpr_cv_signal(&next_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001053 goto done;
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001054 } else if (next_worker->state == DESIGNATED_POLLER) {
1055 if (root_worker->state != DESIGNATED_POLLER) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001056 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001057 gpr_log(
1058 GPR_ERROR,
1059 " .. kicked root non-poller %p (initialized_cv=%d) (poller=%p)",
1060 root_worker, root_worker->initialized_cv, next_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001061 }
Craig Tiller55624a32017-05-26 08:14:44 -07001062 SET_KICK_STATE(root_worker, KICKED);
1063 if (root_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001064 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
Craig Tiller55624a32017-05-26 08:14:44 -07001065 gpr_cv_signal(&root_worker->cv);
1066 }
yang-gdf92a642017-08-21 22:38:45 -07001067 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001068 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001069 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001070 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001071 gpr_log(GPR_ERROR, " .. non-root poller %p (root=%p)", next_worker,
Craig Tiller75aef7f2017-05-26 08:26:08 -07001072 root_worker);
1073 }
Craig Tiller55624a32017-05-26 08:14:44 -07001074 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001075 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1076 goto done;
Craig Tiller55624a32017-05-26 08:14:44 -07001077 }
Craig Tiller8502ecb2017-04-28 14:22:01 -07001078 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001079 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001080 GPR_ASSERT(next_worker->state == KICKED);
Craig Tiller55624a32017-05-26 08:14:44 -07001081 SET_KICK_STATE(next_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001082 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001083 }
1084 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001085 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001086 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001087 gpr_log(GPR_ERROR, " .. kicked while waking up");
1088 }
yang-gdf92a642017-08-21 22:38:45 -07001089 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001090 }
Sree Kuchibhotlafb349402017-09-06 10:58:06 -07001091
1092 GPR_UNREACHABLE_CODE(goto done);
1093 }
1094
Yash Tibrewalb2a54ac2017-09-13 10:18:07 -07001095 if (specific_worker->state == KICKED) {
ncteisen3cffe1f2017-11-10 13:56:23 -08001096 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001097 gpr_log(GPR_ERROR, " .. specific worker already kicked");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001098 }
yang-gdf92a642017-08-21 22:38:45 -07001099 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001100 } else if (gpr_tls_get(&g_current_thread_worker) ==
1101 (intptr_t)specific_worker) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001102 GRPC_STATS_INC_POLLSET_KICK_OWN_THREAD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001103 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001104 gpr_log(GPR_ERROR, " .. mark %p kicked", specific_worker);
Craig Tiller75aef7f2017-05-26 08:26:08 -07001105 }
Craig Tiller55624a32017-05-26 08:14:44 -07001106 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001107 goto done;
Craig Tiller32f90ee2017-04-28 12:46:41 -07001108 } else if (specific_worker ==
Craig Tillerbaa14a92017-11-03 09:09:36 -07001109 (grpc_pollset_worker*)gpr_atm_no_barrier_load(&g_active_poller)) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001110 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_FD();
ncteisen3cffe1f2017-11-10 13:56:23 -08001111 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001112 gpr_log(GPR_ERROR, " .. kick active poller");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001113 }
Craig Tiller55624a32017-05-26 08:14:44 -07001114 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001115 ret_err = grpc_wakeup_fd_wakeup(&global_wakeup_fd);
1116 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001117 } else if (specific_worker->initialized_cv) {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001118 GRPC_STATS_INC_POLLSET_KICK_WAKEUP_CV();
ncteisen3cffe1f2017-11-10 13:56:23 -08001119 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001120 gpr_log(GPR_ERROR, " .. kick waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001121 }
Craig Tiller55624a32017-05-26 08:14:44 -07001122 SET_KICK_STATE(specific_worker, KICKED);
Craig Tiller4509c472017-04-27 19:05:13 +00001123 gpr_cv_signal(&specific_worker->cv);
yang-gdf92a642017-08-21 22:38:45 -07001124 goto done;
Craig Tiller8502ecb2017-04-28 14:22:01 -07001125 } else {
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001126 GRPC_STATS_INC_POLLSET_KICKED_AGAIN();
ncteisen3cffe1f2017-11-10 13:56:23 -08001127 if (grpc_polling_trace.enabled()) {
Craig Tiller830e82a2017-05-31 16:26:27 -07001128 gpr_log(GPR_ERROR, " .. kick non-waiting worker");
Craig Tiller75aef7f2017-05-26 08:26:08 -07001129 }
Craig Tiller55624a32017-05-26 08:14:44 -07001130 SET_KICK_STATE(specific_worker, KICKED);
yang-gdf92a642017-08-21 22:38:45 -07001131 goto done;
Craig Tiller4509c472017-04-27 19:05:13 +00001132 }
yang-gdf92a642017-08-21 22:38:45 -07001133done:
1134 GPR_TIMER_END("pollset_kick", 0);
1135 return ret_err;
Craig Tiller4509c472017-04-27 19:05:13 +00001136}
1137
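/* Every fd is registered with the single global epoll set when it is created
   (see fd_create earlier in this file), so adding an fd to an individual
   pollset (or to a pollset_set below) requires no work: these functions are
   intentional no-ops. */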
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001138static void pollset_add_fd(grpc_pollset* pollset, grpc_fd* fd) {}
Craig Tiller4509c472017-04-27 19:05:13 +00001139
Craig Tiller4509c472017-04-27 19:05:13 +00001140/*******************************************************************************
Craig Tillerc67cc992017-04-27 10:15:51 -07001141 * Pollset-set Definitions
1142 */
1143
Craig Tillerbaa14a92017-11-03 09:09:36 -07001144static grpc_pollset_set* pollset_set_create(void) {
1145 return (grpc_pollset_set*)((intptr_t)0xdeafbeef);
Craig Tillerc67cc992017-04-27 10:15:51 -07001146}
1147
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001148static void pollset_set_destroy(grpc_pollset_set* pss) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001149
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001150static void pollset_set_add_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001151
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001152static void pollset_set_del_fd(grpc_pollset_set* pss, grpc_fd* fd) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001153
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001154static void pollset_set_add_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001155
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001156static void pollset_set_del_pollset(grpc_pollset_set* pss, grpc_pollset* ps) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001157
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001158static void pollset_set_add_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001159 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001160
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001161static void pollset_set_del_pollset_set(grpc_pollset_set* bag,
Craig Tillerbaa14a92017-11-03 09:09:36 -07001162 grpc_pollset_set* item) {}
Craig Tillerc67cc992017-04-27 10:15:51 -07001163
1164/*******************************************************************************
1165 * Event engine binding
1166 */
1167
1168static void shutdown_engine(void) {
1169 fd_global_shutdown();
1170 pollset_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001171 epoll_set_shutdown();
Craig Tillerc67cc992017-04-27 10:15:51 -07001172}
1173
1174static const grpc_event_engine_vtable vtable = {
Yash Tibrewal533d1182017-09-18 10:48:22 -07001175 sizeof(grpc_pollset),
Craig Tillerc67cc992017-04-27 10:15:51 -07001176
Yash Tibrewal533d1182017-09-18 10:48:22 -07001177 fd_create,
1178 fd_wrapped_fd,
1179 fd_orphan,
1180 fd_shutdown,
1181 fd_notify_on_read,
1182 fd_notify_on_write,
1183 fd_is_shutdown,
1184 fd_get_read_notifier_pollset,
Craig Tillerc67cc992017-04-27 10:15:51 -07001185
Yash Tibrewal533d1182017-09-18 10:48:22 -07001186 pollset_init,
1187 pollset_shutdown,
1188 pollset_destroy,
1189 pollset_work,
1190 pollset_kick,
1191 pollset_add_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001192
Yash Tibrewal533d1182017-09-18 10:48:22 -07001193 pollset_set_create,
1194 pollset_set_destroy,
1195 pollset_set_add_pollset,
1196 pollset_set_del_pollset,
1197 pollset_set_add_pollset_set,
1198 pollset_set_del_pollset_set,
1199 pollset_set_add_fd,
1200 pollset_set_del_fd,
Craig Tillerc67cc992017-04-27 10:15:51 -07001201
Yash Tibrewal533d1182017-09-18 10:48:22 -07001202 shutdown_engine,
Craig Tillerc67cc992017-04-27 10:15:51 -07001203};
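
/* The generic posix event-engine layer (ev_posix) invokes the iomgr entry
   points through this vtable; grpc_init_epoll1_linux() below hands it back
   once epoll support has been verified. */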
1204
1205/* It is possible that GLIBC has epoll but the underlying kernel doesn't.
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001206 * Create an epoll fd (epoll_set_init() takes care of that) to verify that
1207 * epoll support is actually available at runtime. */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001208const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
Craig Tillerc67cc992017-04-27 10:15:51 -07001209 if (!grpc_has_wakeup_fd()) {
yang-g30101b02017-11-06 14:35:30 -08001210 gpr_log(GPR_ERROR, "Skipping epoll1 because of no wakeup fd.");
Craig Tiller4782d922017-11-10 09:53:21 -08001211 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001212 }
1213
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001214 if (!epoll_set_init()) {
Craig Tiller4782d922017-11-10 09:53:21 -08001215 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001216 }
1217
Craig Tillerc67cc992017-04-27 10:15:51 -07001218 fd_global_init();
1219
1220 if (!GRPC_LOG_IF_ERROR("pollset_global_init", pollset_global_init())) {
Craig Tiller4509c472017-04-27 19:05:13 +00001221 fd_global_shutdown();
Sree Kuchibhotla5efc9132017-08-17 14:10:38 -07001222 epoll_set_shutdown();
Craig Tiller4782d922017-11-10 09:53:21 -08001223 return nullptr;
Craig Tillerc67cc992017-04-27 10:15:51 -07001224 }
1225
1226 return &vtable;
1227}
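
/* For reference, a minimal sketch (not the actual implementation, which lives
   in epoll_set_init() earlier in this file) of how epoll support can be probed
   at runtime even when glibc exposes the epoll symbols:

     int epfd = epoll_create1(EPOLL_CLOEXEC);
     if (epfd < 0) {
       gpr_log(GPR_ERROR, "epoll unavailable: %s", strerror(errno));
       return false;  // caller falls back to another polling engine
     }
     // on success, keep epfd as the singleton epoll set's descriptor
*/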
1228
1229#else /* defined(GRPC_LINUX_EPOLL) */
1230#if defined(GRPC_POSIX_SOCKET)
Yash Tibrewal1cac2232017-09-26 11:31:11 -07001231#include "src/core/lib/iomgr/ev_epoll1_linux.h"
Craig Tillerc67cc992017-04-27 10:15:51 -07001232/* If GRPC_LINUX_EPOLL is not defined, it means epoll is not available. Return
1233 * NULL */
Craig Tillerbaa14a92017-11-03 09:09:36 -07001234const grpc_event_engine_vtable* grpc_init_epoll1_linux(bool explicit_request) {
yang-g30101b02017-11-06 14:35:30 -08001235 gpr_log(GPR_ERROR,
yang-g56e72572017-11-06 15:54:48 -08001236          "Skipping epoll1 because GRPC_LINUX_EPOLL is not defined.");
Yash Tibrewal8cf14702017-12-06 09:47:54 -08001237 return nullptr;
Craig Tiller9ddb3152017-04-27 21:32:56 +00001238}
Craig Tillerc67cc992017-04-27 10:15:51 -07001239#endif /* defined(GRPC_POSIX_SOCKET) */
1240#endif /* !defined(GRPC_LINUX_EPOLL) */