/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <grpc/support/port_platform.h>

#ifdef GPR_POSIX_SOCKET

#include "src/core/iomgr/fd_posix.h"

#include <assert.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>

enum descriptor_state {
  NOT_READY = 0,
  READY = 1
}; /* or a pointer to a closure to call */
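
/* The read and write state fields of a grpc_fd (readst, writest) each hold
   one of three values: NOT_READY, READY, or a pointer to the
   grpc_iomgr_closure to run when the fd next becomes ready.  notify_on()
   installs a closure (NOT_READY -> closure) or runs it immediately
   (READY -> NOT_READY); set_ready() records readiness (NOT_READY -> READY)
   or collects an installed closure for execution (closure -> NOT_READY). */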

/* We need to keep a freelist not because of any concern about malloc
 * performance, but so that implementations with multiple threads in (for
 * example) epoll_wait can deal with the race between pollset removal and
 * incoming poll notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at
 * least without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */
/* TODO(klempner): We could use some form of polling generation count to know
 * when these are safe to free. */
/* TODO(klempner): Consider disabling freelisting if we don't have multiple
 * threads in poll on the same fd */
/* TODO(klempner): Batch these allocations to reduce fragmentation */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;

static void freelist_fd(grpc_fd *fd) {
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_fd *alloc_fd(int fd) {
  grpc_fd *r = NULL;
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    r = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);
  if (r == NULL) {
    r = gpr_malloc(sizeof(grpc_fd));
    gpr_mu_init(&r->set_state_mu);
    gpr_mu_init(&r->watcher_mu);
  }

  gpr_atm_rel_store(&r->refst, 1);
  gpr_atm_rel_store(&r->readst, NOT_READY);
  gpr_atm_rel_store(&r->writest, NOT_READY);
  gpr_atm_rel_store(&r->shutdown, 0);
  r->fd = fd;
  r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
      &r->inactive_watcher_root;
  r->freelist_next = NULL;
  r->read_watcher = r->write_watcher = NULL;
  return r;
}

static void destroy(grpc_fd *fd) {
  gpr_mu_destroy(&fd->set_state_mu);
  gpr_mu_destroy(&fd->watcher_mu);
  gpr_free(fd);
}

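/* Reference counting: the low bit of refst is an "active" flag, set when
   the fd is created (alloc_fd stores 1) and cleared when it is orphaned;
   all other references are taken and dropped in units of 2 so that the
   flag is never disturbed (see grpc_fd_ref/grpc_fd_unref below).  With
   GRPC_FD_REF_COUNT_DEBUG defined, each ref/unref is logged with a reason
   and call site. */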
#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
          gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}

#ifdef GRPC_FD_REF_COUNT_DEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                     int line) {
  gpr_atm old;
  gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
          gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
  gpr_atm old;
#endif
  old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    if (fd->on_done_closure) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
    grpc_iomgr_unregister_object(&fd->iomgr_object);
    freelist_fd(fd);
  } else {
    GPR_ASSERT(old > n);
  }
}

void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

void grpc_fd_global_shutdown(void) {
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    destroy(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

grpc_fd *grpc_fd_create(int fd, const char *name) {
  grpc_fd *r = alloc_fd(fd);
  grpc_iomgr_register_object(&r->iomgr_object, name);
  return r;
}

int grpc_fd_is_orphaned(grpc_fd *fd) {
  return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}

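/* Kick a single poller so it re-examines this fd's interest set: prefer an
   inactive watcher if one exists, otherwise fall back to the current read
   or write watcher. */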
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
  if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
    grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset);
  } else if (fd->read_watcher) {
    grpc_pollset_force_kick(fd->read_watcher->pollset);
  } else if (fd->write_watcher) {
    grpc_pollset_force_kick(fd->write_watcher->pollset);
  }
}

static void maybe_wake_one_watcher(grpc_fd *fd) {
  gpr_mu_lock(&fd->watcher_mu);
  maybe_wake_one_watcher_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
}

static void wake_all_watchers_locked(grpc_fd *fd) {
  grpc_fd_watcher *watcher;
  for (watcher = fd->inactive_watcher_root.next;
       watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
    grpc_pollset_force_kick(watcher->pollset);
  }
  if (fd->read_watcher) {
    grpc_pollset_force_kick(fd->read_watcher->pollset);
  }
  if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
    grpc_pollset_force_kick(fd->write_watcher->pollset);
  }
}

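/* Orphaning an fd: shut it down at the socket level, wake every watcher so
   pollers drop their interest, and release the active reference.  The
   grpc_fd itself is freelisted only once the last poller reference is
   dropped (see unref_by). */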
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
                    const char *reason) {
  fd->on_done_closure = on_done;
  shutdown(fd->fd, SHUT_RDWR);
  REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  wake_all_watchers_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
  UNREF_BY(fd, 2, reason); /* drop the reference */
}

/* increment refcount by two to avoid changing the orphan bit */
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line) {
  ref_by(fd, 2, reason, file, line);
}

void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file,
                   int line) {
  unref_by(fd, 2, reason, file, line);
}
#else
void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }

void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif

static void process_callback(grpc_iomgr_closure *closure, int success,
                             int allow_synchronous_callback) {
  if (allow_synchronous_callback) {
    closure->cb(closure->cb_arg, success);
  } else {
    grpc_iomgr_add_delayed_callback(closure, success);
  }
}

/* NOTE: callbacks is an array of closure *pointers*, as filled in by
   set_ready_locked; it is not a contiguous array of closures */
static void process_callbacks(grpc_iomgr_closure **callbacks, size_t n,
                              int success, int allow_synchronous_callback) {
  size_t i;
  for (i = 0; i < n; i++) {
    process_callback(callbacks[i], success, allow_synchronous_callback);
  }
}

static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
                      int allow_synchronous_callback) {
  switch (gpr_atm_acq_load(st)) {
    case NOT_READY:
      /* There is no race if the descriptor is already ready, so we skip
         the interlocked op in that case. As long as the app doesn't
         try to set the same upcall twice (which it shouldn't) then
         oldval should never be anything other than READY or NOT_READY. We
         don't check for user error on the fast path. */
      if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
        /* swap was successful -- the closure will run after the next
           set_ready call. NOTE: we don't have an ABA problem here,
           since we should never have concurrent calls to the same
           notify_on function. */
        maybe_wake_one_watcher(fd);
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the READY code below */
    case READY:
      GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
      gpr_atm_rel_store(st, NOT_READY);
      process_callback(closure, !gpr_atm_acq_load(&fd->shutdown),
                       allow_synchronous_callback);
      return;
    default: /* WAITING */
      /* upcallptr was set to a different closure. This is an error! */
      gpr_log(GPR_ERROR,
              "User called a notify_on function with a previous callback "
              "still pending");
      abort();
  }
  gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
  abort();
}

static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful due to an intervening notify_on call.
         Fall through to the WAITING code below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
                 gpr_atm_no_barrier_load(st) != NOT_READY);
      callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}

static void set_ready(grpc_fd *fd, gpr_atm *st,
                      int allow_synchronous_callback) {
  /* only one set_ready can be active at once (but there may be a racing
     notify_on) */
  int success;
  grpc_iomgr_closure *closure;
  size_t ncb = 0;

  gpr_mu_lock(&fd->set_state_mu);
  set_ready_locked(st, &closure, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  success = !gpr_atm_acq_load(&fd->shutdown);
  GPR_ASSERT(ncb <= 1);
  if (ncb > 0) {
    process_callbacks(&closure, ncb, success, allow_synchronous_callback);
  }
}

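/* Flag the fd as shut down and run any already-armed read/write closures
   with success == 0; closures armed after this point are also run with
   success == 0 (see notify_on). */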
void grpc_fd_shutdown(grpc_fd *fd) {
  size_t ncb = 0;
  gpr_mu_lock(&fd->set_state_mu);
  GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
  gpr_atm_rel_store(&fd->shutdown, 1);
  set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
  set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  GPR_ASSERT(ncb <= 2);
  process_callbacks(fd->shutdown_closures, ncb, 0 /* GPR_FALSE */,
                    0 /* GPR_FALSE */);
}

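/* Illustrative usage sketch (not part of this file): a transport arms a
   read callback roughly as follows, assuming a grpc_iomgr_closure_init
   helper as in iomgr.h and a hypothetical on_read handler:

     grpc_iomgr_closure_init(&tcp->read_closure, on_read, tcp);
     grpc_fd_notify_on_read(tcp->em_fd, &tcp->read_closure);

   Each arming fires the closure exactly once; success == 0 indicates the
   fd was shut down. */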
void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->readst, closure, 0);
}

void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->writest, closure, 0);
}

gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              gpr_uint32 read_mask, gpr_uint32 write_mask,
                              grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->watcher_mu);
  /* if there is nobody polling for read, but we need to, then start doing so
   */
  if (!fd->read_watcher && gpr_atm_acq_load(&fd->readst) > READY) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  if (!fd->write_watcher && gpr_atm_acq_load(&fd->writest) > READY) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later */
  if (mask == 0) {
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->watcher_mu);

  return mask;
}

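/* Counterpart to grpc_fd_begin_poll above: begin_poll registered this
   watcher as the read and/or write watcher (or parked it on the inactive
   list); end_poll undoes that registration and, if an event was wanted but
   not delivered, kicks another watcher so the interest is not dropped. */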
void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  gpr_mu_unlock(&fd->watcher_mu);

  GRPC_FD_UNREF(fd, "poll");
}

void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->readst, allow_synchronous_callback);
}

void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->writest, allow_synchronous_callback);
}

#endif