blob: 25da3979e995ac1ec181c65f064b1dd84ee02056 [file] [log] [blame]
ctiller58393c22015-01-07 14:03:30 -08001/*
2 *
Craig Tiller06059952015-02-18 08:34:56 -08003 * Copyright 2015, Google Inc.
ctiller58393c22015-01-07 14:03:30 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Craig Tillerd14a1a52015-01-21 15:26:29 -080034#include <grpc/support/port_platform.h>
35
36#ifdef GPR_POSIX_SOCKET
37
ctiller58393c22015-01-07 14:03:30 -080038#include "src/core/iomgr/fd_posix.h"
39
40#include <assert.h>
David Klempnerc6bccc22015-02-24 17:33:05 -080041#include <sys/socket.h>
ctiller58393c22015-01-07 14:03:30 -080042#include <unistd.h>
43
ctiller58393c22015-01-07 14:03:30 -080044#include <grpc/support/alloc.h>
45#include <grpc/support/log.h>
46#include <grpc/support/useful.h>
47
/* Values held in grpc_fd's readst/writest atomics.  A state is either one of
   these two enum values OR a pointer to a grpc_iomgr_closure registered via
   notify_on; valid closure pointers are distinguishable because they compare
   greater than READY (see the `> READY` tests in grpc_fd_begin_poll). */
enum descriptor_state {
  NOT_READY = 0, /* no event seen, no closure registered */
  READY = 1      /* event arrived before any closure was registered */
}; /* or a pointer to a closure to call */
ctiller58393c22015-01-07 14:03:30 -080052
/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */
/* TODO(klempner): We could use some form of polling generation count to know
 * when these are safe to free. */
/* TODO(klempner): Consider disabling freelisting if we don't have multiple
 * threads in poll on the same fd */
/* TODO(klempner): Batch these allocations to reduce fragmentation */
/* Head of the global fd freelist; both variables are guarded by
   fd_freelist_mu (initialized in grpc_fd_global_init). */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
72
/* Return fd to the global freelist instead of freeing it: a poller may still
   hold a stale reference (see the freelist rationale above), so the memory is
   recycled, never released, until grpc_fd_global_shutdown. */
static void freelist_fd(grpc_fd *fd) {
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  /* unregister from the iomgr while still holding the freelist lock, so the
     object cannot be recycled (and re-registered) concurrently */
  grpc_iomgr_unregister_object(&fd->iomgr_object);
  gpr_mu_unlock(&fd_freelist_mu);
}
80
/* Obtain a grpc_fd wrapping kernel descriptor `fd`: pop one off the freelist
   if possible, otherwise malloc a fresh one.  The mutexes are initialized only
   for fresh allocations -- recycled objects keep theirs -- while all per-use
   state is (re)initialized unconditionally. */
static grpc_fd *alloc_fd(int fd) {
  grpc_fd *r = NULL;
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    r = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);
  if (r == NULL) {
    /* freelist was empty: allocate and do the once-only mutex setup */
    r = gpr_malloc(sizeof(grpc_fd));
    gpr_mu_init(&r->set_state_mu);
    gpr_mu_init(&r->watcher_mu);
  }

  /* refcount starts at 1: the low bit is the "active" (not yet orphaned)
     flag; all other refs are taken in units of 2 (see ref_by callers) */
  gpr_atm_rel_store(&r->refst, 1);
  gpr_atm_rel_store(&r->readst, NOT_READY);
  gpr_atm_rel_store(&r->writest, NOT_READY);
  gpr_atm_rel_store(&r->shutdown, 0);
  r->fd = fd;
  /* empty circular doubly-linked list: the root points at itself */
  r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
      &r->inactive_watcher_root;
  r->freelist_next = NULL;
  r->read_watcher = r->write_watcher = NULL;
  r->on_done_closure = NULL;
  return r;
}
107
108static void destroy(grpc_fd *fd) {
109 gpr_mu_destroy(&fd->set_state_mu);
110 gpr_mu_destroy(&fd->watcher_mu);
ctiller58393c22015-01-07 14:03:30 -0800111 gpr_free(fd);
ctiller58393c22015-01-07 14:03:30 -0800112}
113
Craig Tiller9ae76972015-05-31 13:58:24 -0700114#ifdef GRPC_FD_REF_COUNT_DEBUG
115#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
116#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
Craig Tiller8674cb12015-06-05 07:09:25 -0700117static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
118 int line) {
Craig Tiller87cc0842015-06-30 08:15:55 -0700119 gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
Craig Tiller4b678bd2015-06-02 16:12:24 -0700120 gpr_atm_no_barrier_load(&fd->refst),
121 gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
Craig Tiller9ae76972015-05-31 13:58:24 -0700122#else
123#define REF_BY(fd, n, reason) ref_by(fd, n)
124#define UNREF_BY(fd, n, reason) unref_by(fd, n)
ctiller58393c22015-01-07 14:03:30 -0800125static void ref_by(grpc_fd *fd, int n) {
Craig Tiller9ae76972015-05-31 13:58:24 -0700126#endif
Craig Tiller23139ae2015-02-17 15:46:13 -0800127 GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
ctiller58393c22015-01-07 14:03:30 -0800128}
129
Craig Tiller9ae76972015-05-31 13:58:24 -0700130#ifdef GRPC_FD_REF_COUNT_DEBUG
Craig Tiller8674cb12015-06-05 07:09:25 -0700131static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
132 int line) {
Craig Tiller9ae76972015-05-31 13:58:24 -0700133 gpr_atm old;
Craig Tiller8e0b08a2015-06-01 17:04:17 -0700134 gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
Craig Tiller4b678bd2015-06-02 16:12:24 -0700135 gpr_atm_no_barrier_load(&fd->refst),
136 gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
Craig Tiller9ae76972015-05-31 13:58:24 -0700137#else
ctiller58393c22015-01-07 14:03:30 -0800138static void unref_by(grpc_fd *fd, int n) {
Craig Tiller9ae76972015-05-31 13:58:24 -0700139 gpr_atm old;
140#endif
141 old = gpr_atm_full_fetch_add(&fd->refst, -n);
Craig Tiller23139ae2015-02-17 15:46:13 -0800142 if (old == n) {
Craig Tiller6f432162015-06-02 12:51:43 -0700143 freelist_fd(fd);
Craig Tiller23139ae2015-02-17 15:46:13 -0800144 } else {
145 GPR_ASSERT(old > n);
David Klempnerd1785242015-01-28 17:00:21 -0800146 }
147}
148
Craig Tiller7d413212015-02-09 08:00:02 -0800149void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
David Klempnerd1785242015-01-28 17:00:21 -0800150
151void grpc_fd_global_shutdown(void) {
152 while (fd_freelist != NULL) {
153 grpc_fd *fd = fd_freelist;
154 fd_freelist = fd_freelist->freelist_next;
ctiller58393c22015-01-07 14:03:30 -0800155 destroy(fd);
156 }
David Klempnerd1785242015-01-28 17:00:21 -0800157 gpr_mu_destroy(&fd_freelist_mu);
ctiller58393c22015-01-07 14:03:30 -0800158}
159
Craig Tillerfa275a92015-06-01 13:55:54 -0700160grpc_fd *grpc_fd_create(int fd, const char *name) {
David Klempnerd1785242015-01-28 17:00:21 -0800161 grpc_fd *r = alloc_fd(fd);
Craig Tillerfa275a92015-06-01 13:55:54 -0700162 grpc_iomgr_register_object(&r->iomgr_object, name);
ctiller58393c22015-01-07 14:03:30 -0800163 return r;
164}
165
/* An fd is orphaned once grpc_fd_orphan has cleared the low "active" bit of
   refst (alloc_fd sets it to 1; ordinary refs add/subtract 2, preserving the
   bit; grpc_fd_orphan's net -1 clears it). */
int grpc_fd_is_orphaned(grpc_fd *fd) {
  return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}
169
Craig Tiller5ddbb9d2015-07-29 15:58:11 -0700170static void pollset_kick_locked(grpc_pollset *pollset) {
171 gpr_mu_lock(GRPC_POLLSET_MU(pollset));
172 grpc_pollset_kick(pollset, NULL);
173 gpr_mu_unlock(GRPC_POLLSET_MU(pollset));
174}
175
Craig Tiller886d7ec2015-05-14 16:18:42 -0700176static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
Craig Tiller354bf6d2015-05-18 10:18:03 -0700177 if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -0700178 pollset_kick_locked(fd->inactive_watcher_root.next->pollset);
Craig Tiller354bf6d2015-05-18 10:18:03 -0700179 } else if (fd->read_watcher) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -0700180 pollset_kick_locked(fd->read_watcher->pollset);
Craig Tiller354bf6d2015-05-18 10:18:03 -0700181 } else if (fd->write_watcher) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -0700182 pollset_kick_locked(fd->write_watcher->pollset);
Craig Tiller886d7ec2015-05-14 16:18:42 -0700183 }
184}
185
/* Wake at most one watcher, acquiring the watcher lock ourselves.  Used
   from notify_on, where the caller does not hold fd->watcher_mu. */
static void maybe_wake_one_watcher(grpc_fd *fd) {
  gpr_mu_lock(&fd->watcher_mu);
  maybe_wake_one_watcher_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
}
191
Craig Tillerc95de722015-05-29 08:56:46 -0700192static void wake_all_watchers_locked(grpc_fd *fd) {
Craig Tiller886d7ec2015-05-14 16:18:42 -0700193 grpc_fd_watcher *watcher;
Craig Tiller8e50fe92015-05-18 10:45:04 -0700194 for (watcher = fd->inactive_watcher_root.next;
195 watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -0700196 pollset_kick_locked(watcher->pollset);
ctiller58393c22015-01-07 14:03:30 -0800197 }
Craig Tiller354bf6d2015-05-18 10:18:03 -0700198 if (fd->read_watcher) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -0700199 pollset_kick_locked(fd->read_watcher->pollset);
Craig Tiller354bf6d2015-05-18 10:18:03 -0700200 }
201 if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
Craig Tiller5ddbb9d2015-07-29 15:58:11 -0700202 pollset_kick_locked(fd->write_watcher->pollset);
Craig Tiller354bf6d2015-05-18 10:18:03 -0700203 }
ctiller58393c22015-01-07 14:03:30 -0800204}
205
Craig Tiller8b6cb8d2015-06-26 08:08:35 -0700206static int has_watchers(grpc_fd *fd) {
Craig Tiller079a11b2015-06-30 10:07:15 -0700207 return fd->read_watcher != NULL || fd->write_watcher != NULL ||
208 fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
Craig Tiller8b6cb8d2015-06-26 08:08:35 -0700209}
210
/* Begin tearing down an fd: shut the socket down so pollers wake, then drop
   the "active" low bit of the refcount (REF 1 followed by UNREF 2 is a net
   -1).  If nobody is polling, the descriptor is closed and on_done scheduled
   immediately; otherwise all watchers are kicked and the last
   grpc_fd_end_poll performs the close.  on_done may be NULL. */
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
                    const char *reason) {
  fd->on_done_closure = on_done;
  shutdown(fd->fd, SHUT_RDWR);
  REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  if (!has_watchers(fd)) {
    /* no pollers: safe to close and complete right away */
    close(fd->fd);
    if (fd->on_done_closure) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
  } else {
    /* pollers present: wake them all; grpc_fd_end_poll closes later */
    wake_all_watchers_locked(fd);
  }
  gpr_mu_unlock(&fd->watcher_mu);
  UNREF_BY(fd, 2, reason); /* drop the reference */
}
228
/* Public ref/unref pair.  Increment/decrement the refcount by two to avoid
   changing the orphan (low) bit.  The debug variants thread a reason and
   call site through to ref_by/unref_by for logging. */
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line) {
  ref_by(fd, 2, reason, file, line);
}

void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file,
                   int line) {
  unref_by(fd, 2, reason, file, line);
}
#else
void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }

void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
ctiller58393c22015-01-07 14:03:30 -0800244
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700245static void process_callback(grpc_iomgr_closure *closure, int success,
Craig Tiller8674cb12015-06-05 07:09:25 -0700246 int allow_synchronous_callback) {
ctiller58393c22015-01-07 14:03:30 -0800247 if (allow_synchronous_callback) {
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700248 closure->cb(closure->cb_arg, success);
ctiller58393c22015-01-07 14:03:30 -0800249 } else {
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700250 grpc_iomgr_add_delayed_callback(closure, success);
ctiller58393c22015-01-07 14:03:30 -0800251 }
252}
253
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700254static void process_callbacks(grpc_iomgr_closure *callbacks, size_t n,
255 int success, int allow_synchronous_callback) {
ctiller58393c22015-01-07 14:03:30 -0800256 size_t i;
257 for (i = 0; i < n; i++) {
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700258 process_callback(callbacks + i, success, allow_synchronous_callback);
ctiller58393c22015-01-07 14:03:30 -0800259 }
260}
261
/* Register `closure` against the descriptor state *st (readst or writest).
   If the state is already READY, consume the readiness and deliver the
   closure now (success == fd not shut down); otherwise CAS the closure
   pointer into *st for a later set_ready to deliver.  At most one notify_on
   per state may be outstanding at a time -- a second one aborts. */
static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
                      int allow_synchronous_callback) {
  switch (gpr_atm_acq_load(st)) {
    case NOT_READY:
      /* There is no race if the descriptor is already ready, so we skip
         the interlocked op in that case.  As long as the app doesn't
         try to set the same upcall twice (which it shouldn't) then
         oldval should never be anything other than READY or NOT_READY.
         We don't check for user error on the fast path. */
      if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
        /* swap was successful -- the closure will run after the next
           set_ready call.  NOTE: we don't have an ABA problem here,
           since we should never have concurrent calls to the same
           notify_on function. */
        maybe_wake_one_watcher(fd);
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the READY code below */
    case READY:
      GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
      gpr_atm_rel_store(st, NOT_READY);
      /* deliver now; success is false once the fd has been shut down */
      process_callback(closure, !gpr_atm_acq_load(&fd->shutdown),
                       allow_synchronous_callback);
      return;
    default: /* WAITING */
      /* upcallptr was set to a different closure.  This is an error! */
      gpr_log(GPR_ERROR,
              "User called a notify_on function with a previous callback still "
              "pending");
      abort();
  }
  /* unreachable: every switch arm returns or aborts */
  gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
  abort();
}
298
/* Transition the descriptor state *st to READY.  If a closure pointer was
   already registered (any value other than READY/NOT_READY), pop it into
   callbacks[] -- advancing *ncallbacks -- and reset the state to NOT_READY;
   a duplicate ready is ignored.  Caller holds fd->set_state_mu. */
static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful due to an intervening notify_on call.
         Re-load the (now closure-valued) state and fall through to the
         WAITING code below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
                 gpr_atm_no_barrier_load(st) != NOT_READY);
      /* hand the registered closure back to the caller for delivery */
      callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}
324
/* Mark the descriptor state *st ready, delivering any closure a prior
   notify_on registered.  Only one set_ready per state can be active at once
   (but there may be a racing notify_on). */
static void set_ready(grpc_fd *fd, gpr_atm *st,
                      int allow_synchronous_callback) {
  int success;
  grpc_iomgr_closure *closure;
  size_t ncb = 0;

  gpr_mu_lock(&fd->set_state_mu);
  set_ready_locked(st, &closure, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  /* delivery succeeds unless the fd has been shut down */
  success = !gpr_atm_acq_load(&fd->shutdown);
  /* a single state yields at most one pending closure */
  GPR_ASSERT(ncb <= 1);
  if (ncb > 0) {
    process_callbacks(closure, ncb, success, allow_synchronous_callback);
  }
}
342
343void grpc_fd_shutdown(grpc_fd *fd) {
ctiller58393c22015-01-07 14:03:30 -0800344 size_t ncb = 0;
345 gpr_mu_lock(&fd->set_state_mu);
David Klempner466423b2015-03-11 15:00:46 -0700346 GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
ctiller58393c22015-01-07 14:03:30 -0800347 gpr_atm_rel_store(&fd->shutdown, 1);
David Garcia Quintas2738ae82015-05-28 16:06:48 -0700348 set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
349 set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
ctiller58393c22015-01-07 14:03:30 -0800350 gpr_mu_unlock(&fd->set_state_mu);
David Garcia Quintas07997b62015-05-28 13:51:50 -0700351 GPR_ASSERT(ncb <= 2);
David Garcia Quintas1c762bd2015-05-31 17:04:43 -0700352 process_callbacks(fd->shutdown_closures[0], ncb, 0 /* GPR_FALSE */,
353 0 /* GPR_FALSE */);
ctiller58393c22015-01-07 14:03:30 -0800354}
355
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800356void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
357 notify_on(fd, &fd->readst, closure, 0);
ctiller58393c22015-01-07 14:03:30 -0800358}
359
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800360void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
361 notify_on(fd, &fd->writest, closure, 0);
ctiller58393c22015-01-07 14:03:30 -0800362}
363
/* Called by a pollset that is about to poll this fd.  Returns the subset of
   read_mask/write_mask this poller should actually poll for, making it the
   fd's active read and/or write watcher; returns 0 when it should poll
   nothing (it is parked on the inactive-watcher list, or the fd is shutting
   down).  Takes a "poll" ref that grpc_fd_end_poll releases. */
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              gpr_uint32 read_mask, gpr_uint32 write_mask,
                              grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->watcher_mu);
  /* if we are shutdown, then don't add to the watcher set */
  if (gpr_atm_no_barrier_load(&fd->shutdown)) {
    watcher->fd = NULL;
    watcher->pollset = NULL;
    gpr_mu_unlock(&fd->watcher_mu);
    GRPC_FD_UNREF(fd, "poll");
    return 0;
  }
  /* if there is nobody polling for read, but we need to (a closure pointer,
     which compares > READY, is registered), then start doing so */
  if (read_mask && !fd->read_watcher && gpr_atm_acq_load(&fd->readst) > READY) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  if (write_mask && !fd->write_watcher &&
      gpr_atm_acq_load(&fd->writest) > READY) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later:
     splice it at the head of the inactive-watcher list */
  if (mask == 0) {
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->watcher_mu);

  return mask;
}
404
/* Undo grpc_fd_begin_poll.  got_read/got_write report whether the poll
   actually observed readiness; if this watcher held an active slot but saw
   nothing, another watcher is kicked to take over.  If the fd has been
   orphaned and this was the last watcher, the descriptor is closed here and
   the orphan's on_done callback is scheduled.  Releases the "poll" ref. */
void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  /* begin_poll stored NULL when the fd was already shutting down */
  if (fd == NULL) {
    return;
  }

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  /* last watcher out on an orphaned fd performs the deferred close */
  if (grpc_fd_is_orphaned(fd) && !has_watchers(fd)) {
    close(fd->fd);
    if (fd->on_done_closure != NULL) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
  }
  gpr_mu_unlock(&fd->watcher_mu);

  GRPC_FD_UNREF(fd, "poll");
}
445
446void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
447 set_ready(fd, &fd->readst, allow_synchronous_callback);
448}
449
450void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
451 set_ready(fd, &fd->writest, allow_synchronous_callback);
452}
Craig Tillerd14a1a52015-01-21 15:26:29 -0800453
Craig Tiller190d3602015-02-18 09:23:38 -0800454#endif