/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <grpc/support/port_platform.h>

#ifdef GPR_POSIX_SOCKET

#include "src/core/iomgr/fd_posix.h"

#include <assert.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>

enum descriptor_state {
  NOT_READY = 0,
  READY = 1
}; /* or a pointer to a closure to call */
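
/* Only two of the three logical states of the readst/writest atomics are
   named above.  The third state is any other value: a grpc_iomgr_closure*
   registered via a notify_on function, waiting to be invoked when the
   descriptor becomes ready.  The intended transitions:

     notify_on: NOT_READY --CAS--> closure    (a poller will deliver it)
     set_ready: NOT_READY --CAS--> READY      (the event beat the waiter)
     set_ready: closure   ------> NOT_READY   (and the closure is run)
     notify_on: READY     ------> NOT_READY   (and the closure is run) */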

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at
 * least without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */
/* TODO(klempner): We could use some form of polling generation count to know
 * when these are safe to free. */
/* TODO(klempner): Consider disabling freelisting if we don't have multiple
 * threads in poll on the same fd */
/* TODO(klempner): Batch these allocations to reduce fragmentation */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;

static void freelist_fd(grpc_fd *fd) {
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  grpc_iomgr_unregister_object(&fd->iomgr_object);
  gpr_mu_unlock(&fd_freelist_mu);
}

static grpc_fd *alloc_fd(int fd) {
  grpc_fd *r = NULL;
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    r = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);
  if (r == NULL) {
    r = gpr_malloc(sizeof(grpc_fd));
    gpr_mu_init(&r->set_state_mu);
    gpr_mu_init(&r->watcher_mu);
  }

  gpr_atm_rel_store(&r->refst, 1);
  gpr_atm_rel_store(&r->readst, NOT_READY);
  gpr_atm_rel_store(&r->writest, NOT_READY);
  gpr_atm_rel_store(&r->shutdown, 0);
  r->fd = fd;
  r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
      &r->inactive_watcher_root;
  r->freelist_next = NULL;
  r->read_watcher = r->write_watcher = NULL;
  r->on_done_closure = NULL;
  r->closed = 0;
  return r;
}

static void destroy(grpc_fd *fd) {
  gpr_mu_destroy(&fd->set_state_mu);
  gpr_mu_destroy(&fd->watcher_mu);
  gpr_free(fd);
}

#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
          gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}

#ifdef GRPC_FD_REF_COUNT_DEBUG
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                     int line) {
  gpr_atm old;
  gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
          gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
  gpr_atm old;
#endif
  old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    freelist_fd(fd);
  } else {
    GPR_ASSERT(old > n);
  }
}

void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }

void grpc_fd_global_shutdown(void) {
  while (fd_freelist != NULL) {
    grpc_fd *fd = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
    destroy(fd);
  }
  gpr_mu_destroy(&fd_freelist_mu);
}

grpc_fd *grpc_fd_create(int fd, const char *name) {
  grpc_fd *r = alloc_fd(fd);
  grpc_iomgr_register_object(&r->iomgr_object, name);
  return r;
}

int grpc_fd_is_orphaned(grpc_fd *fd) {
  return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}
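
/* Reference-count encoding: bit 0 of refst is an "active, not yet orphaned"
   flag and the remaining bits count references, so alloc_fd's initial store
   of 1 means "active with no extra refs".  grpc_fd_ref and grpc_fd_unref
   always move refst in steps of 2 to leave the flag alone.  grpc_fd_orphan's
   REF_BY(fd, 1) flips the flag off (making the fd look orphaned) while
   keeping it referenced for the duration of the orphan work; the matching
   UNREF_BY(fd, 2) then lets refst fall to zero once all pollers have
   released it, at which point unref_by freelists the fd. */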

static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
  if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
    grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset);
  } else if (fd->read_watcher) {
    grpc_pollset_force_kick(fd->read_watcher->pollset);
  } else if (fd->write_watcher) {
    grpc_pollset_force_kick(fd->write_watcher->pollset);
  }
}

static void maybe_wake_one_watcher(grpc_fd *fd) {
  gpr_mu_lock(&fd->watcher_mu);
  maybe_wake_one_watcher_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
}

static void wake_all_watchers_locked(grpc_fd *fd) {
  grpc_fd_watcher *watcher;
  for (watcher = fd->inactive_watcher_root.next;
       watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
    grpc_pollset_force_kick(watcher->pollset);
  }
  if (fd->read_watcher) {
    grpc_pollset_force_kick(fd->read_watcher->pollset);
  }
  if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
    grpc_pollset_force_kick(fd->write_watcher->pollset);
  }
}

static int has_watchers(grpc_fd *fd) {
  return fd->read_watcher != NULL || fd->write_watcher != NULL ||
         fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
}

void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
                    const char *reason) {
  fd->on_done_closure = on_done;
  shutdown(fd->fd, SHUT_RDWR);
  REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  if (!has_watchers(fd)) {
    GPR_ASSERT(!fd->closed);
    fd->closed = 1;
    close(fd->fd);
    if (fd->on_done_closure) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
  } else {
    wake_all_watchers_locked(fd);
  }
  gpr_mu_unlock(&fd->watcher_mu);
  UNREF_BY(fd, 2, reason); /* drop the reference */
}
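
/* There are two paths to close(): if no watcher holds the fd at orphan time,
   grpc_fd_orphan above closes it immediately; otherwise every watcher is
   kicked out of poll() and the grpc_fd_end_poll call that observes the
   orphaned, watcher-free state performs the close and fires on_done_closure
   (see grpc_fd_end_poll below). */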

/* increment refcount by two to avoid changing the orphan bit */
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line) {
  ref_by(fd, 2, reason, file, line);
}

void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file,
                   int line) {
  unref_by(fd, 2, reason, file, line);
}
#else
void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }

void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif

static void process_callback(grpc_iomgr_closure *closure, int success,
                             int allow_synchronous_callback) {
  if (allow_synchronous_callback) {
    closure->cb(closure->cb_arg, success);
  } else {
    grpc_iomgr_add_delayed_callback(closure, success);
  }
}

/* callbacks is an array of closure pointers, as filled in by
   set_ready_locked */
static void process_callbacks(grpc_iomgr_closure **callbacks, size_t n,
                              int success, int allow_synchronous_callback) {
  size_t i;
  for (i = 0; i < n; i++) {
    process_callback(callbacks[i], success, allow_synchronous_callback);
  }
}

static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
                      int allow_synchronous_callback) {
  switch (gpr_atm_acq_load(st)) {
    case NOT_READY:
      /* There is no race if the descriptor is already ready, so we skip
         the interlocked op in that case.  As long as the app doesn't
         try to set the same upcall twice (which it shouldn't) then
         oldval should never be anything other than READY or NOT_READY.
         We don't check for user error on the fast path. */
      if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
        /* swap was successful -- the closure will run after the next
           set_ready call.  NOTE: we don't have an ABA problem here,
           since we should never have concurrent calls to the same
           notify_on function. */
        maybe_wake_one_watcher(fd);
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the READY code below */
    case READY:
      GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
      gpr_atm_rel_store(st, NOT_READY);
      process_callback(closure, !gpr_atm_acq_load(&fd->shutdown),
                       allow_synchronous_callback);
      return;
    default: /* WAITING */
      /* upcallptr was set to a different closure. This is an error! */
      gpr_log(GPR_ERROR,
              "User called a notify_on function with a previous callback "
              "still pending");
      abort();
  }
  gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
  abort();
}
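
/* An example of the benign race notify_on is written around, with one poller
   and one application thread:

     app:    notify_on loads the state           -- sees NOT_READY
     poller: set_ready CASes NOT_READY -> READY  -- the event arrives first
     app:    CAS NOT_READY -> closure fails; it falls through to the READY
             case, resets the state to NOT_READY, and runs the closure
             itself.

   Whichever side loses the CAS, the closure is delivered exactly once. */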

static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful due to an intervening notify_on call.
         Fall through to the WAITING code below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
                 gpr_atm_no_barrier_load(st) != NOT_READY);
      callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}

static void set_ready(grpc_fd *fd, gpr_atm *st,
                      int allow_synchronous_callback) {
  /* only one set_ready can be active at once (but there may be a racing
     notify_on) */
  int success;
  grpc_iomgr_closure *closure;
  size_t ncb = 0;

  gpr_mu_lock(&fd->set_state_mu);
  set_ready_locked(st, &closure, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  success = !gpr_atm_acq_load(&fd->shutdown);
  GPR_ASSERT(ncb <= 1);
  if (ncb > 0) {
    process_callbacks(&closure, ncb, success, allow_synchronous_callback);
  }
}

void grpc_fd_shutdown(grpc_fd *fd) {
  size_t ncb = 0;
  gpr_mu_lock(&fd->set_state_mu);
  GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
  gpr_atm_rel_store(&fd->shutdown, 1);
  set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
  set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  GPR_ASSERT(ncb <= 2);
  /* pending closures run with success == 0 to signal the shutdown */
  process_callbacks(fd->shutdown_closures, ncb, 0 /* GPR_FALSE */,
                    0 /* GPR_FALSE */);
}

void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->readst, closure, 0);
}

void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->writest, closure, 0);
}
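
/* A minimal caller-side sketch (hypothetical type and field names, and
   assuming the grpc_iomgr_closure_init helper from iomgr.h; real callers
   such as the tcp endpoint live elsewhere in iomgr): the waiter packages
   its continuation as a grpc_iomgr_closure and parks it on the fd,
   re-arming after each wakeup if it wants more events.

     static void on_readable(void *arg, int success) {
       my_endpoint *ep = arg;  // hypothetical container for a grpc_fd*
       if (!success) {
         // fd was shut down or orphaned; do not re-arm
         return;
       }
       // ... read from ep->em_fd->fd, then possibly re-arm ...
       grpc_fd_notify_on_read(ep->em_fd, &ep->read_closure);
     }

     grpc_iomgr_closure_init(&ep->read_closure, on_readable, ep);
     grpc_fd_notify_on_read(ep->em_fd, &ep->read_closure);
*/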

gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              gpr_uint32 read_mask, gpr_uint32 write_mask,
                              grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->watcher_mu);
  /* if we are shutdown, then don't add to the watcher set */
  if (gpr_atm_no_barrier_load(&fd->shutdown)) {
    watcher->fd = NULL;
    watcher->pollset = NULL;
    gpr_mu_unlock(&fd->watcher_mu);
    GRPC_FD_UNREF(fd, "poll");
    return 0;
  }
  /* if there is nobody polling for read, but we need to, then start doing so
   */
  if (read_mask && !fd->read_watcher &&
      gpr_atm_acq_load(&fd->readst) > READY) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  if (write_mask && !fd->write_watcher &&
      gpr_atm_acq_load(&fd->writest) > READY) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later */
  if (mask == 0) {
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->watcher_mu);

  return mask;
}
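
/* A minimal sketch of the begin/end protocol from a poller's perspective
   (hypothetical; the real pollers live in pollset_posix.c and friends).
   grpc_fd_end_poll must be called unconditionally: it is a no-op when
   begin_poll bailed out for shutdown, and it unlinks the watcher when a 0
   mask merely meant another poller already covers this fd.

     grpc_fd_watcher watcher;
     struct pollfd pfd;
     pfd.fd = fd->fd;
     pfd.revents = 0;
     pfd.events =
         (short)grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &watcher);
     if (pfd.events != 0) {
       poll(&pfd, 1, timeout_ms);
       if (pfd.revents & POLLIN) grpc_fd_become_readable(fd, 1);
       if (pfd.revents & POLLOUT) grpc_fd_become_writable(fd, 1);
     }
     grpc_fd_end_poll(&watcher, pfd.revents & POLLIN,
                      pfd.revents & POLLOUT);
*/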

void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  if (fd == NULL) {
    return;
  }

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  if (grpc_fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
    fd->closed = 1;
    close(fd->fd);
    if (fd->on_done_closure != NULL) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
  }
  gpr_mu_unlock(&fd->watcher_mu);

  GRPC_FD_UNREF(fd, "poll");
}

void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->readst, allow_synchronous_callback);
}

void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->writest, allow_synchronous_callback);
}

#endif