blob: b0506a4b979e99526a7a8920ddbcf24d1c0c6ab9 [file] [log] [blame]
ctiller58393c22015-01-07 14:03:30 -08001/*
2 *
Craig Tiller06059952015-02-18 08:34:56 -08003 * Copyright 2015, Google Inc.
ctiller58393c22015-01-07 14:03:30 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Craig Tillerd14a1a52015-01-21 15:26:29 -080034#include <grpc/support/port_platform.h>
35
36#ifdef GPR_POSIX_SOCKET
37
ctiller58393c22015-01-07 14:03:30 -080038#include "src/core/iomgr/fd_posix.h"
39
40#include <assert.h>
David Klempnerc6bccc22015-02-24 17:33:05 -080041#include <sys/socket.h>
ctiller58393c22015-01-07 14:03:30 -080042#include <unistd.h>
43
ctiller58393c22015-01-07 14:03:30 -080044#include <grpc/support/alloc.h>
45#include <grpc/support/log.h>
46#include <grpc/support/useful.h>
47
/* State machine values stored in fd->readst / fd->writest (a gpr_atm).
   Any stored value greater than READY is interpreted as a
   grpc_iomgr_closure* registered via notify_on() that is waiting for the
   event to fire. */
enum descriptor_state {
  NOT_READY = 0, /* event has not fired; no callback registered */
  READY = 1      /* event fired before any callback was registered */
}; /* or a pointer to a closure to call */
ctiller58393c22015-01-07 14:03:30 -080052
David Klempnerd1785242015-01-28 17:00:21 -080053/* We need to keep a freelist not because of any concerns of malloc performance
54 * but instead so that implementations with multiple threads in (for example)
55 * epoll_wait deal with the race between pollset removal and incoming poll
56 * notifications.
57 *
58 * The problem is that the poller ultimately holds a reference to this
59 * object, so it is very difficult to know when is safe to free it, at least
60 * without some expensive synchronization.
61 *
62 * If we keep the object freelisted, in the worst case losing this race just
63 * becomes a spurious read notification on a reused fd.
64 */
65/* TODO(klempner): We could use some form of polling generation count to know
66 * when these are safe to free. */
67/* TODO(klempner): Consider disabling freelisting if we don't have multiple
68 * threads in poll on the same fd */
69/* TODO(klempner): Batch these allocations to reduce fragmentation */
/* Head of the global freelist of recycled grpc_fd objects, and the mutex
   guarding it (initialized in grpc_fd_global_init). */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
72
/* Return a no-longer-referenced fd object to the global freelist.
   Called from unref_by() once the refcount reaches zero; the object is
   recycled rather than freed (see the freelist rationale above). */
static void freelist_fd(grpc_fd *fd) {
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}
79
80static grpc_fd *alloc_fd(int fd) {
81 grpc_fd *r = NULL;
82 gpr_mu_lock(&fd_freelist_mu);
83 if (fd_freelist != NULL) {
84 r = fd_freelist;
85 fd_freelist = fd_freelist->freelist_next;
86 }
87 gpr_mu_unlock(&fd_freelist_mu);
88 if (r == NULL) {
89 r = gpr_malloc(sizeof(grpc_fd));
90 gpr_mu_init(&r->set_state_mu);
91 gpr_mu_init(&r->watcher_mu);
92 }
David Garcia Quintas5f228f52015-05-26 19:58:50 -070093
David Klempnerd1785242015-01-28 17:00:21 -080094 gpr_atm_rel_store(&r->refst, 1);
Craig Tiller0fcd53c2015-02-18 15:10:53 -080095 gpr_atm_rel_store(&r->readst, NOT_READY);
96 gpr_atm_rel_store(&r->writest, NOT_READY);
David Klempnerd1785242015-01-28 17:00:21 -080097 gpr_atm_rel_store(&r->shutdown, 0);
98 r->fd = fd;
Craig Tiller8e50fe92015-05-18 10:45:04 -070099 r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
100 &r->inactive_watcher_root;
David Klempnerd1785242015-01-28 17:00:21 -0800101 r->freelist_next = NULL;
Craig Tiller886d7ec2015-05-14 16:18:42 -0700102 r->read_watcher = r->write_watcher = NULL;
Craig Tiller8b6cb8d2015-06-26 08:08:35 -0700103 r->on_done_closure = NULL;
David Klempnerd1785242015-01-28 17:00:21 -0800104 return r;
105}
106
107static void destroy(grpc_fd *fd) {
108 gpr_mu_destroy(&fd->set_state_mu);
109 gpr_mu_destroy(&fd->watcher_mu);
ctiller58393c22015-01-07 14:03:30 -0800110 gpr_free(fd);
ctiller58393c22015-01-07 14:03:30 -0800111}
112
#ifdef GRPC_FD_REF_COUNT_DEBUG
/* Debug builds: REF_BY/UNREF_BY forward the call site and a reason string so
   every refcount transition can be traced in the log. */
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
/* Add n to fd's refcount, logging the old -> new transition.
   NOTE: the function header is split across the #ifdef; the shared body
   follows the #endif. */
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                   int line) {
  gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
          gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  /* the object must already be live: the pre-add count must be positive */
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
128
#ifdef GRPC_FD_REF_COUNT_DEBUG
/* Subtract n from fd's refcount, logging the transition (debug builds).
   Header split across the #ifdef; shared body follows the #endif. */
static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file,
                     int line) {
  gpr_atm old;
  gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
          gpr_atm_no_barrier_load(&fd->refst),
          gpr_atm_no_barrier_load(&fd->refst) - n, reason, file, line);
#else
static void unref_by(grpc_fd *fd, int n) {
  gpr_atm old;
#endif
  old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    /* count hit zero: unregister from the iomgr and recycle the object
       through the freelist (never freed directly; see rationale above) */
    grpc_iomgr_unregister_object(&fd->iomgr_object);
    freelist_fd(fd);
  } else {
    /* otherwise the count must still be positive */
    GPR_ASSERT(old > n);
  }
}
148
Craig Tiller7d413212015-02-09 08:00:02 -0800149void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
David Klempnerd1785242015-01-28 17:00:21 -0800150
151void grpc_fd_global_shutdown(void) {
152 while (fd_freelist != NULL) {
153 grpc_fd *fd = fd_freelist;
154 fd_freelist = fd_freelist->freelist_next;
ctiller58393c22015-01-07 14:03:30 -0800155 destroy(fd);
156 }
David Klempnerd1785242015-01-28 17:00:21 -0800157 gpr_mu_destroy(&fd_freelist_mu);
ctiller58393c22015-01-07 14:03:30 -0800158}
159
Craig Tillerfa275a92015-06-01 13:55:54 -0700160grpc_fd *grpc_fd_create(int fd, const char *name) {
David Klempnerd1785242015-01-28 17:00:21 -0800161 grpc_fd *r = alloc_fd(fd);
Craig Tillerfa275a92015-06-01 13:55:54 -0700162 grpc_iomgr_register_object(&r->iomgr_object, name);
ctiller58393c22015-01-07 14:03:30 -0800163 return r;
164}
165
166int grpc_fd_is_orphaned(grpc_fd *fd) {
167 return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
168}
169
Craig Tiller886d7ec2015-05-14 16:18:42 -0700170static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
Craig Tiller354bf6d2015-05-18 10:18:03 -0700171 if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
172 grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset);
173 } else if (fd->read_watcher) {
174 grpc_pollset_force_kick(fd->read_watcher->pollset);
175 } else if (fd->write_watcher) {
176 grpc_pollset_force_kick(fd->write_watcher->pollset);
Craig Tiller886d7ec2015-05-14 16:18:42 -0700177 }
178}
179
180static void maybe_wake_one_watcher(grpc_fd *fd) {
ctiller58393c22015-01-07 14:03:30 -0800181 gpr_mu_lock(&fd->watcher_mu);
Craig Tiller886d7ec2015-05-14 16:18:42 -0700182 maybe_wake_one_watcher_locked(fd);
183 gpr_mu_unlock(&fd->watcher_mu);
184}
185
Craig Tillerc95de722015-05-29 08:56:46 -0700186static void wake_all_watchers_locked(grpc_fd *fd) {
Craig Tiller886d7ec2015-05-14 16:18:42 -0700187 grpc_fd_watcher *watcher;
Craig Tiller8e50fe92015-05-18 10:45:04 -0700188 for (watcher = fd->inactive_watcher_root.next;
189 watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
Craig Tiller7d413212015-02-09 08:00:02 -0800190 grpc_pollset_force_kick(watcher->pollset);
ctiller58393c22015-01-07 14:03:30 -0800191 }
Craig Tiller354bf6d2015-05-18 10:18:03 -0700192 if (fd->read_watcher) {
193 grpc_pollset_force_kick(fd->read_watcher->pollset);
194 }
195 if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
196 grpc_pollset_force_kick(fd->write_watcher->pollset);
197 }
ctiller58393c22015-01-07 14:03:30 -0800198}
199
Craig Tiller8b6cb8d2015-06-26 08:08:35 -0700200static int has_watchers(grpc_fd *fd) {
201 return fd->read_watcher != NULL || fd->write_watcher != NULL || fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
202}
203
/* Drop the fd's "active" status and arrange for |on_done| (may be NULL) to
   run once no pollers are watching it.
   Refcount arithmetic: normal refs are taken in units of 2 (see
   grpc_fd_ref), so the REF_BY(+1)/UNREF_BY(-2) pair below nets -1 -- it
   clears the low "active" bit tested by grpc_fd_is_orphaned while keeping
   the count positive across the critical section. */
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done,
                    const char *reason) {
  fd->on_done_closure = on_done;
  /* wake any syscalls blocked on the raw descriptor */
  shutdown(fd->fd, SHUT_RDWR);
  REF_BY(fd, 1, reason); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  if (!has_watchers(fd)) {
    /* nobody polling: safe to complete the orphan immediately */
    if (fd->on_done_closure) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
  } else {
    /* kick pollers out; the last grpc_fd_end_poll will run on_done */
    wake_all_watchers_locked(fd);
  }
  gpr_mu_unlock(&fd->watcher_mu);
  UNREF_BY(fd, 2, reason); /* drop the reference */
}
220
/* Public ref/unref: increment/decrement the refcount by two so the low
   "orphan/active" bit of refst is never disturbed. Debug builds take the
   caller's reason and call site for tracing (see ref_by/unref_by). */
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line) {
  ref_by(fd, 2, reason, file, line);
}

void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file,
                   int line) {
  unref_by(fd, 2, reason, file, line);
}
#else
void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }

void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
ctiller58393c22015-01-07 14:03:30 -0800236
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700237static void process_callback(grpc_iomgr_closure *closure, int success,
Craig Tiller8674cb12015-06-05 07:09:25 -0700238 int allow_synchronous_callback) {
ctiller58393c22015-01-07 14:03:30 -0800239 if (allow_synchronous_callback) {
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700240 closure->cb(closure->cb_arg, success);
ctiller58393c22015-01-07 14:03:30 -0800241 } else {
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700242 grpc_iomgr_add_delayed_callback(closure, success);
ctiller58393c22015-01-07 14:03:30 -0800243 }
244}
245
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700246static void process_callbacks(grpc_iomgr_closure *callbacks, size_t n,
247 int success, int allow_synchronous_callback) {
ctiller58393c22015-01-07 14:03:30 -0800248 size_t i;
249 for (i = 0; i < n; i++) {
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700250 process_callback(callbacks + i, success, allow_synchronous_callback);
ctiller58393c22015-01-07 14:03:30 -0800251 }
252}
253
/* Register |closure| to run when the event tracked by |st| (fd->readst or
   fd->writest) next becomes ready. If the event already fired (READY), the
   closure is dispatched now with success = !shutdown; otherwise |st| is set
   to the closure pointer and one poller is kicked so it starts polling for
   the event. Only one notify_on per state may be outstanding at a time. */
static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
                      int allow_synchronous_callback) {
  switch (gpr_atm_acq_load(st)) {
    case NOT_READY:
      /* There is no race if the descriptor is already ready, so we skip
         the interlocked op in that case. As long as the app doesn't
         try to set the same upcall twice (which it shouldn't) then
         oldval should never be anything other than READY or NOT_READY. We
         don't check for user error on the fast path. */
      if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
        /* swap was successful -- the closure will run after the next
           set_ready call. NOTE: we don't have an ABA problem here,
           since we should never have concurrent calls to the same
           notify_on function. */
        maybe_wake_one_watcher(fd);
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the READY code below */
    case READY:
      GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
      gpr_atm_rel_store(st, NOT_READY);
      /* success is 0 iff the fd has been shut down */
      process_callback(closure, !gpr_atm_acq_load(&fd->shutdown),
                       allow_synchronous_callback);
      return;
    default: /* WAITING */
      /* upcallptr was set to a different closure. This is an error! */
      gpr_log(GPR_ERROR,
              "User called a notify_on function with a previous callback still "
              "pending");
      abort();
  }
  /* unreachable: every switch arm returns or aborts */
  gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
  abort();
}
290
/* Transition |st| to "event fired". If a closure was registered
   (state > READY), hand it back by appending it to |callbacks| and reset
   |st| to NOT_READY so the next notify_on re-arms. Caller holds
   fd->set_state_mu, but a concurrent notify_on may still race via the CAS. */
static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful due to an intervening notify_on call that
         installed a closure. Re-read and fall through to the WAITING code
         below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      /* state must be a closure pointer at this point */
      GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
                 gpr_atm_no_barrier_load(st) != NOT_READY);
      callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}
316
/* Signal that the event tracked by |st| has fired: collect at most one
   pending closure under set_state_mu and dispatch it with
   success = !shutdown. NOTE: |closure| acts as a one-element array filled
   by set_ready_locked; it is only read when ncb == 1. */
static void set_ready(grpc_fd *fd, gpr_atm *st,
                      int allow_synchronous_callback) {
  /* only one set_ready can be active at once (but there may be a racing
     notify_on) */
  int success;
  grpc_iomgr_closure *closure;
  size_t ncb = 0;

  gpr_mu_lock(&fd->set_state_mu);
  set_ready_locked(st, &closure, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  success = !gpr_atm_acq_load(&fd->shutdown);
  GPR_ASSERT(ncb <= 1);
  if (ncb > 0) {
    process_callbacks(closure, ncb, success, allow_synchronous_callback);
  }
}
334
335void grpc_fd_shutdown(grpc_fd *fd) {
ctiller58393c22015-01-07 14:03:30 -0800336 size_t ncb = 0;
337 gpr_mu_lock(&fd->set_state_mu);
David Klempner466423b2015-03-11 15:00:46 -0700338 GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
ctiller58393c22015-01-07 14:03:30 -0800339 gpr_atm_rel_store(&fd->shutdown, 1);
David Garcia Quintas2738ae82015-05-28 16:06:48 -0700340 set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
341 set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
ctiller58393c22015-01-07 14:03:30 -0800342 gpr_mu_unlock(&fd->set_state_mu);
David Garcia Quintas07997b62015-05-28 13:51:50 -0700343 GPR_ASSERT(ncb <= 2);
David Garcia Quintas1c762bd2015-05-31 17:04:43 -0700344 process_callbacks(fd->shutdown_closures[0], ncb, 0 /* GPR_FALSE */,
345 0 /* GPR_FALSE */);
ctiller58393c22015-01-07 14:03:30 -0800346}
347
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800348void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
349 notify_on(fd, &fd->readst, closure, 0);
ctiller58393c22015-01-07 14:03:30 -0800350}
351
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800352void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
353 notify_on(fd, &fd->writest, closure, 0);
ctiller58393c22015-01-07 14:03:30 -0800354}
355
/* Register |watcher| (polling on behalf of |pollset|) with |fd| and return
   the subset of read_mask/write_mask this poller should actually poll for;
   returns 0 (and nulls the watcher) if the fd is already shut down. At most
   one watcher polls for read and one for write at a time; others are parked
   on the inactive ring so they can be kicked into service later. Takes a
   "poll" ref released by grpc_fd_end_poll. */
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              gpr_uint32 read_mask, gpr_uint32 write_mask,
                              grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->watcher_mu);
  /* if we are shutdown, then don't add to the watcher set */
  if (gpr_atm_no_barrier_load(&fd->shutdown)) {
    watcher->fd = NULL; /* makes the matching grpc_fd_end_poll a no-op */
    watcher->pollset = NULL;
    gpr_mu_unlock(&fd->watcher_mu);
    return 0;
  }
  /* if there is nobody polling for read, but we need to (a closure is
     registered: readst > READY), then start doing so */
  if (!fd->read_watcher && gpr_atm_acq_load(&fd->readst) > READY) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  if (!fd->write_watcher && gpr_atm_acq_load(&fd->writest) > READY) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later:
     splice it into the inactive doubly-linked ring */
  if (mask == 0) {
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->watcher_mu);

  return mask;
}
395
/* Unregister |watcher| after its poll completes. got_read/got_write report
   whether this poller delivered those events; if it held the active
   read/write slot but did not deliver, another watcher is kicked to take
   over. If the fd is orphaned and this was its last watcher, the orphan's
   on_done closure is scheduled here. Releases the "poll" ref taken by
   grpc_fd_begin_poll. */
void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  /* watcher->fd == NULL means begin_poll saw a shut-down fd: nothing to do */
  if (fd == NULL) {
    return;
  }

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  if (fd->on_done_closure != NULL && !has_watchers(fd)) {
    /* fd was orphaned and no watchers remain: finish the orphan sequence */
    grpc_iomgr_add_callback(fd->on_done_closure);
  }
  gpr_mu_unlock(&fd->watcher_mu);

  GRPC_FD_UNREF(fd, "poll");
}
433
434void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
435 set_ready(fd, &fd->readst, allow_synchronous_callback);
436}
437
438void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
439 set_ready(fd, &fd->writest, allow_synchronous_callback);
440}
Craig Tillerd14a1a52015-01-21 15:26:29 -0800441
Craig Tiller190d3602015-02-18 09:23:38 -0800442#endif