blob: 52a6920321da676328923d62300b28d819b30ae8 [file] [log] [blame]
ctiller58393c22015-01-07 14:03:30 -08001/*
2 *
Craig Tiller06059952015-02-18 08:34:56 -08003 * Copyright 2015, Google Inc.
ctiller58393c22015-01-07 14:03:30 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Craig Tillerd14a1a52015-01-21 15:26:29 -080034#include <grpc/support/port_platform.h>
35
36#ifdef GPR_POSIX_SOCKET
37
ctiller58393c22015-01-07 14:03:30 -080038#include "src/core/iomgr/fd_posix.h"
39
40#include <assert.h>
David Klempnerc6bccc22015-02-24 17:33:05 -080041#include <sys/socket.h>
ctiller58393c22015-01-07 14:03:30 -080042#include <unistd.h>
43
ctiller58393c22015-01-07 14:03:30 -080044#include <grpc/support/alloc.h>
45#include <grpc/support/log.h>
46#include <grpc/support/useful.h>
47
/* The readst/writest atomics of a grpc_fd hold one of these two values OR a
   pointer to a grpc_iomgr_closure waiting to be invoked: any stored value
   greater than READY is interpreted as such a closure pointer (see notify_on,
   set_ready_locked and the "> READY" tests in grpc_fd_begin_poll). */
enum descriptor_state {
  NOT_READY = 0,
  READY = 1
}; /* or a pointer to a closure to call */
ctiller58393c22015-01-07 14:03:30 -080052
/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */
/* TODO(klempner): We could use some form of polling generation count to know
 * when these are safe to free. */
/* TODO(klempner): Consider disabling freelisting if we don't have multiple
 * threads in poll on the same fd */
/* TODO(klempner): Batch these allocations to reduce fragmentation */

/* Head of the singly-linked (via grpc_fd.freelist_next) list of reusable
   grpc_fd objects; both variables are guarded by fd_freelist_mu, which is
   initialized in grpc_fd_global_init and destroyed in
   grpc_fd_global_shutdown. */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;
72
73static void freelist_fd(grpc_fd *fd) {
David Klempnerd1785242015-01-28 17:00:21 -080074 gpr_mu_lock(&fd_freelist_mu);
75 fd->freelist_next = fd_freelist;
76 fd_freelist = fd;
77 gpr_mu_unlock(&fd_freelist_mu);
78}
79
/* Obtain a grpc_fd wrapping file descriptor `fd`: pop one off the freelist
   if possible, otherwise malloc a fresh one (mutexes are initialized only on
   the malloc path -- they survive freelisting).  All state fields are then
   (re)initialized. */
static grpc_fd *alloc_fd(int fd) {
  grpc_fd *r = NULL;
  gpr_mu_lock(&fd_freelist_mu);
  if (fd_freelist != NULL) {
    r = fd_freelist;
    fd_freelist = fd_freelist->freelist_next;
  }
  gpr_mu_unlock(&fd_freelist_mu);
  if (r == NULL) {
    /* freelist empty: allocate and do the once-per-object mutex init */
    r = gpr_malloc(sizeof(grpc_fd));
    gpr_mu_init(&r->set_state_mu);
    gpr_mu_init(&r->watcher_mu);
  }

  /* release stores so the initialized state is visible to other threads that
     subsequently acquire-load these atomics */
  gpr_atm_rel_store(&r->refst, 1);
  gpr_atm_rel_store(&r->readst, NOT_READY);
  gpr_atm_rel_store(&r->writest, NOT_READY);
  gpr_atm_rel_store(&r->shutdown, 0);
  r->fd = fd;
  /* empty doubly-linked sentinel list of inactive watchers */
  r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
      &r->inactive_watcher_root;
  r->freelist_next = NULL;
  r->read_watcher = r->write_watcher = NULL;
  return r;
}
105
106static void destroy(grpc_fd *fd) {
107 gpr_mu_destroy(&fd->set_state_mu);
108 gpr_mu_destroy(&fd->watcher_mu);
ctiller58393c22015-01-07 14:03:30 -0800109 gpr_free(fd);
ctiller58393c22015-01-07 14:03:30 -0800110}
111
/* Add `n` to the fd's reference count.  The count's low bit doubles as the
   "active" (non-orphaned) flag -- see grpc_fd_is_orphaned -- which is why
   external refs are taken in units of 2.  With GRPC_FD_REF_COUNT_DEBUG
   defined, every ref is logged with a reason and call site. */
#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, int line) {
  gpr_log(GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
          fd->refst, fd->refst + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void ref_by(grpc_fd *fd, int n) {
#endif
  /* must never resurrect an object whose count already reached zero */
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
125
Craig Tiller9ae76972015-05-31 13:58:24 -0700126#ifdef GRPC_FD_REF_COUNT_DEBUG
127static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, int line) {
128 gpr_atm old;
Craig Tiller8e0b08a2015-06-01 17:04:17 -0700129 gpr_log(GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
130 fd->refst, fd->refst - n, reason, file, line);
Craig Tiller9ae76972015-05-31 13:58:24 -0700131#else
ctiller58393c22015-01-07 14:03:30 -0800132static void unref_by(grpc_fd *fd, int n) {
Craig Tiller9ae76972015-05-31 13:58:24 -0700133 gpr_atm old;
134#endif
135 old = gpr_atm_full_fetch_add(&fd->refst, -n);
Craig Tiller23139ae2015-02-17 15:46:13 -0800136 if (old == n) {
Craig Tiller0317b3d2015-06-01 21:57:03 -0700137 if (fd->on_done_closure) {
138 grpc_iomgr_add_callback(fd->on_done_closure);
139 }
David Klempnerd1785242015-01-28 17:00:21 -0800140 freelist_fd(fd);
Craig Tillerfa275a92015-06-01 13:55:54 -0700141 grpc_iomgr_unregister_object(&fd->iomgr_object);
Craig Tiller23139ae2015-02-17 15:46:13 -0800142 } else {
143 GPR_ASSERT(old > n);
David Klempnerd1785242015-01-28 17:00:21 -0800144 }
145}
146
Craig Tiller7d413212015-02-09 08:00:02 -0800147void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
David Klempnerd1785242015-01-28 17:00:21 -0800148
149void grpc_fd_global_shutdown(void) {
150 while (fd_freelist != NULL) {
151 grpc_fd *fd = fd_freelist;
152 fd_freelist = fd_freelist->freelist_next;
ctiller58393c22015-01-07 14:03:30 -0800153 destroy(fd);
154 }
David Klempnerd1785242015-01-28 17:00:21 -0800155 gpr_mu_destroy(&fd_freelist_mu);
ctiller58393c22015-01-07 14:03:30 -0800156}
157
Craig Tillerfa275a92015-06-01 13:55:54 -0700158grpc_fd *grpc_fd_create(int fd, const char *name) {
David Klempnerd1785242015-01-28 17:00:21 -0800159 grpc_fd *r = alloc_fd(fd);
Craig Tillerfa275a92015-06-01 13:55:54 -0700160 grpc_iomgr_register_object(&r->iomgr_object, name);
ctiller58393c22015-01-07 14:03:30 -0800161 return r;
162}
163
164int grpc_fd_is_orphaned(grpc_fd *fd) {
165 return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
166}
167
Craig Tiller886d7ec2015-05-14 16:18:42 -0700168static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
Craig Tiller354bf6d2015-05-18 10:18:03 -0700169 if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
170 grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset);
171 } else if (fd->read_watcher) {
172 grpc_pollset_force_kick(fd->read_watcher->pollset);
173 } else if (fd->write_watcher) {
174 grpc_pollset_force_kick(fd->write_watcher->pollset);
Craig Tiller886d7ec2015-05-14 16:18:42 -0700175 }
176}
177
178static void maybe_wake_one_watcher(grpc_fd *fd) {
ctiller58393c22015-01-07 14:03:30 -0800179 gpr_mu_lock(&fd->watcher_mu);
Craig Tiller886d7ec2015-05-14 16:18:42 -0700180 maybe_wake_one_watcher_locked(fd);
181 gpr_mu_unlock(&fd->watcher_mu);
182}
183
Craig Tillerc95de722015-05-29 08:56:46 -0700184static void wake_all_watchers_locked(grpc_fd *fd) {
Craig Tiller886d7ec2015-05-14 16:18:42 -0700185 grpc_fd_watcher *watcher;
Craig Tiller8e50fe92015-05-18 10:45:04 -0700186 for (watcher = fd->inactive_watcher_root.next;
187 watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
Craig Tiller7d413212015-02-09 08:00:02 -0800188 grpc_pollset_force_kick(watcher->pollset);
ctiller58393c22015-01-07 14:03:30 -0800189 }
Craig Tiller354bf6d2015-05-18 10:18:03 -0700190 if (fd->read_watcher) {
191 grpc_pollset_force_kick(fd->read_watcher->pollset);
192 }
193 if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
194 grpc_pollset_force_kick(fd->write_watcher->pollset);
195 }
ctiller58393c22015-01-07 14:03:30 -0800196}
197
/* Begin tearing down the fd.  `on_done` (may be NULL -- unref_by checks) is
   scheduled once the last reference is released in unref_by.  The odd +1 ref
   clears the "active" parity bit (grpc_fd_is_orphaned becomes true) while
   keeping the object alive until the matching -2 below. */
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_closure *on_done) {
  fd->on_done_closure = on_done;
  /* shut down both directions of the underlying socket so pollers blocked on
     it observe an event */
  shutdown(fd->fd, SHUT_RDWR);
  REF_BY(fd, 1, "orphan"); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  wake_all_watchers_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
  UNREF_BY(fd, 2, "orphan"); /* drop the reference */
}
207
/* increment refcount by two to avoid changing the orphan bit */
/* Public ref/unref pair.  The delta of 2 keeps the low "active/orphan"
   parity bit of refst untouched (see grpc_fd_is_orphaned).  The debug build
   threads reason/file/line through to the logging ref_by/unref_by. */
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line) {
  ref_by(fd, 2, reason, file, line);
}

void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file,
                   int line) {
  unref_by(fd, 2, reason, file, line);
}
#else
void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }

void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
#endif
ctiller58393c22015-01-07 14:03:30 -0800223
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700224static void process_callback(grpc_iomgr_closure *closure, int success,
ctiller58393c22015-01-07 14:03:30 -0800225 int allow_synchronous_callback) {
226 if (allow_synchronous_callback) {
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700227 closure->cb(closure->cb_arg, success);
ctiller58393c22015-01-07 14:03:30 -0800228 } else {
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700229 grpc_iomgr_add_delayed_callback(closure, success);
ctiller58393c22015-01-07 14:03:30 -0800230 }
231}
232
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700233static void process_callbacks(grpc_iomgr_closure *callbacks, size_t n,
234 int success, int allow_synchronous_callback) {
ctiller58393c22015-01-07 14:03:30 -0800235 size_t i;
236 for (i = 0; i < n; i++) {
David Garcia Quintasa30020f2015-05-27 19:21:01 -0700237 process_callback(callbacks + i, success, allow_synchronous_callback);
ctiller58393c22015-01-07 14:03:30 -0800238 }
239}
240
/* Arm `closure` to fire when the event tracked by atomic `st` (readst or
   writest) becomes ready.  Lock-free state machine: NOT_READY -> closure
   pointer (waiting) via CAS; if the event already fired (READY) the closure
   runs immediately with success = !shutdown.  Only one outstanding closure
   per direction is allowed -- a second concurrent notify_on is a usage error
   and aborts. */
static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
                      int allow_synchronous_callback) {
  switch (gpr_atm_acq_load(st)) {
    case NOT_READY:
      /* There is no race if the descriptor is already ready, so we skip
         the interlocked op in that case. As long as the app doesn't
         try to set the same upcall twice (which it shouldn't) then
         oldval should never be anything other than READY or NOT_READY. We
         don't
         check for user error on the fast path. */
      if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
        /* swap was successful -- the closure will run after the next
           set_ready call. NOTE: we don't have an ABA problem here,
           since we should never have concurrent calls to the same
           notify_on function. */
        maybe_wake_one_watcher(fd);
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the READY code below */
    case READY:
      GPR_ASSERT(gpr_atm_no_barrier_load(st) == READY);
      gpr_atm_rel_store(st, NOT_READY);
      /* run now; success is false only if the fd has been shut down */
      process_callback(closure, !gpr_atm_acq_load(&fd->shutdown),
                       allow_synchronous_callback);
      return;
    default: /* WAITING */
      /* upcallptr was set to a different closure. This is an error! */
      gpr_log(GPR_ERROR,
              "User called a notify_on function with a previous callback still "
              "pending");
      abort();
  }
  /* unreachable: every switch arm returns or aborts */
  gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
  abort();
}
277
/* Transition atomic `st` to READY.  If a closure was parked in `st` (any
   value other than READY/NOT_READY), hand it back to the caller by appending
   its pointer to `callbacks` and resetting `st` to NOT_READY.  Caller must
   hold set_state_mu (only one set_ready path at a time; notify_on may still
   race, hence the CAS). */
static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure **callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the WAITING code below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      GPR_ASSERT(gpr_atm_no_barrier_load(st) != READY &&
                 gpr_atm_no_barrier_load(st) != NOT_READY);
      /* the stored value is the waiting closure's pointer */
      callbacks[(*ncallbacks)++] = (grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}
303
/* Mark one direction (readst or writest) ready and run any closure that was
   waiting on it, with success = !shutdown. */
static void set_ready(grpc_fd *fd, gpr_atm *st,
                      int allow_synchronous_callback) {
  /* only one set_ready can be active at once (but there may be a racing
     notify_on) */
  int success;
  grpc_iomgr_closure* closure;  /* left uninitialized; only read when ncb > 0 */
  size_t ncb = 0;

  gpr_mu_lock(&fd->set_state_mu);
  set_ready_locked(st, &closure, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  success = !gpr_atm_acq_load(&fd->shutdown);
  GPR_ASSERT(ncb <= 1);
  if (ncb > 0) {
    /* passing `closure` as an "array" of length ncb is safe only because
       ncb <= 1 here (asserted above) */
    process_callbacks(closure, ncb, success, allow_synchronous_callback);
  }
}
321
322void grpc_fd_shutdown(grpc_fd *fd) {
ctiller58393c22015-01-07 14:03:30 -0800323 size_t ncb = 0;
324 gpr_mu_lock(&fd->set_state_mu);
David Klempner466423b2015-03-11 15:00:46 -0700325 GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
ctiller58393c22015-01-07 14:03:30 -0800326 gpr_atm_rel_store(&fd->shutdown, 1);
David Garcia Quintas2738ae82015-05-28 16:06:48 -0700327 set_ready_locked(&fd->readst, &fd->shutdown_closures[0], &ncb);
328 set_ready_locked(&fd->writest, &fd->shutdown_closures[0], &ncb);
ctiller58393c22015-01-07 14:03:30 -0800329 gpr_mu_unlock(&fd->set_state_mu);
David Garcia Quintas07997b62015-05-28 13:51:50 -0700330 GPR_ASSERT(ncb <= 2);
David Garcia Quintas1c762bd2015-05-31 17:04:43 -0700331 process_callbacks(fd->shutdown_closures[0], ncb, 0 /* GPR_FALSE */,
332 0 /* GPR_FALSE */);
ctiller58393c22015-01-07 14:03:30 -0800333}
334
/* Arm `closure` to run when the fd becomes readable (or is shut down);
   delivery is always deferred (allow_synchronous_callback == 0). */
void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->readst, closure, 0);
}
338
/* Arm `closure` to run when the fd becomes writable (or is shut down);
   delivery is always deferred (allow_synchronous_callback == 0). */
void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->writest, closure, 0);
}
342
/* Called by a pollset before polling: claim the read and/or write watcher
   slots if they are free AND someone is actually waiting on that direction
   (state > READY means a closure pointer is parked there).  Returns the
   union of read_mask/write_mask bits this watcher should poll for; 0 means
   it becomes an inactive watcher, parked on the fd's list so it can be
   kicked into service later.  Pairs with grpc_fd_end_poll. */
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              gpr_uint32 read_mask, gpr_uint32 write_mask,
                              grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->watcher_mu);
  /* if there is nobody polling for read, but we need to, then start doing so */
  if (!fd->read_watcher && gpr_atm_acq_load(&fd->readst) > READY) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  if (!fd->write_watcher && gpr_atm_acq_load(&fd->writest) > READY) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later */
  if (mask == 0) {
    /* splice into the inactive doubly-linked list, just after the sentinel */
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->watcher_mu);

  return mask;
}
375
/* Called by a pollset after polling: release the watcher slots claimed in
   grpc_fd_begin_poll.  If this watcher was responsible for a direction that
   still was not satisfied (got_read/got_write == 0), kick another watcher so
   coverage of that direction is not lost.  Drops the "poll" ref taken in
   begin_poll. */
void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  gpr_mu_unlock(&fd->watcher_mu);

  GRPC_FD_UNREF(fd, "poll");
}
406
/* Poller-side notification that the fd is readable: fire/queue the pending
   read closure via the set_ready state machine. */
void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->readst, allow_synchronous_callback);
}
410
/* Poller-side notification that the fd is writable: fire/queue the pending
   write closure via the set_ready state machine. */
void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->writest, allow_synchronous_callback);
}
Craig Tillerd14a1a52015-01-21 15:26:29 -0800414
Craig Tiller190d3602015-02-18 09:23:38 -0800415#endif