blob: 65dff84a685d0c066148f4a88e76039b7b140985 [file] [log] [blame]
ctiller58393c22015-01-07 14:03:30 -08001/*
2 *
Craig Tiller06059952015-02-18 08:34:56 -08003 * Copyright 2015, Google Inc.
ctiller58393c22015-01-07 14:03:30 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Craig Tillerd14a1a52015-01-21 15:26:29 -080034#include <grpc/support/port_platform.h>
35
36#ifdef GPR_POSIX_SOCKET
37
ctiller58393c22015-01-07 14:03:30 -080038#include "src/core/iomgr/fd_posix.h"
39
40#include <assert.h>
David Klempnerc6bccc22015-02-24 17:33:05 -080041#include <sys/socket.h>
ctiller58393c22015-01-07 14:03:30 -080042#include <unistd.h>
43
44#include "src/core/iomgr/iomgr_internal.h"
45#include <grpc/support/alloc.h>
46#include <grpc/support/log.h>
47#include <grpc/support/useful.h>
48
/* State of one direction (read or write) of an fd, stored in the gpr_atm
   fields fd->readst / fd->writest.  Besides these two enum values, the atom
   may hold a pointer to a grpc_iomgr_closure registered via notify_on();
   pointer values are distinguished by comparing > READY. */
enum descriptor_state {
  NOT_READY = 0, /* no pending readiness, no closure registered */
  READY = 1      /* readiness arrived before a closure was registered */
}; /* or a pointer to a closure to call */
ctiller58393c22015-01-07 14:03:30 -080053
/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */
/* TODO(klempner): We could use some form of polling generation count to know
 * when these are safe to free. */
/* TODO(klempner): Consider disabling freelisting if we don't have multiple
 * threads in poll on the same fd */
/* TODO(klempner): Batch these allocations to reduce fragmentation */
/* Singly linked (via freelist_next) list of released grpc_fd objects. */
static grpc_fd *fd_freelist = NULL;
/* Guards fd_freelist; initialized/destroyed in grpc_fd_global_{init,shutdown}. */
static gpr_mu fd_freelist_mu;
73
/* Push fd onto the global freelist instead of freeing it (see the freelist
   rationale above).  The object's mutexes remain initialized for reuse. */
static void freelist_fd(grpc_fd *fd) {
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}
80
81static grpc_fd *alloc_fd(int fd) {
82 grpc_fd *r = NULL;
83 gpr_mu_lock(&fd_freelist_mu);
84 if (fd_freelist != NULL) {
85 r = fd_freelist;
86 fd_freelist = fd_freelist->freelist_next;
87 }
88 gpr_mu_unlock(&fd_freelist_mu);
89 if (r == NULL) {
90 r = gpr_malloc(sizeof(grpc_fd));
91 gpr_mu_init(&r->set_state_mu);
92 gpr_mu_init(&r->watcher_mu);
93 }
94 gpr_atm_rel_store(&r->refst, 1);
Craig Tiller0fcd53c2015-02-18 15:10:53 -080095 gpr_atm_rel_store(&r->readst, NOT_READY);
96 gpr_atm_rel_store(&r->writest, NOT_READY);
David Klempnerd1785242015-01-28 17:00:21 -080097 gpr_atm_rel_store(&r->shutdown, 0);
98 r->fd = fd;
Craig Tiller8e50fe92015-05-18 10:45:04 -070099 r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
100 &r->inactive_watcher_root;
David Klempnerd1785242015-01-28 17:00:21 -0800101 r->freelist_next = NULL;
Craig Tiller886d7ec2015-05-14 16:18:42 -0700102 r->read_watcher = r->write_watcher = NULL;
David Klempnerd1785242015-01-28 17:00:21 -0800103 return r;
104}
105
/* Really free an fd object; called only from grpc_fd_global_shutdown() on
   objects drained from the freelist. */
static void destroy(grpc_fd *fd) {
  gpr_mu_destroy(&fd->set_state_mu);
  gpr_mu_destroy(&fd->watcher_mu);
  gpr_free(fd);
}
111
Craig Tiller9ae76972015-05-31 13:58:24 -0700112#ifdef GRPC_FD_REF_COUNT_DEBUG
113#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
114#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
115static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, int line) {
116 gpr_log(GPR_DEBUG, "FD %d ref %d %d -> %d [%s; %s:%d]", fd->fd, n, fd->refst, fd->refst + n, reason, file, line);
117#else
118#define REF_BY(fd, n, reason) ref_by(fd, n)
119#define UNREF_BY(fd, n, reason) unref_by(fd, n)
ctiller58393c22015-01-07 14:03:30 -0800120static void ref_by(grpc_fd *fd, int n) {
Craig Tiller9ae76972015-05-31 13:58:24 -0700121#endif
Craig Tiller23139ae2015-02-17 15:46:13 -0800122 GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
ctiller58393c22015-01-07 14:03:30 -0800123}
124
Craig Tiller9ae76972015-05-31 13:58:24 -0700125#ifdef GRPC_FD_REF_COUNT_DEBUG
126static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, int line) {
127 gpr_atm old;
128 gpr_log(GPR_DEBUG, "FD %d unref %d %d -> %d [%s; %s:%d]", fd->fd, n, fd->refst, fd->refst - n, reason, file, line);
129#else
ctiller58393c22015-01-07 14:03:30 -0800130static void unref_by(grpc_fd *fd, int n) {
Craig Tiller9ae76972015-05-31 13:58:24 -0700131 gpr_atm old;
132#endif
133 old = gpr_atm_full_fetch_add(&fd->refst, -n);
Craig Tiller23139ae2015-02-17 15:46:13 -0800134 if (old == n) {
David Klempnerc6bccc22015-02-24 17:33:05 -0800135 close(fd->fd);
David Klempnerd1785242015-01-28 17:00:21 -0800136 grpc_iomgr_add_callback(fd->on_done, fd->on_done_user_data);
137 freelist_fd(fd);
138 grpc_iomgr_unref();
Craig Tiller23139ae2015-02-17 15:46:13 -0800139 } else {
140 GPR_ASSERT(old > n);
David Klempnerd1785242015-01-28 17:00:21 -0800141 }
142}
143
Craig Tiller7d413212015-02-09 08:00:02 -0800144void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
David Klempnerd1785242015-01-28 17:00:21 -0800145
146void grpc_fd_global_shutdown(void) {
147 while (fd_freelist != NULL) {
148 grpc_fd *fd = fd_freelist;
149 fd_freelist = fd_freelist->freelist_next;
ctiller58393c22015-01-07 14:03:30 -0800150 destroy(fd);
151 }
David Klempnerd1785242015-01-28 17:00:21 -0800152 gpr_mu_destroy(&fd_freelist_mu);
ctiller58393c22015-01-07 14:03:30 -0800153}
154
155static void do_nothing(void *ignored, int success) {}
156
157grpc_fd *grpc_fd_create(int fd) {
David Klempnerd1785242015-01-28 17:00:21 -0800158 grpc_fd *r = alloc_fd(fd);
ctiller58393c22015-01-07 14:03:30 -0800159 grpc_iomgr_ref();
ctiller58393c22015-01-07 14:03:30 -0800160 return r;
161}
162
163int grpc_fd_is_orphaned(grpc_fd *fd) {
164 return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
165}
166
Craig Tiller886d7ec2015-05-14 16:18:42 -0700167static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
Craig Tiller354bf6d2015-05-18 10:18:03 -0700168 if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
169 grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset);
170 } else if (fd->read_watcher) {
171 grpc_pollset_force_kick(fd->read_watcher->pollset);
172 } else if (fd->write_watcher) {
173 grpc_pollset_force_kick(fd->write_watcher->pollset);
Craig Tiller886d7ec2015-05-14 16:18:42 -0700174 }
175}
176
/* Lock-taking wrapper around maybe_wake_one_watcher_locked(). */
static void maybe_wake_one_watcher(grpc_fd *fd) {
  gpr_mu_lock(&fd->watcher_mu);
  maybe_wake_one_watcher_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
}
182
Craig Tillerc95de722015-05-29 08:56:46 -0700183static void wake_all_watchers_locked(grpc_fd *fd) {
Craig Tiller886d7ec2015-05-14 16:18:42 -0700184 grpc_fd_watcher *watcher;
Craig Tiller8e50fe92015-05-18 10:45:04 -0700185 for (watcher = fd->inactive_watcher_root.next;
186 watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
Craig Tiller7d413212015-02-09 08:00:02 -0800187 grpc_pollset_force_kick(watcher->pollset);
ctiller58393c22015-01-07 14:03:30 -0800188 }
Craig Tiller354bf6d2015-05-18 10:18:03 -0700189 if (fd->read_watcher) {
190 grpc_pollset_force_kick(fd->read_watcher->pollset);
191 }
192 if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
193 grpc_pollset_force_kick(fd->write_watcher->pollset);
194 }
ctiller58393c22015-01-07 14:03:30 -0800195}
196
/* Begin tearing down an fd: shut down the socket, wake every watcher, and
   drop the "active" reference.  on_done (or do_nothing if NULL) runs once
   the last reference is released in unref_by().  The +1/-2 pair below nets
   a -1 while also flipping the refcount's low "active" bit, which is what
   grpc_fd_is_orphaned tests. */
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data) {
  fd->on_done = on_done ? on_done : do_nothing;
  fd->on_done_user_data = user_data;
  shutdown(fd->fd, SHUT_RDWR);
  REF_BY(fd, 1, "orphan"); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  wake_all_watchers_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
  UNREF_BY(fd, 2, "orphan"); /* drop the reference */
}
207
/* increment refcount by two to avoid changing the orphan bit */
#ifdef GRPC_FD_REF_COUNT_DEBUG
/* Public ref/unref, debug flavor: forwards the caller's reason/location to
   the logging ref_by/unref_by. */
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line) {
  ref_by(fd, 2, reason, file, line);
}

void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file, int line) {
  unref_by(fd, 2, reason, file, line);
}
#else
/* Public ref/unref, release flavor. */
void grpc_fd_ref(grpc_fd *fd) {
  ref_by(fd, 2);
}

void grpc_fd_unref(grpc_fd *fd) {
  unref_by(fd, 2);
}
#endif
ctiller58393c22015-01-07 14:03:30 -0800226
ctiller58393c22015-01-07 14:03:30 -0800227static void make_callback(grpc_iomgr_cb_func cb, void *arg, int success,
228 int allow_synchronous_callback) {
229 if (allow_synchronous_callback) {
230 cb(arg, success);
231 } else {
232 grpc_iomgr_add_delayed_callback(cb, arg, success);
233 }
234}
235
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800236static void make_callbacks(grpc_iomgr_closure *callbacks, size_t n, int success,
ctiller58393c22015-01-07 14:03:30 -0800237 int allow_synchronous_callback) {
238 size_t i;
239 for (i = 0; i < n; i++) {
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800240 make_callback(callbacks[i].cb, callbacks[i].cb_arg, success,
ctiller58393c22015-01-07 14:03:30 -0800241 allow_synchronous_callback);
242 }
243}
244
/* Register 'closure' to run when *st (fd->readst or fd->writest) becomes
   ready.  *st holds NOT_READY, READY, or a pointer to the currently waiting
   closure (see descriptor_state).  At most one notify_on per direction may
   be outstanding at a time. */
static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
                      int allow_synchronous_callback) {
  switch (gpr_atm_acq_load(st)) {
    case NOT_READY:
      /* There is no race if the descriptor is already ready, so we skip
         the interlocked op in that case.  As long as the app doesn't
         try to set the same upcall twice (which it shouldn't) then
         oldval should never be anything other than READY or NOT_READY.  We
         don't
         check for user error on the fast path. */
      if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
        /* swap was successful -- the closure will run after the next
           set_ready call.  NOTE: we don't have an ABA problem here,
           since we should never have concurrent calls to the same
           notify_on function. */
        maybe_wake_one_watcher(fd);
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the READY code below */
    case READY:
      assert(gpr_atm_no_barrier_load(st) == READY);
      gpr_atm_rel_store(st, NOT_READY);
      /* success reflects whether the fd has been shut down */
      make_callback(closure->cb, closure->cb_arg,
                    !gpr_atm_acq_load(&fd->shutdown),
                    allow_synchronous_callback);
      return;
    default: /* WAITING */
      /* upcallptr was set to a different closure.  This is an error! */
      gpr_log(GPR_ERROR,
              "User called a notify_on function with a previous callback still "
              "pending");
      abort();
  }
  /* unreachable: all switch cases return or abort */
  gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
  abort();
}
282
/* Mark *st ready.  If a closure is already waiting in *st, append it to
   'callbacks' (advancing *ncallbacks) for the caller to dispatch; otherwise
   record READY so the next notify_on fires immediately.
   Caller holds fd->set_state_mu (only one set_ready path runs at a time,
   but a notify_on may race, hence the CAS). */
static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure *callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the WAITING code below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      assert(gpr_atm_no_barrier_load(st) != READY &&
             gpr_atm_no_barrier_load(st) != NOT_READY);
      /* 'state' is the waiting closure pointer; hand it to the caller and
         reset the direction to NOT_READY */
      callbacks[(*ncallbacks)++] = *(grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}
308
/* Signal readiness on one direction of fd, dispatching (at most one) waiting
   closure.  Success passed to the callback is 0 iff the fd was shut down. */
static void set_ready(grpc_fd *fd, gpr_atm *st,
                      int allow_synchronous_callback) {
  /* only one set_ready can be active at once (but there may be a racing
     notify_on) */
  int success;
  grpc_iomgr_closure cb; /* room for the single closure one direction can hold */
  size_t ncb = 0;
  gpr_mu_lock(&fd->set_state_mu);
  set_ready_locked(st, &cb, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  success = !gpr_atm_acq_load(&fd->shutdown);
  make_callbacks(&cb, ncb, success, allow_synchronous_callback);
}
322
/* Shut the fd down: flag fd->shutdown and flush any waiting read/write
   closures with success == 0.  Must be called at most once per fd (asserted). */
void grpc_fd_shutdown(grpc_fd *fd) {
  grpc_iomgr_closure cb[2]; /* at most one pending closure per direction */
  size_t ncb = 0;
  gpr_mu_lock(&fd->set_state_mu);
  GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
  gpr_atm_rel_store(&fd->shutdown, 1);
  set_ready_locked(&fd->readst, cb, &ncb);
  set_ready_locked(&fd->writest, cb, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  make_callbacks(cb, ncb, 0, 0);
}
334
/* Register closure to run when fd becomes readable (asynchronous dispatch
   only: allow_synchronous_callback == 0). */
void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->readst, closure, 0);
}
338
/* Register closure to run when fd becomes writable (asynchronous dispatch
   only: allow_synchronous_callback == 0). */
void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->writest, closure, 0);
}
342
/* Called by a pollset before polling this fd.  Returns the event mask the
   poller should wait for: read_mask and/or write_mask are claimed only if no
   other watcher already covers that direction AND a closure is waiting for
   it (an atom > READY holds a closure pointer; see descriptor_state).
   A watcher that claims nothing is parked on the inactive list so it can be
   kicked later.  Pairs with grpc_fd_end_poll. */
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              gpr_uint32 read_mask, gpr_uint32 write_mask,
                              grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->watcher_mu);
  /* if there is nobody polling for read, but we need to, then start doing so */
  if (!fd->read_watcher && gpr_atm_acq_load(&fd->readst) > READY) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  if (!fd->write_watcher && gpr_atm_acq_load(&fd->writest) > READY) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later */
  if (mask == 0) {
    /* splice onto the head of the doubly-linked inactive list */
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->watcher_mu);

  return mask;
}
375
/* Called by a pollset after polling.  Releases the watcher's read/write
   claims (or unlinks it from the inactive list if it claimed nothing) and,
   if a still-needed direction was left uncovered (got_read/got_write == 0),
   kicks another watcher to take over.  Drops the ref taken in begin_poll. */
void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  gpr_mu_unlock(&fd->watcher_mu);

  GRPC_FD_UNREF(fd, "poll");
}
406
/* Poller-side notification that fd is readable; fires any waiting read
   closure via set_ready. */
void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->readst, allow_synchronous_callback);
}
410
/* Poller-side notification that fd is writable; fires any waiting write
   closure via set_ready. */
void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->writest, allow_synchronous_callback);
}
Craig Tillerd14a1a52015-01-21 15:26:29 -0800414
Craig Tiller190d3602015-02-18 09:23:38 -0800415#endif