blob: 3509d021eec0176fd4d6793b3ffda09e2a43260d [file] [log] [blame]
ctiller58393c22015-01-07 14:03:30 -08001/*
2 *
Craig Tiller06059952015-02-18 08:34:56 -08003 * Copyright 2015, Google Inc.
ctiller58393c22015-01-07 14:03:30 -08004 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met:
9 *
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above
13 * copyright notice, this list of conditions and the following disclaimer
14 * in the documentation and/or other materials provided with the
15 * distribution.
16 * * Neither the name of Google Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 */
33
Craig Tillerd14a1a52015-01-21 15:26:29 -080034#include <grpc/support/port_platform.h>
35
36#ifdef GPR_POSIX_SOCKET
37
ctiller58393c22015-01-07 14:03:30 -080038#include "src/core/iomgr/fd_posix.h"
39
40#include <assert.h>
David Klempnerc6bccc22015-02-24 17:33:05 -080041#include <sys/socket.h>
ctiller58393c22015-01-07 14:03:30 -080042#include <unistd.h>
43
44#include "src/core/iomgr/iomgr_internal.h"
45#include <grpc/support/alloc.h>
46#include <grpc/support/log.h>
47#include <grpc/support/useful.h>
48
/* State of an fd's readst/writest word (stored in a gpr_atm).  Values other
   than NOT_READY/READY are interpreted as a grpc_iomgr_closure* that was
   parked by notify_on and is waiting for the event. */
enum descriptor_state {
  NOT_READY = 0,
  READY = 1
}; /* or a pointer to a closure to call */
ctiller58393c22015-01-07 14:03:30 -080053
/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when is safe to free it, at least
 * without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */
/* TODO(klempner): We could use some form of polling generation count to know
 * when these are safe to free. */
/* TODO(klempner): Consider disabling freelisting if we don't have multiple
 * threads in poll on the same fd */
/* TODO(klempner): Batch these allocations to reduce fragmentation */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu; /* guards fd_freelist */
73
/* Push an fd onto the freelist (fds are cached, never freed, until global
   shutdown -- see the rationale above).  Takes the freelist lock. */
static void freelist_fd(grpc_fd *fd) {
  gpr_mu_lock(&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  gpr_mu_unlock(&fd_freelist_mu);
}
80
81static grpc_fd *alloc_fd(int fd) {
82 grpc_fd *r = NULL;
83 gpr_mu_lock(&fd_freelist_mu);
84 if (fd_freelist != NULL) {
85 r = fd_freelist;
86 fd_freelist = fd_freelist->freelist_next;
87 }
88 gpr_mu_unlock(&fd_freelist_mu);
89 if (r == NULL) {
90 r = gpr_malloc(sizeof(grpc_fd));
91 gpr_mu_init(&r->set_state_mu);
92 gpr_mu_init(&r->watcher_mu);
93 }
David Garcia Quintas5f228f52015-05-26 19:58:50 -070094
David Klempnerd1785242015-01-28 17:00:21 -080095 gpr_atm_rel_store(&r->refst, 1);
Craig Tiller0fcd53c2015-02-18 15:10:53 -080096 gpr_atm_rel_store(&r->readst, NOT_READY);
97 gpr_atm_rel_store(&r->writest, NOT_READY);
David Klempnerd1785242015-01-28 17:00:21 -080098 gpr_atm_rel_store(&r->shutdown, 0);
99 r->fd = fd;
Craig Tiller8e50fe92015-05-18 10:45:04 -0700100 r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
101 &r->inactive_watcher_root;
David Klempnerd1785242015-01-28 17:00:21 -0800102 r->freelist_next = NULL;
Craig Tiller886d7ec2015-05-14 16:18:42 -0700103 r->read_watcher = r->write_watcher = NULL;
David Klempnerd1785242015-01-28 17:00:21 -0800104 return r;
105}
106
/* Actually release an fd's resources.  Only reached from
   grpc_fd_global_shutdown while draining the freelist. */
static void destroy(grpc_fd *fd) {
  gpr_mu_destroy(&fd->set_state_mu);
  gpr_mu_destroy(&fd->watcher_mu);
  gpr_free(fd);
}
112
/* Add n references.  The count must already be positive: an fd whose count
   reached zero may already have been freelisted. */
static void ref_by(grpc_fd *fd, int n) {
  GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0);
}
116
/* Drop n references; when the count hits zero, close the descriptor,
   schedule the orphan's on_done callback, and return the object to the
   freelist. */
static void unref_by(grpc_fd *fd, int n) {
  gpr_atm old = gpr_atm_full_fetch_add(&fd->refst, -n);
  if (old == n) {
    /* this was the last reference */
    close(fd->fd);
    /* hand on_done to the iomgr via the embedded, externally-managed iocb */
    fd->on_done_iocb.cb = fd->on_done;
    fd->on_done_iocb.cb_arg = fd->on_done_user_data;
    fd->on_done_iocb.is_ext_managed = 1;
    grpc_iomgr_add_callback(&fd->on_done_iocb);
    freelist_fd(fd);
    grpc_iomgr_unref();
  } else {
    /* the count must not have gone negative */
    GPR_ASSERT(old > n);
  }
}
131
Craig Tiller7d413212015-02-09 08:00:02 -0800132void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); }
David Klempnerd1785242015-01-28 17:00:21 -0800133
134void grpc_fd_global_shutdown(void) {
135 while (fd_freelist != NULL) {
136 grpc_fd *fd = fd_freelist;
137 fd_freelist = fd_freelist->freelist_next;
ctiller58393c22015-01-07 14:03:30 -0800138 destroy(fd);
139 }
David Klempnerd1785242015-01-28 17:00:21 -0800140 gpr_mu_destroy(&fd_freelist_mu);
ctiller58393c22015-01-07 14:03:30 -0800141}
142
/* Default on_done used when grpc_fd_orphan is given a NULL callback. */
static void do_nothing(void *ignored, int success) {}

/* Wrap an existing file descriptor (ownership transfers to the returned
   grpc_fd) and register it with the backup pollset so it is polled even
   before any explicit pollset picks it up. */
grpc_fd *grpc_fd_create(int fd) {
  grpc_fd *r = alloc_fd(fd);
  grpc_iomgr_ref();
  grpc_pollset_add_fd(grpc_backup_pollset(), r);
  return r;
}
151
/* The least-significant bit of refst acts as an "active" flag: alloc_fd
   starts the count at 1 and grpc_fd_ref/unref move it by 2, so the bit
   stays set until grpc_fd_orphan's ref_by(fd, 1)/unref_by(fd, 2) clears
   it.  An even count therefore means the fd has been orphaned. */
int grpc_fd_is_orphaned(grpc_fd *fd) {
  return (gpr_atm_acq_load(&fd->refst) & 1) == 0;
}
155
/* Kick exactly one poller so it re-evaluates what it should poll for.
   Preference order: an inactive (parked) watcher first, then the current
   read poller, then the write poller.  Caller must hold watcher_mu. */
static void maybe_wake_one_watcher_locked(grpc_fd *fd) {
  if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) {
    grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset);
  } else if (fd->read_watcher) {
    grpc_pollset_force_kick(fd->read_watcher->pollset);
  } else if (fd->write_watcher) {
    grpc_pollset_force_kick(fd->write_watcher->pollset);
  }
}
165
/* Lock-acquiring wrapper around maybe_wake_one_watcher_locked. */
static void maybe_wake_one_watcher(grpc_fd *fd) {
  gpr_mu_lock(&fd->watcher_mu);
  maybe_wake_one_watcher_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
}
171
/* Kick every watcher attached to fd: everything on the inactive list plus
   the active read and write pollers.  Used during orphaning so no poller
   stays blocked on a dying fd.  Caller must hold watcher_mu. */
static void wake_all_watchers_locked(grpc_fd *fd) {
  grpc_fd_watcher *watcher;
  for (watcher = fd->inactive_watcher_root.next;
       watcher != &fd->inactive_watcher_root; watcher = watcher->next) {
    grpc_pollset_force_kick(watcher->pollset);
  }
  if (fd->read_watcher) {
    grpc_pollset_force_kick(fd->read_watcher->pollset);
  }
  /* don't kick twice when one watcher handles both directions */
  if (fd->write_watcher && fd->write_watcher != fd->read_watcher) {
    grpc_pollset_force_kick(fd->write_watcher->pollset);
  }
}
185
/* Begin tearing down fd: shut the socket down (waking any blocked I/O),
   kick all watchers out of poll, and drop the "active" reference.
   on_done runs (via unref_by) once the last reference is released. */
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data) {
  fd->on_done = on_done ? on_done : do_nothing;
  fd->on_done_user_data = user_data;
  shutdown(fd->fd, SHUT_RDWR);
  ref_by(fd, 1); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  wake_all_watchers_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
  unref_by(fd, 2); /* drop the reference */
}
196
/* increment refcount by two to avoid changing the orphan bit */
void grpc_fd_ref(grpc_fd *fd) { ref_by(fd, 2); }

/* matching decrement-by-two; may trigger final cleanup in unref_by */
void grpc_fd_unref(grpc_fd *fd) { unref_by(fd, 2); }
201
/* Run cb(arg, success) either inline (when synchronous callbacks are
   allowed) or deferred on the iomgr's delayed-callback queue.
   NOTE(review): on the deferred path the `iocb` parameter is overwritten by
   grpc_iomgr_cb_create before it is ever read, so a caller-supplied
   closure instance is never actually used here -- verify against
   grpc_iomgr_cb_create whether the fd_posix-managed instances passed by
   set_ready/grpc_fd_shutdown are leaked. */
static void make_callback(grpc_iomgr_cb_func cb, void *arg, int success,
                          int allow_synchronous_callback,
                          grpc_iomgr_closure *iocb) {
  if (allow_synchronous_callback) {
    cb(arg, success);
  } else {
    /* !iocb: allocate -> managed by iomgr
     * iocb: "iocb" holds an instance managed by fd_posix */
    iocb = grpc_iomgr_cb_create(cb, arg, !iocb /* is_ext_managed */);
    grpc_iomgr_add_delayed_callback(iocb, success);
  }
}
214
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800215static void make_callbacks(grpc_iomgr_closure *callbacks, size_t n, int success,
David Garcia Quintas5f228f52015-05-26 19:58:50 -0700216 int allow_synchronous_callback,
217 grpc_iomgr_closure *iocbs) {
ctiller58393c22015-01-07 14:03:30 -0800218 size_t i;
219 for (i = 0; i < n; i++) {
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800220 make_callback(callbacks[i].cb, callbacks[i].cb_arg, success,
David Garcia Quintas5f228f52015-05-26 19:58:50 -0700221 allow_synchronous_callback, iocbs + i);
ctiller58393c22015-01-07 14:03:30 -0800222 }
223}
224
Craig Tillerf95e37f2015-02-18 15:15:29 -0800225static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure,
226 int allow_synchronous_callback) {
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800227 switch (gpr_atm_acq_load(st)) {
ctiller58393c22015-01-07 14:03:30 -0800228 case NOT_READY:
229 /* There is no race if the descriptor is already ready, so we skip
230 the interlocked op in that case. As long as the app doesn't
231 try to set the same upcall twice (which it shouldn't) then
232 oldval should never be anything other than READY or NOT_READY. We
233 don't
234 check for user error on the fast path. */
Craig Tillerf95e37f2015-02-18 15:15:29 -0800235 if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) {
ctiller58393c22015-01-07 14:03:30 -0800236 /* swap was successful -- the closure will run after the next
237 set_ready call. NOTE: we don't have an ABA problem here,
238 since we should never have concurrent calls to the same
239 notify_on function. */
Craig Tiller886d7ec2015-05-14 16:18:42 -0700240 maybe_wake_one_watcher(fd);
ctiller58393c22015-01-07 14:03:30 -0800241 return;
242 }
243 /* swap was unsuccessful due to an intervening set_ready call.
244 Fall through to the READY code below */
245 case READY:
David Klempner466423b2015-03-11 15:00:46 -0700246 assert(gpr_atm_no_barrier_load(st) == READY);
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800247 gpr_atm_rel_store(st, NOT_READY);
Craig Tillerf95e37f2015-02-18 15:15:29 -0800248 make_callback(closure->cb, closure->cb_arg,
249 !gpr_atm_acq_load(&fd->shutdown),
David Garcia Quintas5f228f52015-05-26 19:58:50 -0700250 allow_synchronous_callback, NULL);
ctiller58393c22015-01-07 14:03:30 -0800251 return;
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800252 default: /* WAITING */
ctiller58393c22015-01-07 14:03:30 -0800253 /* upcallptr was set to a different closure. This is an error! */
254 gpr_log(GPR_ERROR,
255 "User called a notify_on function with a previous callback still "
256 "pending");
257 abort();
258 }
259 gpr_log(GPR_ERROR, "Corrupt memory in &st->state");
260 abort();
261}
262
/* Mark *st ready.  If notify_on parked a closure in *st, copy it into
   callbacks[] (bumping *ncallbacks) and reset the state -- the caller
   dispatches it.  Caller must hold fd->set_state_mu. */
static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure *callbacks,
                             size_t *ncallbacks) {
  gpr_intptr state = gpr_atm_acq_load(st);

  switch (state) {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas(st, NOT_READY, READY)) {
        /* swap was successful -- the closure will run after the next
           notify_on call. */
        return;
      }
      /* swap was unsuccessful: a racing notify_on parked a closure in the
         meantime.  Fall through to the WAITING code below */
      state = gpr_atm_acq_load(st);
    default: /* waiting */
      assert(gpr_atm_no_barrier_load(st) != READY &&
             gpr_atm_no_barrier_load(st) != NOT_READY);
      /* collect the parked closure for the caller to dispatch */
      callbacks[(*ncallbacks)++] = *(grpc_iomgr_closure *)state;
      gpr_atm_rel_store(st, NOT_READY);
      return;
  }
}
288
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800289static void set_ready(grpc_fd *fd, gpr_atm *st,
ctiller58393c22015-01-07 14:03:30 -0800290 int allow_synchronous_callback) {
291 /* only one set_ready can be active at once (but there may be a racing
292 notify_on) */
293 int success;
Craig Tiller0fcd53c2015-02-18 15:10:53 -0800294 grpc_iomgr_closure cb;
ctiller58393c22015-01-07 14:03:30 -0800295 size_t ncb = 0;
David Garcia Quintas5f228f52015-05-26 19:58:50 -0700296 grpc_iomgr_closure *ready_iocb;
ctiller58393c22015-01-07 14:03:30 -0800297 gpr_mu_lock(&fd->set_state_mu);
298 set_ready_locked(st, &cb, &ncb);
299 gpr_mu_unlock(&fd->set_state_mu);
300 success = !gpr_atm_acq_load(&fd->shutdown);
David Garcia Quintas5f228f52015-05-26 19:58:50 -0700301 assert(ncb <= 1);
302 ready_iocb = grpc_iomgr_cb_create(cb.cb, cb.cb_arg, 0);
303 make_callbacks(&cb, ncb, success, allow_synchronous_callback, ready_iocb);
ctiller58393c22015-01-07 14:03:30 -0800304}
305
/* Mark fd shut down and fail (success == 0) any read/write closures that
   were parked by notify_on.  May only be called once per fd (asserted). */
void grpc_fd_shutdown(grpc_fd *fd) {
  grpc_iomgr_closure cb[2];
  size_t ncb = 0;
  gpr_mu_lock(&fd->set_state_mu);
  GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
  gpr_atm_rel_store(&fd->shutdown, 1);
  /* collect any pending read and write closures */
  set_ready_locked(&fd->readst, cb, &ncb);
  set_ready_locked(&fd->writest, cb, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  assert(ncb <= 2);
  make_callbacks(cb, ncb, 0, 0, fd->shutdown_iocbs);
}
318
/* Register a closure to run when fd becomes readable; callbacks are always
   deferred (allow_synchronous_callback == 0). */
void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->readst, closure, 0);
}

/* As above, for writability. */
void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) {
  notify_on(fd, &fd->writest, closure, 0);
}
326
/* Called by a pollset before polling: registers `watcher` against fd and
   returns the event mask (read_mask/write_mask bits) this poller should
   wait for.  At most one watcher actively polls each direction; a watcher
   needed for neither is parked on the inactive list so it can be kicked
   into service later.  (A state word > READY holds a parked closure, i.e.
   somebody is waiting for that event.) */
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              gpr_uint32 read_mask, gpr_uint32 write_mask,
                              grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they change
   */
  grpc_fd_ref(fd);

  gpr_mu_lock(&fd->watcher_mu);
  /* if there is nobody polling for read, but we need to, then start doing so */
  if (!fd->read_watcher && gpr_atm_acq_load(&fd->readst) > READY) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  if (!fd->write_watcher && gpr_atm_acq_load(&fd->writest) > READY) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later */
  if (mask == 0) {
    /* splice into the circular inactive list, just after the root */
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->watcher_mu);

  return mask;
}
359
/* Called by a pollset after polling fd.  Detaches `watcher`; if it was the
   active read/write poller and that event still hasn't been observed
   (got_read/got_write == 0), kick another watcher to take over.  Releases
   the reference taken in grpc_fd_begin_poll. */
void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  gpr_mu_unlock(&fd->watcher_mu);

  grpc_fd_unref(fd);
}
390
/* Event delivery from the poller: mark fd readable, dispatching any closure
   parked by grpc_fd_notify_on_read. */
void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->readst, allow_synchronous_callback);
}

/* As above, for writability. */
void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) {
  set_ready(fd, &fd->writest, allow_synchronous_callback);
}
Craig Tillerd14a1a52015-01-21 15:26:29 -0800398
Craig Tiller190d3602015-02-18 09:23:38 -0800399#endif