/*
 *
 * Copyright 2015, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <grpc/support/port_platform.h>

#ifdef GPR_POSIX_SOCKET

#include "src/core/iomgr/fd_posix.h"

#include <assert.h>
#include <sys/socket.h>
#include <unistd.h>

#include <grpc/support/alloc.h>
#include <grpc/support/log.h>
#include <grpc/support/useful.h>

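/* The read/write readiness state for an fd lives in a single atomic word
   (fd->readst / fd->writest).  It holds NOT_READY, READY, or a pointer to
   the grpc_closure to invoke once the fd becomes ready; notify_on() and
   set_ready() below race on this word using compare-and-swap. */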
enum descriptor_state
{
  NOT_READY = 0,
  READY = 1
};				/* or a pointer to a closure to call */

/* We need to keep a freelist not because of any concerns of malloc performance
 * but instead so that implementations with multiple threads in (for example)
 * epoll_wait deal with the race between pollset removal and incoming poll
 * notifications.
 *
 * The problem is that the poller ultimately holds a reference to this
 * object, so it is very difficult to know when it is safe to free it, at
 * least without some expensive synchronization.
 *
 * If we keep the object freelisted, in the worst case losing this race just
 * becomes a spurious read notification on a reused fd.
 */
/* TODO(klempner): We could use some form of polling generation count to know
 * when these are safe to free. */
/* TODO(klempner): Consider disabling freelisting if we don't have multiple
 * threads in poll on the same fd */
/* TODO(klempner): Batch these allocations to reduce fragmentation */
static grpc_fd *fd_freelist = NULL;
static gpr_mu fd_freelist_mu;

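/* Return an fd to the freelist once it is no longer referenced, and drop it
   from the iomgr object tracking list. */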
static void
freelist_fd (grpc_fd * fd)
{
  gpr_mu_lock (&fd_freelist_mu);
  fd->freelist_next = fd_freelist;
  fd_freelist = fd;
  grpc_iomgr_unregister_object (&fd->iomgr_object);
  gpr_mu_unlock (&fd_freelist_mu);
}

static grpc_fd *
alloc_fd (int fd)
{
  grpc_fd *r = NULL;
  gpr_mu_lock (&fd_freelist_mu);
  if (fd_freelist != NULL)
    {
      r = fd_freelist;
      fd_freelist = fd_freelist->freelist_next;
    }
  gpr_mu_unlock (&fd_freelist_mu);
  if (r == NULL)
    {
      r = gpr_malloc (sizeof (grpc_fd));
      gpr_mu_init (&r->set_state_mu);
      gpr_mu_init (&r->watcher_mu);
    }

  gpr_atm_rel_store (&r->refst, 1);
  gpr_atm_rel_store (&r->readst, NOT_READY);
  gpr_atm_rel_store (&r->writest, NOT_READY);
  gpr_atm_rel_store (&r->shutdown, 0);
  r->fd = fd;
  r->inactive_watcher_root.next = r->inactive_watcher_root.prev =
    &r->inactive_watcher_root;
  r->freelist_next = NULL;
  r->read_watcher = r->write_watcher = NULL;
  r->on_done_closure = NULL;
  r->closed = 0;
  return r;
}

static void
destroy (grpc_fd * fd)
{
  gpr_mu_destroy (&fd->set_state_mu);
  gpr_mu_destroy (&fd->watcher_mu);
  gpr_free (fd);
}

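/* The low bit of fd->refst doubles as an "active" flag: an fd starts with
   refst == 1, the public ref/unref operations move the count by 2 so the bit
   is untouched, and grpc_fd_orphan clears it (see grpc_fd_is_orphaned).  When
   the count reaches zero the fd is pushed back onto the freelist. */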
#ifdef GRPC_FD_REF_COUNT_DEBUG
#define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__)
#define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__)
static void
ref_by (grpc_fd * fd, int n, const char *reason, const char *file, int line)
{
  gpr_log (GPR_DEBUG, "FD %d %p ref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
	   gpr_atm_no_barrier_load (&fd->refst),
	   gpr_atm_no_barrier_load (&fd->refst) + n, reason, file, line);
#else
#define REF_BY(fd, n, reason) ref_by(fd, n)
#define UNREF_BY(fd, n, reason) unref_by(fd, n)
static void
ref_by (grpc_fd * fd, int n)
{
#endif
  GPR_ASSERT (gpr_atm_no_barrier_fetch_add (&fd->refst, n) > 0);
}

#ifdef GRPC_FD_REF_COUNT_DEBUG
static void
unref_by (grpc_fd * fd, int n, const char *reason, const char *file, int line)
{
  gpr_atm old;
  gpr_log (GPR_DEBUG, "FD %d %p unref %d %d -> %d [%s; %s:%d]", fd->fd, fd, n,
	   gpr_atm_no_barrier_load (&fd->refst),
	   gpr_atm_no_barrier_load (&fd->refst) - n, reason, file, line);
#else
static void
unref_by (grpc_fd * fd, int n)
{
  gpr_atm old;
#endif
  old = gpr_atm_full_fetch_add (&fd->refst, -n);
  if (old == n)
    {
      freelist_fd (fd);
    }
  else
    {
      GPR_ASSERT (old > n);
    }
}

void
grpc_fd_global_init (void)
{
  gpr_mu_init (&fd_freelist_mu);
}

void
grpc_fd_global_shutdown (void)
{
  gpr_mu_lock (&fd_freelist_mu);
  gpr_mu_unlock (&fd_freelist_mu);
  while (fd_freelist != NULL)
    {
      grpc_fd *fd = fd_freelist;
      fd_freelist = fd_freelist->freelist_next;
      destroy (fd);
    }
  gpr_mu_destroy (&fd_freelist_mu);
}

grpc_fd *
grpc_fd_create (int fd, const char *name)
{
  grpc_fd *r = alloc_fd (fd);
  grpc_iomgr_register_object (&r->iomgr_object, name);
  return r;
}

int
grpc_fd_is_orphaned (grpc_fd * fd)
{
  return (gpr_atm_acq_load (&fd->refst) & 1) == 0;
}

static void
pollset_kick_locked (grpc_pollset * pollset)
{
  gpr_mu_lock (GRPC_POLLSET_MU (pollset));
  grpc_pollset_kick (pollset, NULL);
  gpr_mu_unlock (GRPC_POLLSET_MU (pollset));
}

static void
maybe_wake_one_watcher_locked (grpc_fd * fd)
{
  if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root)
    {
      pollset_kick_locked (fd->inactive_watcher_root.next->pollset);
    }
  else if (fd->read_watcher)
    {
      pollset_kick_locked (fd->read_watcher->pollset);
    }
  else if (fd->write_watcher)
    {
      pollset_kick_locked (fd->write_watcher->pollset);
    }
}

static void
maybe_wake_one_watcher (grpc_fd * fd)
{
  gpr_mu_lock (&fd->watcher_mu);
  maybe_wake_one_watcher_locked (fd);
  gpr_mu_unlock (&fd->watcher_mu);
}

static void
wake_all_watchers_locked (grpc_fd * fd)
{
  grpc_fd_watcher *watcher;
  for (watcher = fd->inactive_watcher_root.next;
       watcher != &fd->inactive_watcher_root; watcher = watcher->next)
    {
      pollset_kick_locked (watcher->pollset);
    }
  if (fd->read_watcher)
    {
      pollset_kick_locked (fd->read_watcher->pollset);
    }
  if (fd->write_watcher && fd->write_watcher != fd->read_watcher)
    {
      pollset_kick_locked (fd->write_watcher->pollset);
    }
}

static int
has_watchers (grpc_fd * fd)
{
  return fd->read_watcher != NULL || fd->write_watcher != NULL
    || fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
}

void
grpc_fd_orphan (grpc_exec_ctx * exec_ctx, grpc_fd * fd, grpc_closure * on_done,
		const char *reason)
{
  fd->on_done_closure = on_done;
  shutdown (fd->fd, SHUT_RDWR);
  gpr_mu_lock (&fd->watcher_mu);
  REF_BY (fd, 1, reason);	/* remove active status, but keep referenced */
  if (!has_watchers (fd))
    {
      fd->closed = 1;
      close (fd->fd);
      grpc_exec_ctx_enqueue (exec_ctx, fd->on_done_closure, 1);
    }
  else
    {
      wake_all_watchers_locked (fd);
    }
  gpr_mu_unlock (&fd->watcher_mu);
  UNREF_BY (fd, 2, reason);	/* drop the reference */
}

/* increment refcount by two to avoid changing the orphan bit */
#ifdef GRPC_FD_REF_COUNT_DEBUG
void
grpc_fd_ref (grpc_fd * fd, const char *reason, const char *file, int line)
{
  ref_by (fd, 2, reason, file, line);
}

void
grpc_fd_unref (grpc_fd * fd, const char *reason, const char *file, int line)
{
  unref_by (fd, 2, reason, file, line);
}
#else
void
grpc_fd_ref (grpc_fd * fd)
{
  ref_by (fd, 2);
}

void
grpc_fd_unref (grpc_fd * fd)
{
  unref_by (fd, 2);
}
#endif

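/* Register 'closure' to be scheduled when *st (readst or writest) becomes
   ready.  If the fd is already READY the closure is scheduled immediately
   (with success == 0 if the fd has been shut down) and the state resets to
   NOT_READY. */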
static void
notify_on (grpc_exec_ctx * exec_ctx, grpc_fd * fd, gpr_atm * st,
	   grpc_closure * closure)
{
  switch (gpr_atm_acq_load (st))
    {
    case NOT_READY:
      /* There is no race if the descriptor is already ready, so we skip
         the interlocked op in that case.  As long as the app doesn't
         try to set the same upcall twice (which it shouldn't) then
         oldval should never be anything other than READY or NOT_READY.
         We don't check for user error on the fast path. */
      if (gpr_atm_rel_cas (st, NOT_READY, (gpr_intptr) closure))
	{
	  /* swap was successful -- the closure will run after the next
	     set_ready call.  NOTE: we don't have an ABA problem here,
	     since we should never have concurrent calls to the same
	     notify_on function. */
	  maybe_wake_one_watcher (fd);
	  return;
	}
      /* swap was unsuccessful due to an intervening set_ready call.
         Fall through to the READY code below */
    case READY:
      GPR_ASSERT (gpr_atm_no_barrier_load (st) == READY);
      gpr_atm_rel_store (st, NOT_READY);
      grpc_exec_ctx_enqueue (exec_ctx, closure,
			     !gpr_atm_acq_load (&fd->shutdown));
      return;
    default:			/* WAITING */
      /* upcallptr was set to a different closure.  This is an error! */
      gpr_log (GPR_ERROR,
	       "User called a notify_on function with a previous callback still "
	       "pending");
      abort ();
    }
  gpr_log (GPR_ERROR, "Corrupt memory in &st->state");
  abort ();
}

static void
set_ready_locked (grpc_exec_ctx * exec_ctx, grpc_fd * fd, gpr_atm * st)
{
  gpr_intptr state = gpr_atm_acq_load (st);

  switch (state)
    {
    case READY:
      /* duplicate ready, ignore */
      return;
    case NOT_READY:
      if (gpr_atm_rel_cas (st, NOT_READY, READY))
	{
	  /* swap was successful -- the closure will run after the next
	     notify_on call. */
	  return;
	}
      /* swap was unsuccessful due to an intervening notify_on call.
         Fall through to the WAITING code below */
      state = gpr_atm_acq_load (st);
    default:			/* waiting */
      GPR_ASSERT (gpr_atm_no_barrier_load (st) != READY
		  && gpr_atm_no_barrier_load (st) != NOT_READY);
      grpc_exec_ctx_enqueue (exec_ctx, (grpc_closure *) state,
			     !gpr_atm_acq_load (&fd->shutdown));
      gpr_atm_rel_store (st, NOT_READY);
      return;
    }
}

static void
set_ready (grpc_exec_ctx * exec_ctx, grpc_fd * fd, gpr_atm * st)
{
  /* only one set_ready can be active at once (but there may be a racing
     notify_on) */
  gpr_mu_lock (&fd->set_state_mu);
  set_ready_locked (exec_ctx, fd, st);
  gpr_mu_unlock (&fd->set_state_mu);
}

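/* Mark the fd as shut down and schedule any pending read/write closures with
   success == 0. */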
void
grpc_fd_shutdown (grpc_exec_ctx * exec_ctx, grpc_fd * fd)
{
  gpr_mu_lock (&fd->set_state_mu);
  GPR_ASSERT (!gpr_atm_no_barrier_load (&fd->shutdown));
  gpr_atm_rel_store (&fd->shutdown, 1);
  set_ready_locked (exec_ctx, fd, &fd->readst);
  set_ready_locked (exec_ctx, fd, &fd->writest);
  gpr_mu_unlock (&fd->set_state_mu);
}

void
grpc_fd_notify_on_read (grpc_exec_ctx * exec_ctx, grpc_fd * fd,
			grpc_closure * closure)
{
  notify_on (exec_ctx, fd, &fd->readst, closure);
}

void
grpc_fd_notify_on_write (grpc_exec_ctx * exec_ctx, grpc_fd * fd,
			 grpc_closure * closure)
{
  notify_on (exec_ctx, fd, &fd->writest, closure);
}

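/* Called by a pollset before polling this fd: registers *watcher and returns
   the subset of (read_mask | write_mask) this poller should actually watch.
   A bit is returned only if a closure is waiting for that direction and no
   other poller already covers it; otherwise the watcher is parked on the
   inactive list (or skipped entirely if the fd is shutting down). */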
gpr_uint32
grpc_fd_begin_poll (grpc_fd * fd, grpc_pollset * pollset, gpr_uint32 read_mask,
		    gpr_uint32 write_mask, grpc_fd_watcher * watcher)
{
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they
     change */
  GRPC_FD_REF (fd, "poll");

  gpr_mu_lock (&fd->watcher_mu);
  /* if we are shutdown, then don't add to the watcher set */
  if (gpr_atm_no_barrier_load (&fd->shutdown))
    {
      watcher->fd = NULL;
      watcher->pollset = NULL;
      gpr_mu_unlock (&fd->watcher_mu);
      GRPC_FD_UNREF (fd, "poll");
      return 0;
    }
  /* if there is nobody polling for read, but we need to, then start doing so */
  if (read_mask && !fd->read_watcher
      && (gpr_uintptr) gpr_atm_acq_load (&fd->readst) > READY)
    {
      fd->read_watcher = watcher;
      mask |= read_mask;
    }
  /* if there is nobody polling for write, but we need to, then start doing so */
  if (write_mask && !fd->write_watcher
      && (gpr_uintptr) gpr_atm_acq_load (&fd->writest) > READY)
    {
      fd->write_watcher = watcher;
      mask |= write_mask;
    }
  /* if not polling, remember this watcher in case we need someone to later */
  if (mask == 0)
    {
      watcher->next = &fd->inactive_watcher_root;
      watcher->prev = watcher->next->prev;
      watcher->next->prev = watcher->prev->next = watcher;
    }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock (&fd->watcher_mu);

  return mask;
}

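/* Called by a pollset after polling: undoes grpc_fd_begin_poll, kicks another
   watcher if the direction we were covering still needs a poller, and
   finishes the close if the fd was orphaned and this was its last watcher. */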
void
grpc_fd_end_poll (grpc_exec_ctx * exec_ctx, grpc_fd_watcher * watcher,
		  int got_read, int got_write)
{
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  if (fd == NULL)
    {
      return;
    }

  gpr_mu_lock (&fd->watcher_mu);
  if (watcher == fd->read_watcher)
    {
      /* remove read watcher, kick if we still need a read */
      was_polling = 1;
      kick = kick || !got_read;
      fd->read_watcher = NULL;
    }
  if (watcher == fd->write_watcher)
    {
      /* remove write watcher, kick if we still need a write */
      was_polling = 1;
      kick = kick || !got_write;
      fd->write_watcher = NULL;
    }
  if (!was_polling)
    {
      /* remove from inactive list */
      watcher->next->prev = watcher->prev;
      watcher->prev->next = watcher->next;
    }
  if (kick)
    {
      maybe_wake_one_watcher_locked (fd);
    }
  if (grpc_fd_is_orphaned (fd) && !has_watchers (fd) && !fd->closed)
    {
      fd->closed = 1;
      close (fd->fd);
      grpc_exec_ctx_enqueue (exec_ctx, fd->on_done_closure, 1);
    }
  gpr_mu_unlock (&fd->watcher_mu);

  GRPC_FD_UNREF (fd, "poll");
}

void
grpc_fd_become_readable (grpc_exec_ctx * exec_ctx, grpc_fd * fd)
{
  set_ready (exec_ctx, fd, &fd->readst);
}

void
grpc_fd_become_writable (grpc_exec_ctx * exec_ctx, grpc_fd * fd)
{
  set_ready (exec_ctx, fd, &fd->writest);
}

#endif