ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 1 | /* |
| 2 | * |
Craig Tiller | 0605995 | 2015-02-18 08:34:56 -0800 | [diff] [blame] | 3 | * Copyright 2015, Google Inc. |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 4 | * All rights reserved. |
| 5 | * |
| 6 | * Redistribution and use in source and binary forms, with or without |
| 7 | * modification, are permitted provided that the following conditions are |
| 8 | * met: |
| 9 | * |
| 10 | * * Redistributions of source code must retain the above copyright |
| 11 | * notice, this list of conditions and the following disclaimer. |
| 12 | * * Redistributions in binary form must reproduce the above |
| 13 | * copyright notice, this list of conditions and the following disclaimer |
| 14 | * in the documentation and/or other materials provided with the |
| 15 | * distribution. |
| 16 | * * Neither the name of Google Inc. nor the names of its |
| 17 | * contributors may be used to endorse or promote products derived from |
| 18 | * this software without specific prior written permission. |
| 19 | * |
| 20 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 21 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 22 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 23 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 24 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 25 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 26 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 27 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 28 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 29 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 30 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 31 | * |
| 32 | */ |
| 33 | |
Craig Tiller | d14a1a5 | 2015-01-21 15:26:29 -0800 | [diff] [blame] | 34 | #include <grpc/support/port_platform.h> |
| 35 | |
| 36 | #ifdef GPR_POSIX_SOCKET |
| 37 | |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 38 | #include "src/core/iomgr/fd_posix.h" |
| 39 | |
| 40 | #include <assert.h> |
David Klempner | c6bccc2 | 2015-02-24 17:33:05 -0800 | [diff] [blame] | 41 | #include <sys/socket.h> |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 42 | #include <unistd.h> |
| 43 | |
| 44 | #include "src/core/iomgr/iomgr_internal.h" |
| 45 | #include <grpc/support/alloc.h> |
| 46 | #include <grpc/support/log.h> |
| 47 | #include <grpc/support/useful.h> |
| 48 | |
Craig Tiller | f95e37f | 2015-02-18 15:15:29 -0800 | [diff] [blame] | 49 | enum descriptor_state { |
| 50 | NOT_READY = 0, |
| 51 | READY = 1 |
| 52 | }; /* or a pointer to a closure to call */ |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 53 | |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 54 | /* We need to keep a freelist not because of any concerns of malloc performance |
| 55 | * but instead so that implementations with multiple threads in (for example) |
| 56 | * epoll_wait deal with the race between pollset removal and incoming poll |
| 57 | * notifications. |
| 58 | * |
| 59 | * The problem is that the poller ultimately holds a reference to this |
| 60 | * object, so it is very difficult to know when is safe to free it, at least |
| 61 | * without some expensive synchronization. |
| 62 | * |
| 63 | * If we keep the object freelisted, in the worst case losing this race just |
| 64 | * becomes a spurious read notification on a reused fd. |
| 65 | */ |
| 66 | /* TODO(klempner): We could use some form of polling generation count to know |
| 67 | * when these are safe to free. */ |
| 68 | /* TODO(klempner): Consider disabling freelisting if we don't have multiple |
| 69 | * threads in poll on the same fd */ |
| 70 | /* TODO(klempner): Batch these allocations to reduce fragmentation */ |
| 71 | static grpc_fd *fd_freelist = NULL; |
| 72 | static gpr_mu fd_freelist_mu; |
| 73 | |
| 74 | static void freelist_fd(grpc_fd *fd) { |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 75 | gpr_mu_lock(&fd_freelist_mu); |
| 76 | fd->freelist_next = fd_freelist; |
| 77 | fd_freelist = fd; |
| 78 | gpr_mu_unlock(&fd_freelist_mu); |
| 79 | } |
| 80 | |
| 81 | static grpc_fd *alloc_fd(int fd) { |
| 82 | grpc_fd *r = NULL; |
| 83 | gpr_mu_lock(&fd_freelist_mu); |
| 84 | if (fd_freelist != NULL) { |
| 85 | r = fd_freelist; |
| 86 | fd_freelist = fd_freelist->freelist_next; |
| 87 | } |
| 88 | gpr_mu_unlock(&fd_freelist_mu); |
| 89 | if (r == NULL) { |
| 90 | r = gpr_malloc(sizeof(grpc_fd)); |
| 91 | gpr_mu_init(&r->set_state_mu); |
| 92 | gpr_mu_init(&r->watcher_mu); |
| 93 | } |
| 94 | gpr_atm_rel_store(&r->refst, 1); |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 95 | gpr_atm_rel_store(&r->readst, NOT_READY); |
| 96 | gpr_atm_rel_store(&r->writest, NOT_READY); |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 97 | gpr_atm_rel_store(&r->shutdown, 0); |
| 98 | r->fd = fd; |
Craig Tiller | 8e50fe9 | 2015-05-18 10:45:04 -0700 | [diff] [blame] | 99 | r->inactive_watcher_root.next = r->inactive_watcher_root.prev = |
| 100 | &r->inactive_watcher_root; |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 101 | r->freelist_next = NULL; |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 102 | r->read_watcher = r->write_watcher = NULL; |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 103 | return r; |
| 104 | } |
| 105 | |
| 106 | static void destroy(grpc_fd *fd) { |
| 107 | gpr_mu_destroy(&fd->set_state_mu); |
| 108 | gpr_mu_destroy(&fd->watcher_mu); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 109 | gpr_free(fd); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 110 | } |
| 111 | |
Craig Tiller | 9ae7697 | 2015-05-31 13:58:24 -0700 | [diff] [blame^] | 112 | #ifdef GRPC_FD_REF_COUNT_DEBUG |
| 113 | #define REF_BY(fd, n, reason) ref_by(fd, n, reason, __FILE__, __LINE__) |
| 114 | #define UNREF_BY(fd, n, reason) unref_by(fd, n, reason, __FILE__, __LINE__) |
| 115 | static void ref_by(grpc_fd *fd, int n, const char *reason, const char *file, int line) { |
| 116 | gpr_log(GPR_DEBUG, "FD %d ref %d %d -> %d [%s; %s:%d]", fd->fd, n, fd->refst, fd->refst + n, reason, file, line); |
| 117 | #else |
| 118 | #define REF_BY(fd, n, reason) ref_by(fd, n) |
| 119 | #define UNREF_BY(fd, n, reason) unref_by(fd, n) |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 120 | static void ref_by(grpc_fd *fd, int n) { |
Craig Tiller | 9ae7697 | 2015-05-31 13:58:24 -0700 | [diff] [blame^] | 121 | #endif |
Craig Tiller | 23139ae | 2015-02-17 15:46:13 -0800 | [diff] [blame] | 122 | GPR_ASSERT(gpr_atm_no_barrier_fetch_add(&fd->refst, n) > 0); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 123 | } |
| 124 | |
Craig Tiller | 9ae7697 | 2015-05-31 13:58:24 -0700 | [diff] [blame^] | 125 | #ifdef GRPC_FD_REF_COUNT_DEBUG |
| 126 | static void unref_by(grpc_fd *fd, int n, const char *reason, const char *file, int line) { |
| 127 | gpr_atm old; |
| 128 | gpr_log(GPR_DEBUG, "FD %d unref %d %d -> %d [%s; %s:%d]", fd->fd, n, fd->refst, fd->refst - n, reason, file, line); |
| 129 | #else |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 130 | static void unref_by(grpc_fd *fd, int n) { |
Craig Tiller | 9ae7697 | 2015-05-31 13:58:24 -0700 | [diff] [blame^] | 131 | gpr_atm old; |
| 132 | #endif |
| 133 | old = gpr_atm_full_fetch_add(&fd->refst, -n); |
Craig Tiller | 23139ae | 2015-02-17 15:46:13 -0800 | [diff] [blame] | 134 | if (old == n) { |
David Klempner | c6bccc2 | 2015-02-24 17:33:05 -0800 | [diff] [blame] | 135 | close(fd->fd); |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 136 | grpc_iomgr_add_callback(fd->on_done, fd->on_done_user_data); |
| 137 | freelist_fd(fd); |
| 138 | grpc_iomgr_unref(); |
Craig Tiller | 23139ae | 2015-02-17 15:46:13 -0800 | [diff] [blame] | 139 | } else { |
| 140 | GPR_ASSERT(old > n); |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 141 | } |
| 142 | } |
| 143 | |
Craig Tiller | 7d41321 | 2015-02-09 08:00:02 -0800 | [diff] [blame] | 144 | void grpc_fd_global_init(void) { gpr_mu_init(&fd_freelist_mu); } |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 145 | |
| 146 | void grpc_fd_global_shutdown(void) { |
| 147 | while (fd_freelist != NULL) { |
| 148 | grpc_fd *fd = fd_freelist; |
| 149 | fd_freelist = fd_freelist->freelist_next; |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 150 | destroy(fd); |
| 151 | } |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 152 | gpr_mu_destroy(&fd_freelist_mu); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 153 | } |
| 154 | |
| 155 | static void do_nothing(void *ignored, int success) {} |
| 156 | |
| 157 | grpc_fd *grpc_fd_create(int fd) { |
David Klempner | d178524 | 2015-01-28 17:00:21 -0800 | [diff] [blame] | 158 | grpc_fd *r = alloc_fd(fd); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 159 | grpc_iomgr_ref(); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 160 | return r; |
| 161 | } |
| 162 | |
| 163 | int grpc_fd_is_orphaned(grpc_fd *fd) { |
| 164 | return (gpr_atm_acq_load(&fd->refst) & 1) == 0; |
| 165 | } |
| 166 | |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 167 | static void maybe_wake_one_watcher_locked(grpc_fd *fd) { |
Craig Tiller | 354bf6d | 2015-05-18 10:18:03 -0700 | [diff] [blame] | 168 | if (fd->inactive_watcher_root.next != &fd->inactive_watcher_root) { |
| 169 | grpc_pollset_force_kick(fd->inactive_watcher_root.next->pollset); |
| 170 | } else if (fd->read_watcher) { |
| 171 | grpc_pollset_force_kick(fd->read_watcher->pollset); |
| 172 | } else if (fd->write_watcher) { |
| 173 | grpc_pollset_force_kick(fd->write_watcher->pollset); |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 174 | } |
| 175 | } |
| 176 | |
| 177 | static void maybe_wake_one_watcher(grpc_fd *fd) { |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 178 | gpr_mu_lock(&fd->watcher_mu); |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 179 | maybe_wake_one_watcher_locked(fd); |
| 180 | gpr_mu_unlock(&fd->watcher_mu); |
| 181 | } |
| 182 | |
Craig Tiller | c95de72 | 2015-05-29 08:56:46 -0700 | [diff] [blame] | 183 | static void wake_all_watchers_locked(grpc_fd *fd) { |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 184 | grpc_fd_watcher *watcher; |
Craig Tiller | 8e50fe9 | 2015-05-18 10:45:04 -0700 | [diff] [blame] | 185 | for (watcher = fd->inactive_watcher_root.next; |
| 186 | watcher != &fd->inactive_watcher_root; watcher = watcher->next) { |
Craig Tiller | 7d41321 | 2015-02-09 08:00:02 -0800 | [diff] [blame] | 187 | grpc_pollset_force_kick(watcher->pollset); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 188 | } |
Craig Tiller | 354bf6d | 2015-05-18 10:18:03 -0700 | [diff] [blame] | 189 | if (fd->read_watcher) { |
| 190 | grpc_pollset_force_kick(fd->read_watcher->pollset); |
| 191 | } |
| 192 | if (fd->write_watcher && fd->write_watcher != fd->read_watcher) { |
| 193 | grpc_pollset_force_kick(fd->write_watcher->pollset); |
| 194 | } |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 195 | } |
| 196 | |
/* Begin tearing down an fd: record the completion callback, shut the socket
   down in both directions so blocked pollers see activity, wake every
   watcher, and drop the fd's "active" status.

   The +1 / -2 refcount pair is deliberate: alloc_fd starts refst at 1 (low
   bit == not orphaned), and normal refs/unrefs move by 2 to preserve that
   bit. Netting -1 here clears bit 0, which is exactly what
   grpc_fd_is_orphaned tests. The actual close() and on_done dispatch happen
   in unref_by once the last reference is gone. */
void grpc_fd_orphan(grpc_fd *fd, grpc_iomgr_cb_func on_done, void *user_data) {
  fd->on_done = on_done ? on_done : do_nothing;
  fd->on_done_user_data = user_data;
  /* wake anyone blocked in poll/epoll on this descriptor */
  shutdown(fd->fd, SHUT_RDWR);
  REF_BY(fd, 1, "orphan"); /* remove active status, but keep referenced */
  gpr_mu_lock(&fd->watcher_mu);
  wake_all_watchers_locked(fd);
  gpr_mu_unlock(&fd->watcher_mu);
  UNREF_BY(fd, 2, "orphan"); /* drop the reference */
}
| 207 | |
/* Public ref/unref. Both move the count by two so that bit 0 -- the
   "not orphaned" flag set by alloc_fd and cleared by grpc_fd_orphan --
   is never disturbed. The debug variants thread a reason and call site
   through to ref_by/unref_by for logging. */
/* increment refcount by two to avoid changing the orphan bit */
#ifdef GRPC_FD_REF_COUNT_DEBUG
void grpc_fd_ref(grpc_fd *fd, const char *reason, const char *file, int line) {
  ref_by(fd, 2, reason, file, line);
}

void grpc_fd_unref(grpc_fd *fd, const char *reason, const char *file, int line) {
  unref_by(fd, 2, reason, file, line);
}
#else
void grpc_fd_ref(grpc_fd *fd) {
  ref_by(fd, 2);
}

void grpc_fd_unref(grpc_fd *fd) {
  unref_by(fd, 2);
}
#endif
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 226 | |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 227 | static void make_callback(grpc_iomgr_cb_func cb, void *arg, int success, |
| 228 | int allow_synchronous_callback) { |
| 229 | if (allow_synchronous_callback) { |
| 230 | cb(arg, success); |
| 231 | } else { |
| 232 | grpc_iomgr_add_delayed_callback(cb, arg, success); |
| 233 | } |
| 234 | } |
| 235 | |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 236 | static void make_callbacks(grpc_iomgr_closure *callbacks, size_t n, int success, |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 237 | int allow_synchronous_callback) { |
| 238 | size_t i; |
| 239 | for (i = 0; i < n; i++) { |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 240 | make_callback(callbacks[i].cb, callbacks[i].cb_arg, success, |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 241 | allow_synchronous_callback); |
| 242 | } |
| 243 | } |
| 244 | |
Craig Tiller | f95e37f | 2015-02-18 15:15:29 -0800 | [diff] [blame] | 245 | static void notify_on(grpc_fd *fd, gpr_atm *st, grpc_iomgr_closure *closure, |
| 246 | int allow_synchronous_callback) { |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 247 | switch (gpr_atm_acq_load(st)) { |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 248 | case NOT_READY: |
| 249 | /* There is no race if the descriptor is already ready, so we skip |
| 250 | the interlocked op in that case. As long as the app doesn't |
| 251 | try to set the same upcall twice (which it shouldn't) then |
| 252 | oldval should never be anything other than READY or NOT_READY. We |
| 253 | don't |
| 254 | check for user error on the fast path. */ |
Craig Tiller | f95e37f | 2015-02-18 15:15:29 -0800 | [diff] [blame] | 255 | if (gpr_atm_rel_cas(st, NOT_READY, (gpr_intptr)closure)) { |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 256 | /* swap was successful -- the closure will run after the next |
| 257 | set_ready call. NOTE: we don't have an ABA problem here, |
| 258 | since we should never have concurrent calls to the same |
| 259 | notify_on function. */ |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 260 | maybe_wake_one_watcher(fd); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 261 | return; |
| 262 | } |
| 263 | /* swap was unsuccessful due to an intervening set_ready call. |
| 264 | Fall through to the READY code below */ |
| 265 | case READY: |
David Klempner | 466423b | 2015-03-11 15:00:46 -0700 | [diff] [blame] | 266 | assert(gpr_atm_no_barrier_load(st) == READY); |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 267 | gpr_atm_rel_store(st, NOT_READY); |
Craig Tiller | f95e37f | 2015-02-18 15:15:29 -0800 | [diff] [blame] | 268 | make_callback(closure->cb, closure->cb_arg, |
| 269 | !gpr_atm_acq_load(&fd->shutdown), |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 270 | allow_synchronous_callback); |
| 271 | return; |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 272 | default: /* WAITING */ |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 273 | /* upcallptr was set to a different closure. This is an error! */ |
| 274 | gpr_log(GPR_ERROR, |
| 275 | "User called a notify_on function with a previous callback still " |
| 276 | "pending"); |
| 277 | abort(); |
| 278 | } |
| 279 | gpr_log(GPR_ERROR, "Corrupt memory in &st->state"); |
| 280 | abort(); |
| 281 | } |
| 282 | |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 283 | static void set_ready_locked(gpr_atm *st, grpc_iomgr_closure *callbacks, |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 284 | size_t *ncallbacks) { |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 285 | gpr_intptr state = gpr_atm_acq_load(st); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 286 | |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 287 | switch (state) { |
| 288 | case READY: |
| 289 | /* duplicate ready, ignore */ |
| 290 | return; |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 291 | case NOT_READY: |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 292 | if (gpr_atm_rel_cas(st, NOT_READY, READY)) { |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 293 | /* swap was successful -- the closure will run after the next |
| 294 | notify_on call. */ |
| 295 | return; |
| 296 | } |
Craig Tiller | f95e37f | 2015-02-18 15:15:29 -0800 | [diff] [blame] | 297 | /* swap was unsuccessful due to an intervening set_ready call. |
| 298 | Fall through to the WAITING code below */ |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 299 | state = gpr_atm_acq_load(st); |
| 300 | default: /* waiting */ |
David Klempner | 466423b | 2015-03-11 15:00:46 -0700 | [diff] [blame] | 301 | assert(gpr_atm_no_barrier_load(st) != READY && |
| 302 | gpr_atm_no_barrier_load(st) != NOT_READY); |
Craig Tiller | f95e37f | 2015-02-18 15:15:29 -0800 | [diff] [blame] | 303 | callbacks[(*ncallbacks)++] = *(grpc_iomgr_closure *)state; |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 304 | gpr_atm_rel_store(st, NOT_READY); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 305 | return; |
| 306 | } |
| 307 | } |
| 308 | |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 309 | static void set_ready(grpc_fd *fd, gpr_atm *st, |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 310 | int allow_synchronous_callback) { |
| 311 | /* only one set_ready can be active at once (but there may be a racing |
| 312 | notify_on) */ |
| 313 | int success; |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 314 | grpc_iomgr_closure cb; |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 315 | size_t ncb = 0; |
| 316 | gpr_mu_lock(&fd->set_state_mu); |
| 317 | set_ready_locked(st, &cb, &ncb); |
| 318 | gpr_mu_unlock(&fd->set_state_mu); |
| 319 | success = !gpr_atm_acq_load(&fd->shutdown); |
| 320 | make_callbacks(&cb, ncb, success, allow_synchronous_callback); |
| 321 | } |
| 322 | |
/* Shut the fd down logically: set the shutdown flag, then force both the
   read and write state words ready so any parked closures fire -- with
   success == 0, and always deferred (never synchronously). Must not be
   called twice on the same fd (asserted). */
void grpc_fd_shutdown(grpc_fd *fd) {
  grpc_iomgr_closure cb[2];   /* at most one closure per direction */
  size_t ncb = 0;
  gpr_mu_lock(&fd->set_state_mu);
  GPR_ASSERT(!gpr_atm_no_barrier_load(&fd->shutdown));
  gpr_atm_rel_store(&fd->shutdown, 1);
  set_ready_locked(&fd->readst, cb, &ncb);
  set_ready_locked(&fd->writest, cb, &ncb);
  gpr_mu_unlock(&fd->set_state_mu);
  make_callbacks(cb, ncb, 0, 0);
}
| 334 | |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 335 | void grpc_fd_notify_on_read(grpc_fd *fd, grpc_iomgr_closure *closure) { |
| 336 | notify_on(fd, &fd->readst, closure, 0); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 337 | } |
| 338 | |
Craig Tiller | 0fcd53c | 2015-02-18 15:10:53 -0800 | [diff] [blame] | 339 | void grpc_fd_notify_on_write(grpc_fd *fd, grpc_iomgr_closure *closure) { |
| 340 | notify_on(fd, &fd->writest, closure, 0); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 341 | } |
| 342 | |
/* Called by a pollset before polling this fd. Claims the read and/or write
   watcher slots if they are free AND someone is actually waiting on that
   direction (a closure pointer parked in the state word makes the load
   compare `> READY`). Returns the event mask the caller should poll for; a
   zero mask means this watcher is parked on the inactive list so it can be
   kicked into service later. Takes a "poll" ref, released by
   grpc_fd_end_poll. */
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              gpr_uint32 read_mask, gpr_uint32 write_mask,
                              grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->watcher_mu);
  /* if there is nobody polling for read, but we need to, then start doing so */
  if (!fd->read_watcher && gpr_atm_acq_load(&fd->readst) > READY) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  if (!fd->write_watcher && gpr_atm_acq_load(&fd->writest) > READY) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later:
     splice it in just after the circular list's sentinel root */
  if (mask == 0) {
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->watcher_mu);

  return mask;
}
| 375 | |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 376 | void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) { |
| 377 | int was_polling = 0; |
| 378 | int kick = 0; |
| 379 | grpc_fd *fd = watcher->fd; |
Craig Tiller | 59ea16f | 2015-02-18 16:18:08 -0800 | [diff] [blame] | 380 | |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 381 | gpr_mu_lock(&fd->watcher_mu); |
| 382 | if (watcher == fd->read_watcher) { |
Craig Tiller | 236d098 | 2015-05-18 10:26:44 -0700 | [diff] [blame] | 383 | /* remove read watcher, kick if we still need a read */ |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 384 | was_polling = 1; |
Craig Tiller | 8e50fe9 | 2015-05-18 10:45:04 -0700 | [diff] [blame] | 385 | kick = kick || !got_read; |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 386 | fd->read_watcher = NULL; |
| 387 | } |
| 388 | if (watcher == fd->write_watcher) { |
Craig Tiller | 236d098 | 2015-05-18 10:26:44 -0700 | [diff] [blame] | 389 | /* remove write watcher, kick if we still need a write */ |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 390 | was_polling = 1; |
Craig Tiller | 8e50fe9 | 2015-05-18 10:45:04 -0700 | [diff] [blame] | 391 | kick = kick || !got_write; |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 392 | fd->write_watcher = NULL; |
| 393 | } |
| 394 | if (!was_polling) { |
Craig Tiller | 236d098 | 2015-05-18 10:26:44 -0700 | [diff] [blame] | 395 | /* remove from inactive list */ |
Craig Tiller | 886d7ec | 2015-05-14 16:18:42 -0700 | [diff] [blame] | 396 | watcher->next->prev = watcher->prev; |
| 397 | watcher->prev->next = watcher->next; |
| 398 | } |
| 399 | if (kick) { |
| 400 | maybe_wake_one_watcher_locked(fd); |
| 401 | } |
| 402 | gpr_mu_unlock(&fd->watcher_mu); |
| 403 | |
Craig Tiller | 9ae7697 | 2015-05-31 13:58:24 -0700 | [diff] [blame^] | 404 | GRPC_FD_UNREF(fd, "poll"); |
ctiller | 58393c2 | 2015-01-07 14:03:30 -0800 | [diff] [blame] | 405 | } |
| 406 | |
| 407 | void grpc_fd_become_readable(grpc_fd *fd, int allow_synchronous_callback) { |
| 408 | set_ready(fd, &fd->readst, allow_synchronous_callback); |
| 409 | } |
| 410 | |
| 411 | void grpc_fd_become_writable(grpc_fd *fd, int allow_synchronous_callback) { |
| 412 | set_ready(fd, &fd->writest, allow_synchronous_callback); |
| 413 | } |
Craig Tiller | d14a1a5 | 2015-01-21 15:26:29 -0800 | [diff] [blame] | 414 | |
Craig Tiller | 190d360 | 2015-02-18 09:23:38 -0800 | [diff] [blame] | 415 | #endif |