/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <limits.h>

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"

#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};

/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */

static void *event_self_cbarg_ptr_ = NULL;

/* Prototypes */
static void event_queue_insert_active(struct event_base *, struct event_callback *);
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void event_queue_insert_timeout(struct event_base *, struct event *);
static void event_queue_insert_inserted(struct event_base *, struct event *);
static void event_queue_remove_active(struct event_base *, struct event_callback *);
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void event_queue_remove_timeout(struct event_base *, struct event *);
static void event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void event_queue_reinsert_timeout(struct event_base *, struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int event_haveevents(struct event_base *);

static int event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);

static inline void event_signal_closure(struct event_base *, struct event *ev);
static inline void event_persist_closure(struct event_base *, struct event *ev);

static int evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);

#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief Debug-mode variable, set whenever a structure that must be shared
 * across threads is created (if thread support is enabled).
 *
 * When and if evthreads are initialized, this variable is checked; if it is
 * set to something other than zero, the evthread setup functions were
 * called out of order.
 *
 * See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)

/* Macro: record that ev is now setup (that is, ready for an add) */
#define event_debug_note_setup_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) { \
			dent->added = 0; \
		} else { \
			dent = mm_malloc(sizeof(*dent)); \
			if (!dent) \
				event_err(1, \
				    "Out of memory in debugging code"); \
			dent->ptr = (ev); \
			dent->added = 0; \
			HT_INSERT(event_debug_map, &global_debug_map, dent); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: record that ev is no longer setup */
#define event_debug_note_teardown_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
		if (dent) \
			mm_free(dent); \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: record that ev is now added */
#define event_debug_note_add_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) { \
			dent->added = 1; \
		} else { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s: noting an add on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT \
			    ", flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: record that ev is no longer added */
#define event_debug_note_del_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) { \
			dent->added = 0; \
		} else { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s: noting a del on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT \
			    ", flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define event_debug_assert_is_setup_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (!dent) { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s called on a non-initialized event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT \
			    ", flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	} while (0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up again) */
#define event_debug_assert_not_added_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent && dent->added) { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s called on an already added event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT", " \
			    "flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	} while (0)
#else
#define event_debug_note_setup_(ev) \
	((void)0)
#define event_debug_note_teardown_(ev) \
	((void)0)
#define event_debug_note_add_(ev) \
	((void)0)
#define event_debug_note_del_(ev) \
	((void)0)
#define event_debug_assert_is_setup_(ev) \
	((void)0)
#define event_debug_assert_not_added_(ev) \
	((void)0)
#endif

#define EVENT_BASE_ASSERT_LOCKED(base) \
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv, NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}

int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
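
/* Illustrative usage sketch (not part of the library): inside a running
 * callback, the cached time lets a caller avoid a syscall per lookup.
 * 'base' here stands for whatever event_base is dispatching.
 *
 *	struct timeval now;
 *	if (event_base_gettimeofday_cached(base, &now) == 0) {
 *		// 'now' reflects the time at which the current callback
 *		// pass began, mapped back to the wall-clock epoch.
 *	}
 */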

/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}

int
event_base_update_cache_time(struct event_base *base)
{
	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
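
/* Illustrative usage sketch (not part of the library): a long-running
 * callback can refresh the cache so later callbacks in the same pass do
 * not observe a stale timestamp.  do_slow_work() is a hypothetical
 * helper.
 *
 *	do_slow_work();
 *	event_base_update_cache_time(base);  // re-sample the clock now
 */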

static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}
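
/* Illustrative usage sketch (not part of the library): the minimal
 * lifecycle for a base created with the function above.
 *
 *	#include <event2/event.h>
 *
 *	struct event_base *base = event_base_new();
 *	if (base) {
 *		event_base_dispatch(base);  // run until no events remain
 *		event_base_free(base);
 *	}
 */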

/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}
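
/* Illustrative usage sketch (not part of the library): because of the
 * check above, a backend can be ruled out from the environment, e.g.
 *
 *	$ EVENT_NOEPOLL=yes ./myprog        // 'myprog' is hypothetical
 *
 * or, equivalently, through the config API before creating the base:
 *
 *	event_config_avoid_method(cfg, "epoll");
 */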

int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}
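
/* Illustrative usage sketch (not part of the library): after a base is
 * constructed, a caller can inspect the chosen backend and its features.
 *
 *	printf("using: %s\n", event_base_get_method(base));
 *	if (event_base_get_features(base) & EV_FEATURE_O1)
 *		printf("O(1) event triggering available\n");
 */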

void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases", __func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}
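
/* Illustrative usage sketch (not part of the library): debug mode must
 * be the very first libevent call, before any event or base exists.
 *
 *	event_enable_debug_mode();
 *	struct event_base *base = event_base_new();  // now tracked
 */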

void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

	event_debug_mode_on_ = 0;
#endif
}

struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			/* Only record the flag if the environment actually
			 * asked for a precise timer. */
			if (precise_time)
				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;

	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
			    eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}

	/* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
	event_debug_created_threadable_ctx_ = 1;
#endif

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r < 0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	return (base);
}
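
/* Illustrative usage sketch (not part of the library): steering backend
 * selection through the config API instead of taking the default
 * preference order above.
 *
 *	struct event_config *cfg = event_config_new();
 *	struct event_base *base = NULL;
 *	if (cfg) {
 *		event_config_avoid_method(cfg, "select");
 *		event_config_require_features(cfg, EV_FEATURE_O1);
 *		base = event_base_new_with_config(cfg);
 *		event_config_free(cfg);
 *	}
 *	// base is NULL if no backend satisfies the constraints
 */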

int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}

static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}

static int
event_base_free_queues_(struct event_base *base, int run_finalizers)
{
	int deleted = 0, i;

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}

	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}

	return deleted;
}

static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted = 0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (;;) {
		/* A finalizer callback can register yet another finalizer;
		 * if that new finalizer lands in active_later_queue, it is
		 * promoted to the activequeues, and we would return with
		 * events still in the activequeues, which is not what we
		 * want (we even have an assertion for this).
		 *
		 * A simple case is a bufferevent with an underlying
		 * bufferevent (i.e. filters).
		 */
		int i = event_base_free_queues_(base, run_finalizers);
		if (!i) {
			break;
		}
		n_deleted += i;
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
		    __func__, n_deleted));

	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}

void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}

/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	return 0;
}
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0
};

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub
		 * out the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process.  For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure.  If we didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			    "%s: could not reinitialize event mechanism",
			    __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events.  This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
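
/* Illustrative usage sketch (not part of the library): event_reinit()
 * is meant to run in the child process right after fork(), since
 * backends like epoll and kqueue share kernel state with the parent
 * until reinitialized.
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		if (event_reinit(base) == -1)
 *			exit(1);  // base may be unusable in the child
 *		event_base_dispatch(base);
 *	}
 */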

/* Get the monotonic time for this event_base's timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}

const char **
event_get_supported_methods(void)
{
	static const char **methods = NULL;
	const struct eventop **method;
	const char **tmp;
	int i = 0, k;

	/* count all methods */
	for (method = &eventops[0]; *method != NULL; ++method) {
		++i;
	}

	/* allocate one more than we need for the NULL pointer */
	tmp = mm_calloc((i + 1), sizeof(char *));
	if (tmp == NULL)
		return (NULL);

	/* populate the array with the supported methods */
	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
		tmp[i++] = eventops[k]->name;
	}
	tmp[i] = NULL;

	if (methods != NULL)
		mm_free((char **)methods);

	methods = tmp;

	return (methods);
}
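
/* Illustrative usage sketch (not part of the library): the returned
 * array is NULL-terminated and owned by the library; don't free it.
 *
 *	const char **methods = event_get_supported_methods();
 *	int i;
 *	for (i = 0; methods && methods[i]; ++i)
 *		printf("supported method: %s\n", methods[i]);
 */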

struct event_config *
event_config_new(void)
{
	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

	if (cfg == NULL)
		return (NULL);

	TAILQ_INIT(&cfg->entries);
	cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks = INT_MAX;
	cfg->limit_callbacks_after_prio = 1;

	return (cfg);
}

static void
event_config_entry_free(struct event_config_entry *entry)
{
	if (entry->avoid_method != NULL)
		mm_free((char *)entry->avoid_method);
	mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
	struct event_config_entry *entry;

	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
		TAILQ_REMOVE(&cfg->entries, entry, next);
		event_config_entry_free(entry);
	}
	mm_free(cfg);
}
| 1127 | |
| 1128 | int |
| 1129 | event_config_set_flag(struct event_config *cfg, int flag) |
| 1130 | { |
| 1131 | if (!cfg) |
| 1132 | return -1; |
| 1133 | cfg->flags |= flag; |
| 1134 | return 0; |
| 1135 | } |
| 1136 | |
| 1137 | int |
| 1138 | event_config_avoid_method(struct event_config *cfg, const char *method) |
| 1139 | { |
| 1140 | struct event_config_entry *entry = mm_malloc(sizeof(*entry)); |
| 1141 | if (entry == NULL) |
| 1142 | return (-1); |
| 1143 | |
| 1144 | if ((entry->avoid_method = mm_strdup(method)) == NULL) { |
| 1145 | mm_free(entry); |
| 1146 | return (-1); |
| 1147 | } |
| 1148 | |
| 1149 | TAILQ_INSERT_TAIL(&cfg->entries, entry, next); |
| 1150 | |
| 1151 | return (0); |
| 1152 | } |
| 1153 | |
| 1154 | int |
| 1155 | event_config_require_features(struct event_config *cfg, |
| 1156 | int features) |
| 1157 | { |
| 1158 | if (!cfg) |
| 1159 | return (-1); |
| 1160 | cfg->require_features = features; |
| 1161 | return (0); |
| 1162 | } |
| 1163 | |
| 1164 | int |
| 1165 | event_config_set_num_cpus_hint(struct event_config *cfg, int cpus) |
| 1166 | { |
| 1167 | if (!cfg) |
| 1168 | return (-1); |
| 1169 | cfg->n_cpus_hint = cpus; |
| 1170 | return (0); |
| 1171 | } |
| 1172 | |
| 1173 | int |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1174 | event_config_set_max_dispatch_interval(struct event_config *cfg, |
| 1175 | const struct timeval *max_interval, int max_callbacks, int min_priority) |
| 1176 | { |
| 1177 | if (max_interval) |
| 1178 | memcpy(&cfg->max_dispatch_interval, max_interval, |
| 1179 | sizeof(struct timeval)); |
| 1180 | else |
| 1181 | cfg->max_dispatch_interval.tv_sec = -1; |
| 1182 | cfg->max_dispatch_callbacks = |
| 1183 | max_callbacks >= 0 ? max_callbacks : INT_MAX; |
| 1184 | if (min_priority < 0) |
| 1185 | min_priority = 0; |
| 1186 | cfg->limit_callbacks_after_prio = min_priority; |
| 1187 | return (0); |
| 1188 | } |
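/* Illustrative usage sketch (not part of the original file): the typical
 * flow through the event_config_* setters above, ending in
 * event_base_new_with_config(). The method name "select" and the 10 ms /
 * 64-callback budget are arbitrary example values. */
#if 0
struct event_config *cfg = event_config_new();
struct event_base *base = NULL;
struct timeval max_interval = { 0, 10000 };	/* 10 ms */
if (cfg) {
	event_config_avoid_method(cfg, "select");
	event_config_require_features(cfg, EV_FEATURE_O1);
	/* Limit each dispatch pass to 10 ms or 64 callbacks, whichever
	 * comes first, but only for priorities >= 1. */
	event_config_set_max_dispatch_interval(cfg, &max_interval, 64, 1);
	base = event_base_new_with_config(cfg);
	event_config_free(cfg);
}
#endif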
| 1189 | |
| 1190 | int |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1191 | event_priority_init(int npriorities) |
| 1192 | { |
| 1193 | return event_base_priority_init(current_base, npriorities); |
| 1194 | } |
| 1195 | |
| 1196 | int |
| 1197 | event_base_priority_init(struct event_base *base, int npriorities) |
| 1198 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1199 | int i, r; |
| 1200 | r = -1; |
| 1201 | |
| 1202 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1203 | |
| 1204 | if (N_ACTIVE_CALLBACKS(base) || npriorities < 1 |
| 1205 | || npriorities >= EVENT_MAX_PRIORITIES) |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1206 | goto err; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1207 | |
| 1208 | if (npriorities == base->nactivequeues) |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1209 | goto ok; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1210 | |
| 1211 | if (base->nactivequeues) { |
| 1212 | mm_free(base->activequeues); |
| 1213 | base->nactivequeues = 0; |
| 1214 | } |
| 1215 | |
| 1216 | /* Allocate our priority queues */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1217 | base->activequeues = (struct evcallback_list *) |
| 1218 | mm_calloc(npriorities, sizeof(struct evcallback_list)); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1219 | if (base->activequeues == NULL) { |
| 1220 | event_warn("%s: calloc", __func__); |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1221 | goto err; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1222 | } |
| 1223 | base->nactivequeues = npriorities; |
| 1224 | |
| 1225 | for (i = 0; i < base->nactivequeues; ++i) { |
| 1226 | TAILQ_INIT(&base->activequeues[i]); |
| 1227 | } |
| 1228 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1229 | ok: |
| 1230 | r = 0; |
| 1231 | err: |
| 1232 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1233 | return (r); |
| 1234 | } |
| 1235 | |
| 1236 | int |
| 1237 | event_base_get_npriorities(struct event_base *base) |
| 1238 | { |
| 1239 | |
| 1240 | int n; |
| 1241 | if (base == NULL) |
| 1242 | base = current_base; |
| 1243 | |
| 1244 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 1245 | n = base->nactivequeues; |
| 1246 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1247 | return (n); |
| 1248 | } |
| 1249 | |
| 1250 | int |
| 1251 | event_base_get_num_events(struct event_base *base, unsigned int type) |
| 1252 | { |
| 1253 | int r = 0; |
| 1254 | |
| 1255 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 1256 | |
| 1257 | if (type & EVENT_BASE_COUNT_ACTIVE) |
| 1258 | r += base->event_count_active; |
| 1259 | |
| 1260 | if (type & EVENT_BASE_COUNT_VIRTUAL) |
| 1261 | r += base->virtual_event_count; |
| 1262 | |
| 1263 | if (type & EVENT_BASE_COUNT_ADDED) |
| 1264 | r += base->event_count; |
| 1265 | |
| 1266 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1267 | |
| 1268 | return r; |
| 1269 | } |
| 1270 | |
| 1271 | int |
| 1272 | event_base_get_max_events(struct event_base *base, unsigned int type, int clear) |
| 1273 | { |
| 1274 | int r = 0; |
| 1275 | |
| 1276 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 1277 | |
| 1278 | if (type & EVENT_BASE_COUNT_ACTIVE) { |
| 1279 | r += base->event_count_active_max; |
| 1280 | if (clear) |
| 1281 | base->event_count_active_max = 0; |
| 1282 | } |
| 1283 | |
| 1284 | if (type & EVENT_BASE_COUNT_VIRTUAL) { |
| 1285 | r += base->virtual_event_count_max; |
| 1286 | if (clear) |
| 1287 | base->virtual_event_count_max = 0; |
| 1288 | } |
| 1289 | |
| 1290 | if (type & EVENT_BASE_COUNT_ADDED) { |
| 1291 | r += base->event_count_max; |
| 1292 | if (clear) |
| 1293 | base->event_count_max = 0; |
| 1294 | } |
| 1295 | |
| 1296 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1297 | |
| 1298 | return r; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1299 | } |
| 1300 | |
| 1301 | /* Returns true iff we're currently watching any events. */ |
| 1302 | static int |
| 1303 | event_haveevents(struct event_base *base) |
| 1304 | { |
| 1305 | /* Caller must hold th_base_lock */ |
| 1306 | return (base->virtual_event_count > 0 || base->event_count > 0); |
| 1307 | } |
| 1308 | |
| 1309 | /* "closure" function called when processing active signal events */ |
| 1310 | static inline void |
| 1311 | event_signal_closure(struct event_base *base, struct event *ev) |
| 1312 | { |
| 1313 | short ncalls; |
| 1314 | int should_break; |
| 1315 | |
| 1316 | /* Allows deletes to work */ |
| 1317 | ncalls = ev->ev_ncalls; |
| 1318 | if (ncalls != 0) |
| 1319 | ev->ev_pncalls = &ncalls; |
| 1320 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1321 | while (ncalls) { |
| 1322 | ncalls--; |
| 1323 | ev->ev_ncalls = ncalls; |
| 1324 | if (ncalls == 0) |
| 1325 | ev->ev_pncalls = NULL; |
| 1326 | (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg); |
| 1327 | |
| 1328 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 1329 | should_break = base->event_break; |
| 1330 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1331 | |
| 1332 | if (should_break) { |
| 1333 | if (ncalls != 0) |
| 1334 | ev->ev_pncalls = NULL; |
| 1335 | return; |
| 1336 | } |
| 1337 | } |
| 1338 | } |
| 1339 | |
| 1340 | /* Common timeouts are special timeouts that are handled as queues rather than |
| 1341 | * in the minheap. This is more efficient than the minheap if we happen to |
| 1342 | * know that we're going to get several thousand timeout events all with |
| 1343 | * the same timeout value. |
| 1344 | * |
| 1345 | * Since all our timeout handling code assumes timevals can be copied, |
| 1346 | * assigned, etc, we can't use a "magic pointer" to encode these common |
| 1347 | * timeouts. Searching through a list to see if every timeout is common could |
| 1348 | * also get inefficient. Instead, we take advantage of the fact that tv_usec |
| 1349 | * is 32 bits long, but only uses 20 of those bits (since it can never be over |
| 1350 | * 999999.) We use the top bits to encode 4 bits of magic number, and 8 bits |
| 1351 | * of index into the event_base's array of common timeouts. |
| 1352 | */ |
| 1353 | |
| 1354 | #define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK |
| 1355 | #define COMMON_TIMEOUT_IDX_MASK 0x0ff00000 |
| 1356 | #define COMMON_TIMEOUT_IDX_SHIFT 20 |
| 1357 | #define COMMON_TIMEOUT_MASK 0xf0000000 |
| 1358 | #define COMMON_TIMEOUT_MAGIC 0x50000000 |
| 1359 | |
| 1360 | #define COMMON_TIMEOUT_IDX(tv) \ |
| 1361 | (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT) |
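/* Worked example (not part of the original file), assuming
 * COMMON_TIMEOUT_MICROSECONDS_MASK is the low 20 bits (0x000fffff):
 * a common-timeout tv_usec of 0x50A07A12 decomposes as
 *   magic = 0x50000000 (matches COMMON_TIMEOUT_MAGIC),
 *   index = 0x00A00000 >> 20 = 10,
 *   usec  = 0x00007A12 = 31250 microseconds,
 * so is_common_timeout() accepts it whenever base->n_common_timeouts > 10. */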
| 1362 | |
| 1363 | /** Return true iff 'tv' is a common timeout in 'base' */ |
| 1364 | static inline int |
| 1365 | is_common_timeout(const struct timeval *tv, |
| 1366 | const struct event_base *base) |
| 1367 | { |
| 1368 | int idx; |
| 1369 | if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC) |
| 1370 | return 0; |
| 1371 | idx = COMMON_TIMEOUT_IDX(tv); |
| 1372 | return idx < base->n_common_timeouts; |
| 1373 | } |
| 1374 | |
| 1375 | /* True iff tv1 and tv2 have the same common-timeout index, or if neither |
| 1376 | * one is a common timeout. */ |
| 1377 | static inline int |
| 1378 | is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2) |
| 1379 | { |
| 1380 | return (tv1->tv_usec & ~MICROSECONDS_MASK) == |
| 1381 | (tv2->tv_usec & ~MICROSECONDS_MASK); |
| 1382 | } |
| 1383 | |
| 1384 | /** Requires that 'tv' is a common timeout. Return the corresponding |
| 1385 | * common_timeout_list. */ |
| 1386 | static inline struct common_timeout_list * |
| 1387 | get_common_timeout_list(struct event_base *base, const struct timeval *tv) |
| 1388 | { |
| 1389 | return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)]; |
| 1390 | } |
| 1391 | |
| 1392 | #if 0 |
| 1393 | static inline int |
| 1394 | common_timeout_ok(const struct timeval *tv, |
| 1395 | struct event_base *base) |
| 1396 | { |
| 1397 | const struct timeval *expect = |
| 1398 | &get_common_timeout_list(base, tv)->duration; |
| 1399 | return tv->tv_sec == expect->tv_sec && |
| 1400 | tv->tv_usec == expect->tv_usec; |
| 1401 | } |
| 1402 | #endif |
| 1403 | |
| 1404 | /* Add the timeout for the first event in given common timeout list to the |
| 1405 | * event_base's minheap. */ |
| 1406 | static void |
| 1407 | common_timeout_schedule(struct common_timeout_list *ctl, |
| 1408 | const struct timeval *now, struct event *head) |
| 1409 | { |
| 1410 | struct timeval timeout = head->ev_timeout; |
| 1411 | timeout.tv_usec &= MICROSECONDS_MASK; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1412 | event_add_nolock_(&ctl->timeout_event, &timeout, 1); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1413 | } |
| 1414 | |
| 1415 | /* Callback: invoked when the timeout for a common timeout queue triggers. |
| 1416 | * This means that (at least) the first event in that queue should be run, |
| 1417 | * and the timeout should be rescheduled if there are more events. */ |
| 1418 | static void |
| 1419 | common_timeout_callback(evutil_socket_t fd, short what, void *arg) |
| 1420 | { |
| 1421 | struct timeval now; |
| 1422 | struct common_timeout_list *ctl = arg; |
| 1423 | struct event_base *base = ctl->base; |
| 1424 | struct event *ev = NULL; |
| 1425 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 1426 | gettime(base, &now); |
| 1427 | while (1) { |
| 1428 | ev = TAILQ_FIRST(&ctl->events); |
| 1429 | if (!ev || ev->ev_timeout.tv_sec > now.tv_sec || |
| 1430 | (ev->ev_timeout.tv_sec == now.tv_sec && |
| 1431 | (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec)) |
| 1432 | break; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1433 | event_del_nolock_(ev, EVENT_DEL_NOBLOCK); |
| 1434 | event_active_nolock_(ev, EV_TIMEOUT, 1); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1435 | } |
| 1436 | if (ev) |
| 1437 | common_timeout_schedule(ctl, &now, ev); |
| 1438 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1439 | } |
| 1440 | |
| 1441 | #define MAX_COMMON_TIMEOUTS 256 |
| 1442 | |
| 1443 | const struct timeval * |
| 1444 | event_base_init_common_timeout(struct event_base *base, |
| 1445 | const struct timeval *duration) |
| 1446 | { |
| 1447 | int i; |
| 1448 | struct timeval tv; |
| 1449 | const struct timeval *result=NULL; |
| 1450 | struct common_timeout_list *new_ctl; |
| 1451 | |
| 1452 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 1453 | if (duration->tv_usec > 1000000) { |
| 1454 | memcpy(&tv, duration, sizeof(struct timeval)); |
| 1455 | if (is_common_timeout(duration, base)) |
| 1456 | tv.tv_usec &= MICROSECONDS_MASK; |
| 1457 | tv.tv_sec += tv.tv_usec / 1000000; |
| 1458 | tv.tv_usec %= 1000000; |
| 1459 | duration = &tv; |
| 1460 | } |
| 1461 | for (i = 0; i < base->n_common_timeouts; ++i) { |
| 1462 | const struct common_timeout_list *ctl = |
| 1463 | base->common_timeout_queues[i]; |
| 1464 | if (duration->tv_sec == ctl->duration.tv_sec && |
| 1465 | duration->tv_usec == |
| 1466 | (ctl->duration.tv_usec & MICROSECONDS_MASK)) { |
| 1467 | EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base)); |
| 1468 | result = &ctl->duration; |
| 1469 | goto done; |
| 1470 | } |
| 1471 | } |
| 1472 | if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) { |
| 1473 | event_warnx("%s: Too many common timeouts already in use; " |
| 1474 | "we only support %d per event_base", __func__, |
| 1475 | MAX_COMMON_TIMEOUTS); |
| 1476 | goto done; |
| 1477 | } |
| 1478 | if (base->n_common_timeouts_allocated == base->n_common_timeouts) { |
| 1479 | int n = base->n_common_timeouts < 16 ? 16 : |
| 1480 | base->n_common_timeouts*2; |
| 1481 | struct common_timeout_list **newqueues = |
| 1482 | mm_realloc(base->common_timeout_queues, |
| 1483 | n*sizeof(struct common_timeout_list *)); |
| 1484 | if (!newqueues) { |
| 1485 | event_warn("%s: realloc",__func__); |
| 1486 | goto done; |
| 1487 | } |
| 1488 | base->n_common_timeouts_allocated = n; |
| 1489 | base->common_timeout_queues = newqueues; |
| 1490 | } |
| 1491 | new_ctl = mm_calloc(1, sizeof(struct common_timeout_list)); |
| 1492 | if (!new_ctl) { |
| 1493 | event_warn("%s: calloc",__func__); |
| 1494 | goto done; |
| 1495 | } |
| 1496 | TAILQ_INIT(&new_ctl->events); |
| 1497 | new_ctl->duration.tv_sec = duration->tv_sec; |
| 1498 | new_ctl->duration.tv_usec = |
| 1499 | duration->tv_usec | COMMON_TIMEOUT_MAGIC | |
| 1500 | (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT); |
| 1501 | evtimer_assign(&new_ctl->timeout_event, base, |
| 1502 | common_timeout_callback, new_ctl); |
| 1503 | new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL; |
| 1504 | event_priority_set(&new_ctl->timeout_event, 0); |
| 1505 | new_ctl->base = base; |
| 1506 | base->common_timeout_queues[base->n_common_timeouts++] = new_ctl; |
| 1507 | result = &new_ctl->duration; |
| 1508 | |
| 1509 | done: |
| 1510 | if (result) |
| 1511 | EVUTIL_ASSERT(is_common_timeout(result, base)); |
| 1512 | |
| 1513 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1514 | return result; |
| 1515 | } |
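/* Illustrative usage sketch (not part of the original file): events that
 * share a heavily-used interval can be added with the timeval returned
 * above instead of a plain {5, 0}, which queues them in the O(1) common
 * timeout list rather than the minheap. Assumes "base" and an assigned
 * timer event "timeout_event". */
#if 0
struct timeval five_seconds = { 5, 0 };
const struct timeval *common =
    event_base_init_common_timeout(base, &five_seconds);
if (common)
	evtimer_add(timeout_event, common);	/* common-timeout queue */
else
	evtimer_add(timeout_event, &five_seconds);	/* fall back to minheap */
#endif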
| 1516 | |
| 1517 | /* Closure function invoked when we're activating a persistent event. */ |
| 1518 | static inline void |
| 1519 | event_persist_closure(struct event_base *base, struct event *ev) |
| 1520 | { |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1521 | void (*evcb_callback)(evutil_socket_t, short, void *); |
| 1522 | |
| 1523 | // Other fields of *ev that must be stored before executing |
| 1524 | evutil_socket_t evcb_fd; |
| 1525 | short evcb_res; |
| 1526 | void *evcb_arg; |
| 1527 | |
| 1528 | /* reschedule the persistent event if we have a timeout. */ |
| 1529 | if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) { |
| 1530 | /* If there was a timeout, we want it to run at an interval of |
| 1531 | * ev_io_timeout after the last time it was _scheduled_ for, |
| 1532 | * not ev_io_timeout after _now_. If it fired for another |
| 1533 | * reason, though, the timeout ought to start ticking _now_. */ |
| 1534 | struct timeval run_at, relative_to, delay, now; |
| 1535 | ev_uint32_t usec_mask = 0; |
| 1536 | EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout, |
| 1537 | &ev->ev_io_timeout)); |
| 1538 | gettime(base, &now); |
| 1539 | if (is_common_timeout(&ev->ev_timeout, base)) { |
| 1540 | delay = ev->ev_io_timeout; |
| 1541 | usec_mask = delay.tv_usec & ~MICROSECONDS_MASK; |
| 1542 | delay.tv_usec &= MICROSECONDS_MASK; |
| 1543 | if (ev->ev_res & EV_TIMEOUT) { |
| 1544 | relative_to = ev->ev_timeout; |
| 1545 | relative_to.tv_usec &= MICROSECONDS_MASK; |
| 1546 | } else { |
| 1547 | relative_to = now; |
| 1548 | } |
| 1549 | } else { |
| 1550 | delay = ev->ev_io_timeout; |
| 1551 | if (ev->ev_res & EV_TIMEOUT) { |
| 1552 | relative_to = ev->ev_timeout; |
| 1553 | } else { |
| 1554 | relative_to = now; |
| 1555 | } |
| 1556 | } |
| 1557 | evutil_timeradd(&relative_to, &delay, &run_at); |
| 1558 | if (evutil_timercmp(&run_at, &now, <)) { |
| 1559 | /* Looks like we missed at least one invocation due to |
| 1560 | * a clock jump, not running the event loop for a |
| 1561 | * while, really slow callbacks, or |
| 1562 | * something. Reschedule relative to now. |
| 1563 | */ |
| 1564 | evutil_timeradd(&now, &delay, &run_at); |
| 1565 | } |
| 1566 | run_at.tv_usec |= usec_mask; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1567 | event_add_nolock_(ev, &run_at, 1); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1568 | } |
| 1569 | |
| 1570 | // Save our callback before we release the lock |
| 1571 | evcb_callback = ev->ev_callback; |
| 1572 | evcb_fd = ev->ev_fd; |
| 1573 | evcb_res = ev->ev_res; |
| 1574 | evcb_arg = ev->ev_arg; |
| 1575 | |
| 1576 | // Release the lock |
| 1577 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1578 | |
| 1579 | // Execute the callback |
| 1580 | (evcb_callback)(evcb_fd, evcb_res, evcb_arg); |
| 1581 | } |
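/* Illustrative usage sketch (not part of the original file): a persistent
 * read event with a timeout. Per the rescheduling logic above, when the
 * event fires for EV_TIMEOUT it is next scheduled one interval after the
 * previous deadline; when it fires for EV_READ, the interval restarts from
 * "now". Assumes "base", "fd", and a callback "cb" exist. */
#if 0
struct timeval interval = { 1, 0 };
struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, cb, NULL);
if (ev)
	event_add(ev, &interval);	/* cb runs on readability or every second */
#endif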
| 1582 | |
| 1583 | /* |
| 1584 | Helper for event_process_active to process all the events in a single queue, |
| 1585 | releasing the lock as we go. This function requires that the lock be held |
| 1586 | when it's invoked. Returns -1 if we get a signal or an event_break that |
| 1587 | means we should stop processing any active events now. Otherwise returns |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1588 | the number of non-internal event_callbacks that we processed. |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1589 | */ |
| 1590 | static int |
| 1591 | event_process_active_single_queue(struct event_base *base, |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1592 | struct evcallback_list *activeq, |
| 1593 | int max_to_process, const struct timeval *endtime) |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1594 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1595 | struct event_callback *evcb; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1596 | int count = 0; |
| 1597 | |
| 1598 | EVUTIL_ASSERT(activeq != NULL); |
| 1599 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1600 | for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) { |
| 1601 | struct event *ev=NULL; |
| 1602 | if (evcb->evcb_flags & EVLIST_INIT) { |
| 1603 | ev = event_callback_to_event(evcb); |
| 1604 | |
| 1605 | if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING) |
| 1606 | event_queue_remove_active(base, evcb); |
| 1607 | else |
| 1608 | event_del_nolock_(ev, EVENT_DEL_NOBLOCK); |
| 1609 | event_debug(( |
| 1610 | "event_process_active: event: %p, %s%s%scall %p", |
| 1611 | ev, |
| 1612 | ev->ev_res & EV_READ ? "EV_READ " : " ", |
| 1613 | ev->ev_res & EV_WRITE ? "EV_WRITE " : " ", |
| 1614 | ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ", |
| 1615 | ev->ev_callback)); |
| 1616 | } else { |
| 1617 | event_queue_remove_active(base, evcb); |
| 1618 | event_debug(("event_process_active: event_callback %p, " |
| 1619 | "closure %d, call %p", |
| 1620 | evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback)); |
| 1621 | } |
| 1622 | |
| 1623 | if (!(evcb->evcb_flags & EVLIST_INTERNAL)) |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1624 | ++count; |
| 1625 | |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1626 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1627 | base->current_event = evcb; |
| 1628 | #ifndef EVENT__DISABLE_THREAD_SUPPORT |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1629 | base->current_event_waiters = 0; |
| 1630 | #endif |
| 1631 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1632 | switch (evcb->evcb_closure) { |
| 1633 | case EV_CLOSURE_EVENT_SIGNAL: |
| 1634 | EVUTIL_ASSERT(ev != NULL); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1635 | event_signal_closure(base, ev); |
| 1636 | break; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1637 | case EV_CLOSURE_EVENT_PERSIST: |
| 1638 | EVUTIL_ASSERT(ev != NULL); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1639 | event_persist_closure(base, ev); |
| 1640 | break; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1641 | case EV_CLOSURE_EVENT: { |
| 1642 | void (*evcb_callback)(evutil_socket_t, short, void *); |
| 1643 | EVUTIL_ASSERT(ev != NULL); |
| 1644 | evcb_callback = *ev->ev_callback; |
Josh Gao | 83a0c9c | 2017-08-10 12:30:25 -0700 | [diff] [blame] | 1645 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1646 | evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg); |
| 1647 | } |
| 1648 | break; |
| 1649 | case EV_CLOSURE_CB_SELF: { |
| 1650 | void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb; |
| 1651 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1652 | evcb_selfcb(evcb, evcb->evcb_arg); |
| 1653 | } |
| 1654 | break; |
| 1655 | case EV_CLOSURE_EVENT_FINALIZE: |
| 1656 | case EV_CLOSURE_EVENT_FINALIZE_FREE: { |
| 1657 | void (*evcb_evfinalize)(struct event *, void *); |
| 1658 | int evcb_closure = evcb->evcb_closure; |
| 1659 | EVUTIL_ASSERT(ev != NULL); |
| 1660 | base->current_event = NULL; |
| 1661 | evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize; |
| 1662 | EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); |
| 1663 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1664 | evcb_evfinalize(ev, ev->ev_arg); |
| 1665 | event_debug_note_teardown_(ev); |
| 1666 | if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE) |
| 1667 | mm_free(ev); |
| 1668 | } |
| 1669 | break; |
| 1670 | case EV_CLOSURE_CB_FINALIZE: { |
| 1671 | void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize; |
| 1672 | base->current_event = NULL; |
| 1673 | EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); |
| 1674 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1675 | evcb_cbfinalize(evcb, evcb->evcb_arg); |
| 1676 | } |
| 1677 | break; |
| 1678 | default: |
| 1679 | EVUTIL_ASSERT(0); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1680 | } |
| 1681 | |
| 1682 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1683 | base->current_event = NULL; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1684 | #ifndef EVENT__DISABLE_THREAD_SUPPORT |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1685 | if (base->current_event_waiters) { |
| 1686 | base->current_event_waiters = 0; |
| 1687 | EVTHREAD_COND_BROADCAST(base->current_event_cond); |
| 1688 | } |
| 1689 | #endif |
| 1690 | |
| 1691 | if (base->event_break) |
| 1692 | return -1; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1693 | if (count >= max_to_process) |
| 1694 | return count; |
| 1695 | if (count && endtime) { |
| 1696 | struct timeval now; |
| 1697 | update_time_cache(base); |
| 1698 | gettime(base, &now); |
| 1699 | if (evutil_timercmp(&now, endtime, >=)) |
| 1700 | return count; |
| 1701 | } |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1702 | if (base->event_continue) |
| 1703 | break; |
| 1704 | } |
| 1705 | return count; |
| 1706 | } |
| 1707 | |
| 1708 | /* |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1709 | * Active events are stored in priority queues. Lower priorities are always |
| 1710 | * processed before higher priorities. Low priority events can starve high |
| 1711 | * priority ones. |
| 1712 | */ |
| 1713 | |
| 1714 | static int |
| 1715 | event_process_active(struct event_base *base) |
| 1716 | { |
| 1717 | /* Caller must hold th_base_lock */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1718 | struct evcallback_list *activeq = NULL; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1719 | int i, c = 0; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1720 | const struct timeval *endtime; |
| 1721 | struct timeval tv; |
| 1722 | const int maxcb = base->max_dispatch_callbacks; |
| 1723 | const int limit_after_prio = base->limit_callbacks_after_prio; |
| 1724 | if (base->max_dispatch_time.tv_sec >= 0) { |
| 1725 | update_time_cache(base); |
| 1726 | gettime(base, &tv); |
| 1727 | evutil_timeradd(&base->max_dispatch_time, &tv, &tv); |
| 1728 | endtime = &tv; |
| 1729 | } else { |
| 1730 | endtime = NULL; |
| 1731 | } |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1732 | |
| 1733 | for (i = 0; i < base->nactivequeues; ++i) { |
| 1734 | if (TAILQ_FIRST(&base->activequeues[i]) != NULL) { |
| 1735 | base->event_running_priority = i; |
| 1736 | activeq = &base->activequeues[i]; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1737 | if (i < limit_after_prio) |
| 1738 | c = event_process_active_single_queue(base, activeq, |
| 1739 | INT_MAX, NULL); |
| 1740 | else |
| 1741 | c = event_process_active_single_queue(base, activeq, |
| 1742 | maxcb, endtime); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1743 | if (c < 0) { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1744 | goto done; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1745 | } else if (c > 0) |
| 1746 | break; /* Processed a real event; do not |
| 1747 | * consider lower-priority events */ |
| 1748 | /* If we get here, all of the events we processed |
| 1749 | * were internal. Continue. */ |
| 1750 | } |
| 1751 | } |
| 1752 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1753 | done: |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1754 | base->event_running_priority = -1; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1755 | |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1756 | return c; |
| 1757 | } |
| 1758 | |
| 1759 | /* |
| 1760 | * Wait continuously for events. We exit only if no events are left. |
| 1761 | */ |
| 1762 | |
| 1763 | int |
| 1764 | event_dispatch(void) |
| 1765 | { |
| 1766 | return (event_loop(0)); |
| 1767 | } |
| 1768 | |
| 1769 | int |
| 1770 | event_base_dispatch(struct event_base *event_base) |
| 1771 | { |
| 1772 | return (event_base_loop(event_base, 0)); |
| 1773 | } |
| 1774 | |
| 1775 | const char * |
| 1776 | event_base_get_method(const struct event_base *base) |
| 1777 | { |
| 1778 | EVUTIL_ASSERT(base); |
| 1779 | return (base->evsel->name); |
| 1780 | } |
| 1781 | |
| 1782 | /** Callback: used to implement event_base_loopexit by telling the event_base |
| 1783 | * that it's time to exit its loop. */ |
| 1784 | static void |
| 1785 | event_loopexit_cb(evutil_socket_t fd, short what, void *arg) |
| 1786 | { |
| 1787 | struct event_base *base = arg; |
| 1788 | base->event_gotterm = 1; |
| 1789 | } |
| 1790 | |
| 1791 | int |
| 1792 | event_loopexit(const struct timeval *tv) |
| 1793 | { |
| 1794 | return (event_once(-1, EV_TIMEOUT, event_loopexit_cb, |
| 1795 | current_base, tv)); |
| 1796 | } |
| 1797 | |
| 1798 | int |
| 1799 | event_base_loopexit(struct event_base *event_base, const struct timeval *tv) |
| 1800 | { |
| 1801 | return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb, |
| 1802 | event_base, tv)); |
| 1803 | } |
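/* Illustrative usage sketch (not part of the original file): ask the loop
 * to exit after ten seconds. Unlike event_base_loopbreak(), callbacks that
 * are already active when the timer fires still run before the loop exits. */
#if 0
struct timeval ten_seconds = { 10, 0 };
event_base_loopexit(base, &ten_seconds);
event_base_dispatch(base);	/* returns roughly ten seconds later */
#endif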
| 1804 | |
| 1805 | int |
| 1806 | event_loopbreak(void) |
| 1807 | { |
| 1808 | return (event_base_loopbreak(current_base)); |
| 1809 | } |
| 1810 | |
| 1811 | int |
| 1812 | event_base_loopbreak(struct event_base *event_base) |
| 1813 | { |
| 1814 | int r = 0; |
| 1815 | if (event_base == NULL) |
| 1816 | return (-1); |
| 1817 | |
| 1818 | EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); |
| 1819 | event_base->event_break = 1; |
| 1820 | |
| 1821 | if (EVBASE_NEED_NOTIFY(event_base)) { |
| 1822 | r = evthread_notify_base(event_base); |
| 1823 | } else { |
| 1824 | r = (0); |
| 1825 | } |
| 1826 | EVBASE_RELEASE_LOCK(event_base, th_base_lock); |
| 1827 | return r; |
| 1828 | } |
| 1829 | |
| 1830 | int |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1831 | event_base_loopcontinue(struct event_base *event_base) |
| 1832 | { |
| 1833 | int r = 0; |
| 1834 | if (event_base == NULL) |
| 1835 | return (-1); |
| 1836 | |
| 1837 | EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); |
| 1838 | event_base->event_continue = 1; |
| 1839 | |
| 1840 | if (EVBASE_NEED_NOTIFY(event_base)) { |
| 1841 | r = evthread_notify_base(event_base); |
| 1842 | } else { |
| 1843 | r = (0); |
| 1844 | } |
| 1845 | EVBASE_RELEASE_LOCK(event_base, th_base_lock); |
| 1846 | return r; |
| 1847 | } |
| 1848 | |
| 1849 | int |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1850 | event_base_got_break(struct event_base *event_base) |
| 1851 | { |
| 1852 | int res; |
| 1853 | EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); |
| 1854 | res = event_base->event_break; |
| 1855 | EVBASE_RELEASE_LOCK(event_base, th_base_lock); |
| 1856 | return res; |
| 1857 | } |
| 1858 | |
| 1859 | int |
| 1860 | event_base_got_exit(struct event_base *event_base) |
| 1861 | { |
| 1862 | int res; |
| 1863 | EVBASE_ACQUIRE_LOCK(event_base, th_base_lock); |
| 1864 | res = event_base->event_gotterm; |
| 1865 | EVBASE_RELEASE_LOCK(event_base, th_base_lock); |
| 1866 | return res; |
| 1867 | } |
| 1868 | |
| 1869 | /* not thread safe */ |
| 1870 | |
| 1871 | int |
| 1872 | event_loop(int flags) |
| 1873 | { |
| 1874 | return event_base_loop(current_base, flags); |
| 1875 | } |
| 1876 | |
| 1877 | int |
| 1878 | event_base_loop(struct event_base *base, int flags) |
| 1879 | { |
| 1880 | const struct eventop *evsel = base->evsel; |
| 1881 | struct timeval tv; |
| 1882 | struct timeval *tv_p; |
| 1883 | int res, done, retval = 0; |
| 1884 | |
| 1885 | /* Grab the lock. We will release it inside evsel->dispatch, and again |
| 1886 | * as we invoke user callbacks. */ |
| 1887 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 1888 | |
| 1889 | if (base->running_loop) { |
| 1890 | event_warnx("%s: reentrant invocation. Only one event_base_loop" |
| 1891 | " can run on each event_base at once.", __func__); |
| 1892 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1893 | return -1; |
| 1894 | } |
| 1895 | |
| 1896 | base->running_loop = 1; |
| 1897 | |
| 1898 | clear_time_cache(base); |
| 1899 | |
| 1900 | if (base->sig.ev_signal_added && base->sig.ev_n_signals_added) |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1901 | evsig_set_base_(base); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1902 | |
| 1903 | done = 0; |
| 1904 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1905 | #ifndef EVENT__DISABLE_THREAD_SUPPORT |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1906 | base->th_owner_id = EVTHREAD_GET_ID(); |
| 1907 | #endif |
| 1908 | |
| 1909 | base->event_gotterm = base->event_break = 0; |
| 1910 | |
| 1911 | while (!done) { |
| 1912 | base->event_continue = 0; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1913 | base->n_deferreds_queued = 0; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1914 | |
| 1915 | /* Terminate the loop if we have been asked to */ |
| 1916 | if (base->event_gotterm) { |
| 1917 | break; |
| 1918 | } |
| 1919 | |
| 1920 | if (base->event_break) { |
| 1921 | break; |
| 1922 | } |
| 1923 | |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1924 | tv_p = &tv; |
| 1925 | if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) { |
| 1926 | timeout_next(base, &tv_p); |
| 1927 | } else { |
| 1928 | /* |
| 1929 | * if we have active events, we just poll new events |
| 1930 | * without waiting. |
| 1931 | */ |
| 1932 | evutil_timerclear(&tv); |
| 1933 | } |
| 1934 | |
| 1935 | /* If we have no events, we just exit */ |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1936 | if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) && |
| 1937 | !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) { |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1938 | event_debug(("%s: no events registered.", __func__)); |
| 1939 | retval = 1; |
| 1940 | goto done; |
| 1941 | } |
| 1942 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1943 | event_queue_make_later_events_active(base); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1944 | |
| 1945 | clear_time_cache(base); |
| 1946 | |
| 1947 | res = evsel->dispatch(base, tv_p); |
| 1948 | |
| 1949 | if (res == -1) { |
| 1950 | event_debug(("%s: dispatch returned unsuccessfully.", |
| 1951 | __func__)); |
| 1952 | retval = -1; |
| 1953 | goto done; |
| 1954 | } |
| 1955 | |
| 1956 | update_time_cache(base); |
| 1957 | |
| 1958 | timeout_process(base); |
| 1959 | |
| 1960 | if (N_ACTIVE_CALLBACKS(base)) { |
| 1961 | int n = event_process_active(base); |
| 1962 | if ((flags & EVLOOP_ONCE) |
| 1963 | && N_ACTIVE_CALLBACKS(base) == 0 |
| 1964 | && n != 0) |
| 1965 | done = 1; |
| 1966 | } else if (flags & EVLOOP_NONBLOCK) |
| 1967 | done = 1; |
| 1968 | } |
| 1969 | event_debug(("%s: asked to terminate loop.", __func__)); |
| 1970 | |
| 1971 | done: |
| 1972 | clear_time_cache(base); |
| 1973 | base->running_loop = 0; |
| 1974 | |
| 1975 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 1976 | |
| 1977 | return (retval); |
| 1978 | } |
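/* Illustrative usage sketch (not part of the original file): the loop flags
 * accepted by event_base_loop() above. */
#if 0
event_base_loop(base, 0);			/* run until no events remain */
event_base_loop(base, EVLOOP_ONCE);		/* wait, run one batch, return */
event_base_loop(base, EVLOOP_NONBLOCK);		/* poll once without waiting */
event_base_loop(base, EVLOOP_NO_EXIT_ON_EMPTY);	/* keep looping when empty */
#endif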
| 1979 | |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1980 | /* One-time callback to implement event_base_once: invokes the user callback, |
| 1981 | * then deletes the allocated storage */ |
| 1982 | static void |
| 1983 | event_once_cb(evutil_socket_t fd, short events, void *arg) |
| 1984 | { |
| 1985 | struct event_once *eonce = arg; |
| 1986 | |
| 1987 | (*eonce->cb)(fd, events, eonce->arg); |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 1988 | EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock); |
| 1989 | LIST_REMOVE(eonce, next_once); |
| 1990 | EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 1991 | event_debug_unassign(&eonce->ev); |
| 1992 | mm_free(eonce); |
| 1993 | } |
| 1994 | |
| 1995 | /* not threadsafe, event scheduled once. */ |
| 1996 | int |
| 1997 | event_once(evutil_socket_t fd, short events, |
| 1998 | void (*callback)(evutil_socket_t, short, void *), |
| 1999 | void *arg, const struct timeval *tv) |
| 2000 | { |
| 2001 | return event_base_once(current_base, fd, events, callback, arg, tv); |
| 2002 | } |
| 2003 | |
| 2004 | /* Schedules an event once */ |
| 2005 | int |
| 2006 | event_base_once(struct event_base *base, evutil_socket_t fd, short events, |
| 2007 | void (*callback)(evutil_socket_t, short, void *), |
| 2008 | void *arg, const struct timeval *tv) |
| 2009 | { |
| 2010 | struct event_once *eonce; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2011 | int res = 0; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2012 | int activate = 0; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2013 | |
| 2014 | /* We cannot support signals that just fire once, or persistent |
| 2015 | * events. */ |
| 2016 | if (events & (EV_SIGNAL|EV_PERSIST)) |
| 2017 | return (-1); |
| 2018 | |
| 2019 | if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL) |
| 2020 | return (-1); |
| 2021 | |
| 2022 | eonce->cb = callback; |
| 2023 | eonce->arg = arg; |
| 2024 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2025 | if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) { |
Josh Gao | 83a0c9c | 2017-08-10 12:30:25 -0700 | [diff] [blame] | 2026 | evtimer_assign(&eonce->ev, base, event_once_cb, eonce); |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2027 | |
| 2028 | if (tv == NULL || ! evutil_timerisset(tv)) { |
| 2029 | /* If the event is going to become active immediately, |
| 2030 | * don't put it on the timeout queue. This is one |
| 2031 | * idiom for scheduling a callback, so let's make |
| 2032 | * it fast (and order-preserving). */ |
| 2033 | activate = 1; |
| 2034 | } |
| 2035 | } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) { |
| 2036 | events &= EV_READ|EV_WRITE|EV_CLOSED; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2037 | |
| 2038 | event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce); |
| 2039 | } else { |
| 2040 | /* Bad event combination */ |
| 2041 | mm_free(eonce); |
| 2042 | return (-1); |
| 2043 | } |
| 2044 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2045 | if (res == 0) { |
| 2046 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 2047 | if (activate) |
| 2048 | event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1); |
| 2049 | else |
| 2050 | res = event_add_nolock_(&eonce->ev, tv, 0); |
| 2051 | |
| 2052 | if (res != 0) { |
| 2053 | mm_free(eonce); |
| 2053 | EVBASE_RELEASE_LOCK(base, th_base_lock); /* don't leak the lock on error */ |
| 2054 | return (res); |
| 2055 | } else { |
| 2056 | LIST_INSERT_HEAD(&base->once_events, eonce, next_once); |
| 2057 | } |
| 2058 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2059 | } |
| 2060 | |
| 2061 | return (0); |
| 2062 | } |
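/* Illustrative usage sketch (not part of the original file): fire a callback
 * exactly once, half a second from now, with no struct event managed by the
 * caller. Note the EV_SIGNAL/EV_PERSIST restriction above. */
#if 0
static void once_cb(evutil_socket_t fd, short what, void *arg)
{
	puts("tick");	/* runs once; the eonce storage is then freed */
}

static void schedule_tick(struct event_base *base)
{
	struct timeval half_second = { 0, 500000 };
	event_base_once(base, -1, EV_TIMEOUT, once_cb, NULL, &half_second);
}
#endif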
| 2063 | |
| 2064 | int |
| 2065 | event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg) |
| 2066 | { |
| 2067 | if (!base) |
| 2068 | base = current_base; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2069 | if (arg == &event_self_cbarg_ptr_) |
| 2070 | arg = ev; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2071 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2072 | event_debug_assert_not_added_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2073 | |
| 2074 | ev->ev_base = base; |
| 2075 | |
| 2076 | ev->ev_callback = callback; |
| 2077 | ev->ev_arg = arg; |
| 2078 | ev->ev_fd = fd; |
| 2079 | ev->ev_events = events; |
| 2080 | ev->ev_res = 0; |
| 2081 | ev->ev_flags = EVLIST_INIT; |
| 2082 | ev->ev_ncalls = 0; |
| 2083 | ev->ev_pncalls = NULL; |
| 2084 | |
| 2085 | if (events & EV_SIGNAL) { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2086 | if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) { |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2087 | event_warnx("%s: EV_SIGNAL is not compatible with " |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2088 | "EV_READ, EV_WRITE or EV_CLOSED", __func__); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2089 | return -1; |
| 2090 | } |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2091 | ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2092 | } else { |
| 2093 | if (events & EV_PERSIST) { |
| 2094 | evutil_timerclear(&ev->ev_io_timeout); |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2095 | ev->ev_closure = EV_CLOSURE_EVENT_PERSIST; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2096 | } else { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2097 | ev->ev_closure = EV_CLOSURE_EVENT; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2098 | } |
| 2099 | } |
| 2100 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2101 | min_heap_elem_init_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2102 | |
| 2103 | if (base != NULL) { |
| 2104 | /* by default, we put new events into the middle priority */ |
| 2105 | ev->ev_pri = base->nactivequeues / 2; |
| 2106 | } |
| 2107 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2108 | event_debug_note_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2109 | |
| 2110 | return 0; |
| 2111 | } |
| 2112 | |
| 2113 | int |
| 2114 | event_base_set(struct event_base *base, struct event *ev) |
| 2115 | { |
| 2116 | /* Only innocent events may be assigned to a different base */ |
| 2117 | if (ev->ev_flags != EVLIST_INIT) |
| 2118 | return (-1); |
| 2119 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2120 | event_debug_assert_is_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2121 | |
| 2122 | ev->ev_base = base; |
| 2123 | ev->ev_pri = base->nactivequeues/2; |
| 2124 | |
| 2125 | return (0); |
| 2126 | } |
| 2127 | |
| 2128 | void |
| 2129 | event_set(struct event *ev, evutil_socket_t fd, short events, |
| 2130 | void (*callback)(evutil_socket_t, short, void *), void *arg) |
| 2131 | { |
| 2132 | int r; |
| 2133 | r = event_assign(ev, current_base, fd, events, callback, arg); |
| 2134 | EVUTIL_ASSERT(r == 0); |
| 2135 | } |
| 2136 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2137 | void * |
| 2138 | event_self_cbarg(void) |
| 2139 | { |
| 2140 | return &event_self_cbarg_ptr_; |
| 2141 | } |
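/* Illustrative usage sketch (not part of the original file): pass the event
 * itself to its own callback via event_self_cbarg(), which event_assign()
 * above swaps for the event pointer. The ten-tick limit is an arbitrary
 * example value. */
#if 0
static int n_calls = 0;
static void self_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event *me = arg;	/* event_self_cbarg() became the event */
	if (++n_calls >= 10)
		event_del(me);	/* stop after ten ticks */
}

static void start_ticker(struct event_base *base)
{
	struct timeval one_sec = { 1, 0 };
	struct event *ev =
	    event_new(base, -1, EV_PERSIST, self_cb, event_self_cbarg());
	if (ev)
		event_add(ev, &one_sec);
}
#endif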
| 2142 | |
| 2143 | struct event * |
| 2144 | event_base_get_running_event(struct event_base *base) |
| 2145 | { |
| 2146 | struct event *ev = NULL; |
| 2147 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 2148 | if (EVBASE_IN_THREAD(base)) { |
| 2149 | struct event_callback *evcb = base->current_event; |
| 2150 | if (evcb && (evcb->evcb_flags & EVLIST_INIT)) |
| 2151 | ev = event_callback_to_event(evcb); |
| 2152 | } |
| 2153 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 2154 | return ev; |
| 2155 | } |
| 2156 | |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2157 | struct event * |
| 2158 | event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg) |
| 2159 | { |
| 2160 | struct event *ev; |
| 2161 | ev = mm_malloc(sizeof(struct event)); |
| 2162 | if (ev == NULL) |
| 2163 | return (NULL); |
| 2164 | if (event_assign(ev, base, fd, events, cb, arg) < 0) { |
| 2165 | mm_free(ev); |
| 2166 | return (NULL); |
| 2167 | } |
| 2168 | |
| 2169 | return (ev); |
| 2170 | } |
| 2171 | |
| 2172 | void |
| 2173 | event_free(struct event *ev) |
| 2174 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2175 | /* This is disabled, so that events which have been finalized can still be |
| 2176 | * a valid target for event_free(). */ |
| 2177 | // event_debug_assert_is_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2178 | |
| 2179 | /* make sure that this event won't be coming back to haunt us. */ |
| 2180 | event_del(ev); |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2181 | event_debug_note_teardown_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2182 | mm_free(ev); |
| 2183 | |
| 2184 | } |
| 2185 | |
| 2186 | void |
| 2187 | event_debug_unassign(struct event *ev) |
| 2188 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2189 | event_debug_assert_not_added_(ev); |
| 2190 | event_debug_note_teardown_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2191 | |
| 2192 | ev->ev_flags &= ~EVLIST_INIT; |
| 2193 | } |
| 2194 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2195 | #define EVENT_FINALIZE_FREE_ 0x10000 |
| 2196 | static int |
| 2197 | event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb) |
| 2198 | { |
| 2199 | ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ? |
| 2200 | EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE; |
| 2201 | |
| 2202 | event_del_nolock_(ev, EVENT_DEL_NOBLOCK); |
| 2203 | ev->ev_closure = closure; |
| 2204 | ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb; |
| 2205 | event_active_nolock_(ev, EV_FINALIZE, 1); |
| 2206 | ev->ev_flags |= EVLIST_FINALIZING; |
| 2207 | return 0; |
| 2208 | } |
| 2209 | |
| 2210 | static int |
| 2211 | event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb) |
| 2212 | { |
| 2213 | int r; |
| 2214 | struct event_base *base = ev->ev_base; |
| 2215 | if (EVUTIL_FAILURE_CHECK(!base)) { |
| 2216 | event_warnx("%s: event has no event_base set.", __func__); |
| 2217 | return -1; |
| 2218 | } |
| 2219 | |
| 2220 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 2221 | r = event_finalize_nolock_(base, flags, ev, cb); |
| 2222 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 2223 | return r; |
| 2224 | } |
| 2225 | |
| 2226 | int |
| 2227 | event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb) |
| 2228 | { |
| 2229 | return event_finalize_impl_(flags, ev, cb); |
| 2230 | } |
| 2231 | |
| 2232 | int |
| 2233 | event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb) |
| 2234 | { |
| 2235 | return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb); |
| 2236 | } |
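/* Illustrative usage sketch (not part of the original file): tear down an
 * event whose callback may be running in another thread. The finalizer is
 * invoked with the event's callback argument after any in-flight callback
 * completes, and the event is freed afterwards. */
#if 0
static void on_finalize(struct event *ev, void *arg)
{
	/* safe point: ev's callback can no longer be running */
}

static void teardown(struct event *ev)
{
	event_free_finalize(0, ev, on_finalize);
	/* ev must not be touched after this call */
}
#endif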
| 2237 | |
| 2238 | void |
| 2239 | event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *)) |
| 2240 | { |
| 2241 | struct event *ev = NULL; |
| 2242 | if (evcb->evcb_flags & EVLIST_INIT) { |
| 2243 | ev = event_callback_to_event(evcb); |
| 2244 | event_del_nolock_(ev, EVENT_DEL_NOBLOCK); |
| 2245 | } else { |
| 2246 | event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/ |
| 2247 | } |
| 2248 | |
| 2249 | evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE; |
| 2250 | evcb->evcb_cb_union.evcb_cbfinalize = cb; |
| 2251 | event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/ |
| 2252 | evcb->evcb_flags |= EVLIST_FINALIZING; |
| 2253 | } |
| 2254 | |
| 2255 | void |
| 2256 | event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *)) |
| 2257 | { |
| 2258 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 2259 | event_callback_finalize_nolock_(base, flags, evcb, cb); |
| 2260 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 2261 | } |
| 2262 | |
| 2263 | /** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided |
| 2264 | * callback will be invoked on *one of them*, after they have *all* been |
| 2265 | * finalized. */ |
| 2266 | int |
| 2267 | event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *)) |
| 2268 | { |
| 2269 | int n_pending = 0, i; |
| 2270 | |
| 2271 | if (base == NULL) |
| 2272 | base = current_base; |
| 2273 | |
| 2274 | EVBASE_ACQUIRE_LOCK(base, th_base_lock); |
| 2275 | |
| 2276 | event_debug(("%s: %d events finalizing", __func__, n_cbs)); |
| 2277 | |
| 2278 | /* At most one can be currently executing; the rest we just |
| 2279 | * cancel... But we always make sure that the finalize callback |
| 2280 | * runs. */ |
| 2281 | for (i = 0; i < n_cbs; ++i) { |
| 2282 | struct event_callback *evcb = evcbs[i]; |
| 2283 | if (evcb == base->current_event) { |
| 2284 | event_callback_finalize_nolock_(base, 0, evcb, cb); |
| 2285 | ++n_pending; |
| 2286 | } else { |
| 2287 | event_callback_cancel_nolock_(base, evcb, 0); |
| 2288 | } |
| 2289 | } |
| 2290 | |
| 2291 | if (n_pending == 0) { |
| 2292 | /* Just do the first one. */ |
| 2293 | event_callback_finalize_nolock_(base, 0, evcbs[0], cb); |
| 2294 | } |
| 2295 | |
| 2296 | EVBASE_RELEASE_LOCK(base, th_base_lock); |
| 2297 | return 0; |
| 2298 | } |
| 2299 | |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2300 | /* |
| 2301 | * Sets the priority of an event. If the event is currently active, |
| 2302 | * changing the priority will fail. |
| 2303 | */ |
| 2304 | |
| 2305 | int |
| 2306 | event_priority_set(struct event *ev, int pri) |
| 2307 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2308 | event_debug_assert_is_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2309 | |
| 2310 | if (ev->ev_flags & EVLIST_ACTIVE) |
| 2311 | return (-1); |
| 2312 | if (pri < 0 || pri >= ev->ev_base->nactivequeues) |
| 2313 | return (-1); |
| 2314 | |
| 2315 | ev->ev_pri = pri; |
| 2316 | |
| 2317 | return (0); |
| 2318 | } |
| 2319 | |
| 2320 | /* |
| 2321 | * Checks if a specific event is pending or scheduled. |
| 2322 | */ |
| 2323 | |
| 2324 | int |
| 2325 | event_pending(const struct event *ev, short event, struct timeval *tv) |
| 2326 | { |
| 2327 | int flags = 0; |
| 2328 | |
| 2329 | if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) { |
| 2330 | event_warnx("%s: event has no event_base set.", __func__); |
| 2331 | return 0; |
| 2332 | } |
| 2333 | |
| 2334 | EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2335 | event_debug_assert_is_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2336 | |
| 2337 | if (ev->ev_flags & EVLIST_INSERTED) |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2338 | flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)); |
| 2339 | if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2340 | flags |= ev->ev_res; |
| 2341 | if (ev->ev_flags & EVLIST_TIMEOUT) |
| 2342 | flags |= EV_TIMEOUT; |
| 2343 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2344 | event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2345 | |
| 2346 | /* See if there is a timeout that we should report */ |
| 2347 | if (tv != NULL && (flags & event & EV_TIMEOUT)) { |
| 2348 | struct timeval tmp = ev->ev_timeout; |
| 2349 | tmp.tv_usec &= MICROSECONDS_MASK; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2350 | /* correctly remap to real time */ |
| 2351 | evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2352 | } |
| 2353 | |
| 2354 | EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); |
| 2355 | |
| 2356 | return (flags & event); |
| 2357 | } |
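/* Illustrative usage sketch (not part of the original file): check whether a
 * timer is still pending and, if so, when it is due in wall-clock time. */
#if 0
struct timeval due;
if (event_pending(ev, EV_TIMEOUT, &due)) {
	/* due now holds the absolute expiry time of ev's timeout */
}
#endif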
| 2358 | |
| 2359 | int |
| 2360 | event_initialized(const struct event *ev) |
| 2361 | { |
| 2362 | if (!(ev->ev_flags & EVLIST_INIT)) |
| 2363 | return 0; |
| 2364 | |
| 2365 | return 1; |
| 2366 | } |
| 2367 | |
| 2368 | void |
| 2369 | event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out) |
| 2370 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2371 | event_debug_assert_is_setup_(event); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2372 | |
| 2373 | if (base_out) |
| 2374 | *base_out = event->ev_base; |
| 2375 | if (fd_out) |
| 2376 | *fd_out = event->ev_fd; |
| 2377 | if (events_out) |
| 2378 | *events_out = event->ev_events; |
| 2379 | if (callback_out) |
| 2380 | *callback_out = event->ev_callback; |
| 2381 | if (arg_out) |
| 2382 | *arg_out = event->ev_arg; |
| 2383 | } |
| 2384 | |
| 2385 | size_t |
| 2386 | event_get_struct_event_size(void) |
| 2387 | { |
| 2388 | return sizeof(struct event); |
| 2389 | } |
| 2390 | |
| 2391 | evutil_socket_t |
| 2392 | event_get_fd(const struct event *ev) |
| 2393 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2394 | event_debug_assert_is_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2395 | return ev->ev_fd; |
| 2396 | } |
| 2397 | |
| 2398 | struct event_base * |
| 2399 | event_get_base(const struct event *ev) |
| 2400 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2401 | event_debug_assert_is_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2402 | return ev->ev_base; |
| 2403 | } |
| 2404 | |
| 2405 | short |
| 2406 | event_get_events(const struct event *ev) |
| 2407 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2408 | event_debug_assert_is_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2409 | return ev->ev_events; |
| 2410 | } |
| 2411 | |
| 2412 | event_callback_fn |
| 2413 | event_get_callback(const struct event *ev) |
| 2414 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2415 | event_debug_assert_is_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2416 | return ev->ev_callback; |
| 2417 | } |
| 2418 | |
| 2419 | void * |
| 2420 | event_get_callback_arg(const struct event *ev) |
| 2421 | { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2422 | event_debug_assert_is_setup_(ev); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2423 | return ev->ev_arg; |
| 2424 | } |
| 2425 | |
| 2426 | int |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2427 | event_get_priority(const struct event *ev) |
| 2428 | { |
| 2429 | event_debug_assert_is_setup_(ev); |
| 2430 | return ev->ev_pri; |
| 2431 | } |
| 2432 | |
| 2433 | int |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2434 | event_add(struct event *ev, const struct timeval *tv) |
| 2435 | { |
| 2436 | int res; |
| 2437 | |
| 2438 | if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) { |
| 2439 | event_warnx("%s: event has no event_base set.", __func__); |
| 2440 | return -1; |
| 2441 | } |
| 2442 | |
| 2443 | EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock); |
| 2444 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 2445 | res = event_add_nolock_(ev, tv, 0); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 2446 | |
| 2447 | EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock); |
| 2448 | |
| 2449 | return (res); |
| 2450 | } |

/* Helper callback: wake an event_base from another thread.  This version
 * works by writing a byte to one end of a socketpair, so that the event_base
 * listening on the other end will wake up as the corresponding event
 * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
	char buf[1];
	int r;
	buf[0] = (char) 0;
#ifdef _WIN32
	r = send(base->th_notify_fd[1], buf, 1, 0);
#else
	r = write(base->th_notify_fd[1], buf, 1);
#endif
	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
}

#ifdef EVENT__HAVE_EVENTFD
/* Helper callback: wake an event_base from another thread.  This version
 * assumes that you have a working eventfd() implementation. */
static int
evthread_notify_base_eventfd(struct event_base *base)
{
	ev_uint64_t msg = 1;
	int r;
	do {
		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
	} while (r < 0 && errno == EAGAIN);

	return (r < 0) ? -1 : 0;
}
#endif


/** Tell the thread currently running the event_loop for base (if any) that
 * it needs to stop waiting in its dispatch function (if it is waiting) and
 * process all active callbacks. */
static int
evthread_notify_base(struct event_base *base)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (!base->th_notify_fn)
		return -1;
	if (base->is_notify_pending)
		return 0;
	base->is_notify_pending = 1;
	return base->th_notify_fn(base);
}
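
/* Note (not part of this file): is_notify_pending coalesces wakeups, so any
 * number of cross-thread notifications between two loop iterations costs at
 * most one write to the notification fd.  The flag is cleared again by the
 * drain callbacks further below once the dispatching thread wakes up. */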

/* Implementation function to remove a timeout on a currently pending event.
 */
int
event_remove_timer_nolock_(struct event *ev)
{
	struct event_base *base = ev->ev_base;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug(("event_remove_timer_nolock: event: %p", ev));

	/* If it's not pending on a timeout, we don't need to do anything. */
	if (ev->ev_flags & EVLIST_TIMEOUT) {
		event_queue_remove_timeout(base, ev);
		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
	}

	return (0);
}

int
event_remove_timer(struct event *ev)
{
	int res;

	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	res = event_remove_timer_nolock_(ev);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (res);
}
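
/* Illustrative sketch (not part of this file): event_remove_timer() strips
 * only the timeout; any I/O part of the event stays pending.  'read_ev' is
 * a hypothetical EV_READ event created elsewhere.
 *
 *	struct timeval tv = { 5, 0 };
 *	event_add(read_ev, &tv);	// EV_READ with a 5-second timeout
 *	event_remove_timer(read_ev);	// still waiting for EV_READ, no timeout
 */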

/* Implementation function to add an event.  Works just like event_add,
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
 * we treat tv as an absolute time, not as an interval to add to the current
 * time */
int
event_add_nolock_(struct event *ev, const struct timeval *tv,
    int tv_is_absolute)
{
	struct event_base *base = ev->ev_base;
	int res = 0;
	int notify = 0;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug((
		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
		 ev,
		 EV_SOCK_ARG(ev->ev_fd),
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return (-1);
	}

	/*
	 * Prepare for timeout insertion further below; if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve_(&base->timeheap,
			1 + min_heap_size_(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	/* If the main thread is currently executing a signal event's
	 * callback, and we are not the main thread, then we want to wait
	 * until the callback is done before we mess with the event, or else
	 * we can race on ev_ncalls and ev_pncalls below. */
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->current_event == event_to_event_callback(ev) &&
	    (ev->ev_events & EV_SIGNAL)
	    && !EVBASE_IN_THREAD(base)) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_add_(base, ev->ev_fd, ev);
		else if (ev->ev_events & EV_SIGNAL)
			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
		if (res != -1)
			event_queue_insert_inserted(base, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/*
	 * We should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;
		int common_timeout;
#ifdef USE_REINSERT_TIMEOUT
		int was_common;
		int old_timeout_idx;
#endif

		/*
		 * For persistent timeout events, we remember the
		 * timeout value and re-add the event.
		 *
		 * If tv_is_absolute, this was already set.
		 */
		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
			ev->ev_io_timeout = *tv;

#ifndef USE_REINSERT_TIMEOUT
		if (ev->ev_flags & EVLIST_TIMEOUT) {
			event_queue_remove_timeout(base, ev);
		}
#endif

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			if (ev->ev_events & EV_SIGNAL) {
				/* See if we are just active executing
				 * this event in a loop
				 */
				if (ev->ev_ncalls && ev->ev_pncalls) {
					/* Abort loop */
					*ev->ev_pncalls = 0;
				}
			}

			event_queue_remove_active(base, event_to_event_callback(ev));
		}

		gettime(base, &now);

		common_timeout = is_common_timeout(tv, base);
#ifdef USE_REINSERT_TIMEOUT
		was_common = is_common_timeout(&ev->ev_timeout, base);
		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
#endif

		if (tv_is_absolute) {
			ev->ev_timeout = *tv;
		} else if (common_timeout) {
			struct timeval tmp = *tv;
			tmp.tv_usec &= MICROSECONDS_MASK;
			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
			ev->ev_timeout.tv_usec |=
			    (tv->tv_usec & ~MICROSECONDS_MASK);
		} else {
			evutil_timeradd(&now, tv, &ev->ev_timeout);
		}

		event_debug((
			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));

#ifdef USE_REINSERT_TIMEOUT
		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
#else
		event_queue_insert_timeout(base, ev);
#endif

		if (common_timeout) {
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			if (ev == TAILQ_FIRST(&ctl->events)) {
				common_timeout_schedule(ctl, &now, ev);
			}
		} else {
			struct event* top = NULL;
			/* See if the earliest timeout is now earlier than it
			 * was before: if so, we will need to tell the main
			 * thread to wake up earlier than it would otherwise.
			 * We double check the timeout of the top element to
			 * handle time distortions due to system suspension.
			 */
			if (min_heap_elt_is_top_(ev))
				notify = 1;
			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
					 evutil_timercmp(&top->ev_timeout, &now, <))
				notify = 1;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_add_(ev);

	return (res);
}
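
/* Illustrative sketch (not part of this file): the common-timeout path above
 * is taken when the caller registered a duration with
 * event_base_init_common_timeout(), which encodes a queue index into the
 * magic bits of tv_usec.  'base' and 'ev' are hypothetical.
 *
 *	struct timeval five_sec = { 5, 0 };
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &five_sec);
 *	event_add(ev, common);	// goes on a linked list, not the min-heap
 *
 * This pays off when many events share the same duration. */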

static int
event_del_(struct event *ev, int blocking)
{
	int res;

	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	res = event_del_nolock_(ev, blocking);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (res);
}

int
event_del(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
}

int
event_del_block(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_BLOCK);
}

int
event_del_noblock(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_NOBLOCK);
}
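
/* Illustrative note (not part of this file): the three public variants only
 * differ in how they treat an event whose callback is running in another
 * thread right now.
 *
 *	event_del_block(ev);	// always wait for a running callback to finish
 *	event_del_noblock(ev);	// never wait; the callback may still be running
 *	event_del(ev);		// wait unless the event was made with EV_FINALIZE
 *
 * Waiting is what makes it safe to free the callback argument as soon as
 * the call returns. */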

/** Helper for event_del: always called with th_base_lock held.
 *
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
 * EVEN_IF_FINALIZING} values. See those for more information.
 */
int
event_del_nolock_(struct event *ev, int blocking)
{
	struct event_base *base;
	int res = 0, notify = 0;

	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);

	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
		if (ev->ev_flags & EVLIST_FINALIZING) {
			/* XXXX Debug */
			return 0;
		}
	}

	/* If the main thread is currently executing this event's callback,
	 * and we are not the main thread, then we want to wait until the
	 * callback is done before we start removing the event.  That way,
	 * when this function returns, it will be safe to free the
	 * user-supplied argument. */
	base = ev->ev_base;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (blocking != EVENT_DEL_NOBLOCK &&
	    base->current_event == event_to_event_callback(ev) &&
	    !EVBASE_IN_THREAD(base) &&
	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_events & EV_SIGNAL) {
		if (ev->ev_ncalls && ev->ev_pncalls) {
			/* Abort loop */
			*ev->ev_pncalls = 0;
		}
	}

	if (ev->ev_flags & EVLIST_TIMEOUT) {
		/* NOTE: We never need to notify the main thread because of a
		 * deleted timeout event: all that could happen if we don't is
		 * that the dispatch loop might wake up too early.  But the
		 * point of notifying the main thread _is_ to wake up the
		 * dispatch loop early anyway, so we wouldn't gain anything by
		 * doing it.
		 */
		event_queue_remove_timeout(base, ev);
	}

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove_active(base, event_to_event_callback(ev));
	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
		event_queue_remove_active_later(base, event_to_event_callback(ev));

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove_inserted(base, ev);
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_del_(base, ev->ev_fd, ev);
		else
			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_del_(ev);

	return (res);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	event_debug_assert_is_setup_(ev);

	event_active_nolock_(ev, res, ncalls);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
}
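
/* Illustrative sketch (not part of this file): event_active() lets another
 * thread fire an event's callback without its fd becoming ready, e.g. to
 * push work at a loop that is blocked in its backend.  'ev' is a
 * hypothetical event already added to a running base.
 *
 *	event_active(ev, EV_READ, 0);	// run ev's callback with EV_READ set
 *
 * The ncalls argument is only meaningful for signal events. */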


void
event_active_nolock_(struct event *ev, int res, short ncalls)
{
	struct event_base *base;

	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));

	base = ev->ev_base;
	EVENT_BASE_ASSERT_LOCKED(base);

	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return;
	}

	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		ev->ev_res |= res;
		return;
	case EVLIST_ACTIVE_LATER:
		ev->ev_res |= res;
		break;
	case 0:
		ev->ev_res = res;
		break;
	}

	if (ev->ev_pri < base->event_running_priority)
		base->event_continue = 1;

	if (ev->ev_events & EV_SIGNAL) {
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		if (base->current_event == event_to_event_callback(ev) &&
		    !EVBASE_IN_THREAD(base)) {
			++base->current_event_waiters;
			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
		}
#endif
		ev->ev_ncalls = ncalls;
		ev->ev_pncalls = NULL;
	}

	event_callback_activate_nolock_(base, event_to_event_callback(ev));
}

void
event_active_later_(struct event *ev, int res)
{
	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
	event_active_later_nolock_(ev, res);
	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
}

void
event_active_later_nolock_(struct event *ev, int res)
{
	struct event_base *base = ev->ev_base;
	EVENT_BASE_ASSERT_LOCKED(base);

	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
		/* We get different kinds of events, add them together */
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;

	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
}

int
event_callback_activate_(struct event_base *base,
    struct event_callback *evcb)
{
	int r;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_callback_activate_nolock_(base, evcb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

int
event_callback_activate_nolock_(struct event_base *base,
    struct event_callback *evcb)
{
	int r = 1;

	if (evcb->evcb_flags & EVLIST_FINALIZING)
		return 0;

	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
	default:
		EVUTIL_ASSERT(0);
	case EVLIST_ACTIVE_LATER:
		event_queue_remove_active_later(base, evcb);
		r = 0;
		break;
	case EVLIST_ACTIVE:
		return 0;
	case 0:
		break;
	}

	event_queue_insert_active(base, evcb);

	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	return r;
}

int
event_callback_activate_later_nolock_(struct event_base *base,
    struct event_callback *evcb)
{
	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
		return 0;

	event_queue_insert_active_later(base, evcb);
	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	return 1;
}

void
event_callback_init_(struct event_base *base,
    struct event_callback *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_pri = base->nactivequeues - 1;
}

int
event_callback_cancel_(struct event_base *base,
    struct event_callback *evcb)
{
	int r;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_callback_cancel_nolock_(base, evcb, 0);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

int
event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing)
{
	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
		return 0;

	if (evcb->evcb_flags & EVLIST_INIT)
		return event_del_nolock_(event_callback_to_event(evcb),
		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);

	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		event_queue_remove_active(base, evcb);
		return 0;
	case EVLIST_ACTIVE_LATER:
		event_queue_remove_active_later(base, evcb);
		break;
	case 0:
		break;
	}

	return 0;
}

void
event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_cb_union.evcb_selfcb = fn;
	cb->evcb_arg = arg;
	cb->evcb_pri = priority;
	cb->evcb_closure = EV_CLOSURE_CB_SELF;
}

void
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
{
	cb->evcb_pri = priority;
}

void
event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
{
	if (!base)
		base = current_base;
	event_callback_cancel_(base, cb);
}

#define MAX_DEFERREDS_QUEUED 32
int
event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
{
	int r = 1;
	if (!base)
		base = current_base;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
		r = event_callback_activate_later_nolock_(base, cb);
	} else {
		r = event_callback_activate_nolock_(base, cb);
		if (r) {
			++base->n_deferreds_queued;
		}
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
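
/* Note (not part of this file): the MAX_DEFERREDS_QUEUED cap above bounds
 * how many deferred callbacks may join the current loop iteration.  Once
 * more than 32 are queued, further ones go to the active-later queue and
 * run on the next iteration, so a callback that keeps deferring work
 * cannot starve I/O events. */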

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	/* Caller must hold th_base_lock */
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;
	int res = 0;

	ev = min_heap_top_(&base->timeheap);

	if (ev == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		goto out;
	}

	if (gettime(base, &now) == -1) {
		res = -1;
		goto out;
	}

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		goto out;
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	EVUTIL_ASSERT(tv->tv_sec >= 0);
	EVUTIL_ASSERT(tv->tv_usec >= 0);
	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));

out:
	return (res);
}

/* Activate every event whose timeout has elapsed. */
static void
timeout_process(struct event_base *base)
{
	/* Caller must hold lock. */
	struct timeval now;
	struct event *ev;

	if (min_heap_empty_(&base->timeheap)) {
		return;
	}

	gettime(base, &now);

	while ((ev = min_heap_top_(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);

		event_debug(("timeout_process: event: %p, call %p",
			 ev, ev->ev_callback));
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
}

#if (EVLIST_INTERNAL >> 4) != 1
#error "Mismatch for value of EVLIST_INTERNAL"
#endif

#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif

#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)

/* These are a fancy way to spell
     if (!(flags & EVLIST_INTERNAL))
         base->event_count--/++;
*/
#define DECR_EVENT_COUNT(base,flags) \
	((base)->event_count -= (~((flags) >> 4) & 1))
#define INCR_EVENT_COUNT(base,flags) do {				\
	((base)->event_count += (~((flags) >> 4) & 1));			\
	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);	\
} while (0)
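
/* Worked example (not part of this file): the #if above pins EVLIST_INTERNAL
 * to 0x10, so (flags >> 4) & 1 isolates the "internal" bit and ~bit & 1
 * inverts it:
 *
 *	flags = EVLIST_INSERTED (0x02):	(0x02 >> 4) & 1 = 0, ~0 & 1 = 1
 *	flags = EVLIST_INTERNAL|EVLIST_INSERTED (0x12):	(0x12 >> 4) & 1 = 1, ~1 & 1 = 0
 *
 * The count therefore moves by 1 for user events and by 0 for internal
 * ones, without a branch. */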

static void
event_queue_remove_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_INSERTED;
}
static void
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
			   evcb, EVLIST_ACTIVE);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE;
	base->event_count_active--;

	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
static void
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
			   evcb, EVLIST_ACTIVE_LATER);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
	base->event_count_active--;

	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
}
static void
event_queue_remove_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
	} else {
		min_heap_erase_(&base->timeheap, ev);
	}
}

#ifdef USE_REINSERT_TIMEOUT
/* Remove and reinsert 'ev' into the timeout queue. */
static void
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
    int was_common, int is_common, int old_timeout_idx)
{
	struct common_timeout_list *ctl;
	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_queue_insert_timeout(base, ev);
		return;
	}

	switch ((was_common<<1) | is_common) {
	case 3: /* Changing from one common timeout to another */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 2: /* Was common; is no longer common */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		min_heap_push_(&base->timeheap, ev);
		break;
	case 1: /* Wasn't common; has become common. */
		min_heap_erase_(&base->timeheap, ev);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 0: /* was in heap; is still on heap. */
		min_heap_adjust_(&base->timeheap, ev);
		break;
	default:
		EVUTIL_ASSERT(0); /* unreachable */
		break;
	}
}
#endif

/* Add 'ev' to the common timeout list in 'ctl'. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of the list to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec.  Fortunately, they ought to have
		 * the _same_ magic values in tv_usec.  Let's assert for that.
		 */
		EVUTIL_ASSERT(
			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			return;
		}
	}
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
}

static void
event_queue_insert_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_INSERTED;
}

static void
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (evcb->evcb_flags & EVLIST_ACTIVE) {
		/* Double insertion is possible for active events */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);

	evcb->evcb_flags |= EVLIST_ACTIVE;

	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}

static void
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
		/* Double insertion is possible */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
}

static void
event_queue_insert_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
	} else {
		min_heap_push_(&base->timeheap, ev);
	}
}

static void
event_queue_make_later_events_active(struct event_base *base)
{
	struct event_callback *evcb;
	EVENT_BASE_ASSERT_LOCKED(base);

	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (EVENT__VERSION);
}

ev_uint32_t
event_get_version_number(void)
{
	return (EVENT__NUMERIC_VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}

#ifndef EVENT__DISABLE_MM_REPLACEMENT
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;

void *
event_mm_malloc_(size_t sz)
{
	if (sz == 0)
		return NULL;

	if (mm_malloc_fn_)
		return mm_malloc_fn_(sz);
	else
		return malloc(sz);
}

void *
event_mm_calloc_(size_t count, size_t size)
{
	if (count == 0 || size == 0)
		return NULL;

	if (mm_malloc_fn_) {
		size_t sz = count * size;
		void *p = NULL;
		if (count > EV_SIZE_MAX / size)
			goto error;
		p = mm_malloc_fn_(sz);
		if (p)
			return memset(p, 0, sz);
	} else {
		void *p = calloc(count, size);
#ifdef _WIN32
		/* Windows calloc doesn't reliably set ENOMEM */
		if (p == NULL)
			goto error;
#endif
		return p;
	}

error:
	errno = ENOMEM;
	return NULL;
}

char *
event_mm_strdup_(const char *str)
{
	if (!str) {
		errno = EINVAL;
		return NULL;
	}

	if (mm_malloc_fn_) {
		size_t ln = strlen(str);
		void *p = NULL;
		if (ln == EV_SIZE_MAX)
			goto error;
		p = mm_malloc_fn_(ln+1);
		if (p)
			return memcpy(p, str, ln+1);
	} else
#ifdef _WIN32
		return _strdup(str);
#else
		return strdup(str);
#endif

error:
	errno = ENOMEM;
	return NULL;
}

void *
event_mm_realloc_(void *ptr, size_t sz)
{
	if (mm_realloc_fn_)
		return mm_realloc_fn_(ptr, sz);
	else
		return realloc(ptr, sz);
}

void
event_mm_free_(void *ptr)
{
	if (mm_free_fn_)
		mm_free_fn_(ptr);
	else
		free(ptr);
}

void
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
			void *(*realloc_fn)(void *ptr, size_t sz),
			void (*free_fn)(void *ptr))
{
	mm_malloc_fn_ = malloc_fn;
	mm_realloc_fn_ = realloc_fn;
	mm_free_fn_ = free_fn;
}
#endif
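
/* Illustrative sketch (not part of this file): routing all libevent
 * allocations through a counting allocator.  The counters and wrapper
 * names are hypothetical; replacements must be installed before any other
 * libevent call so every allocation and free uses the same functions.
 *
 *	static size_t allocs, frees;
 *	static void *count_malloc(size_t sz) { allocs++; return malloc(sz); }
 *	static void *count_realloc(void *p, size_t sz) { return realloc(p, sz); }
 *	static void count_free(void *p) { frees++; free(p); }
 *
 *	event_set_mem_functions(count_malloc, count_realloc, count_free);
 */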

#ifdef EVENT__HAVE_EVENTFD
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t msg;
	ev_ssize_t r;
	struct event_base *base = arg;

	r = read(fd, (void*) &msg, sizeof(msg));
	if (r<0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif

static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	unsigned char buf[1024];
	struct event_base *base = arg;
#ifdef _WIN32
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

int
evthread_make_base_notifiable(struct event_base *base)
{
	int r;
	if (!base)
		return -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = evthread_make_base_notifiable_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
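
/* Note (not part of this file): applications rarely need to call
 * evthread_make_base_notifiable() directly; enabling threading (e.g. with
 * evthread_use_pthreads()) before creating the base normally makes it
 * notifiable automatically.  The helper below is idempotent, as its early
 * return shows. */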
Elliott Hughes | 2a572d1 | 2017-08-07 14:18:18 -0700 | [diff] [blame] | 3578 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 3579 | static int |
| 3580 | evthread_make_base_notifiable_nolock_(struct event_base *base) |
| 3581 | { |
| 3582 | void (*cb)(evutil_socket_t, short, void *); |
| 3583 | int (*notify)(struct event_base *); |
| 3584 | |
| 3585 | if (base->th_notify_fn != NULL) { |
| 3586 | /* The base is already notifiable: we're doing fine. */ |
| 3587 | return 0; |
| 3588 | } |
| 3589 | |
| 3590 | #if defined(EVENT__HAVE_WORKING_KQUEUE) |
| 3591 | if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) { |
| 3592 | base->th_notify_fn = event_kq_notify_base_; |
| 3593 | /* No need to add an event here; the backend can wake |
| 3594 | * itself up just fine. */ |
| 3595 | return 0; |
| 3596 | } |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 3597 | #endif |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 3598 | |
| 3599 | #ifdef EVENT__HAVE_EVENTFD |
| 3600 | base->th_notify_fd[0] = evutil_eventfd_(0, |
| 3601 | EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 3602 | if (base->th_notify_fd[0] >= 0) { |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 3603 | base->th_notify_fd[1] = -1; |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 3604 | notify = evthread_notify_base_eventfd; |
| 3605 | cb = evthread_notify_drain_eventfd; |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 3606 | } else |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 3607 | #endif |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 3608 | if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) { |
| 3609 | notify = evthread_notify_base_default; |
| 3610 | cb = evthread_notify_drain_default; |
| 3611 | } else { |
| 3612 | return -1; |
Josh Gao | 83a0c9c | 2017-08-10 12:30:25 -0700 | [diff] [blame] | 3613 | } |
Josh Gao | 83a0c9c | 2017-08-10 12:30:25 -0700 | [diff] [blame] | 3614 | |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 3615 | base->th_notify_fn = notify; |
| 3616 | |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 3617 | /* prepare an event that we can use for wakeup */ |
| 3618 | event_assign(&base->th_notify, base, base->th_notify_fd[0], |
| 3619 | EV_READ|EV_PERSIST, cb, base); |
| 3620 | |
| 3621 | /* we need to mark this as internal event */ |
| 3622 | base->th_notify.ev_flags |= EVLIST_INTERNAL; |
| 3623 | event_priority_set(&base->th_notify, 0); |
| 3624 | |
Narayan Kamath | fc74cb4 | 2017-09-13 12:53:52 +0100 | [diff] [blame] | 3625 | return event_add_nolock_(&base->th_notify, NULL, 0); |
Christopher Wiley | e867981 | 2015-07-01 13:36:18 -0700 | [diff] [blame] | 3626 | } |

int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the common-timeout queues. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't
	 * touched yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (EVLIST_INIT clear),
				 * or we already processed it
				 * (EVLIST_INSERTED or EVLIST_TIMEOUT set). */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}
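
/* Illustrative sketch (not part of the original file): a minimal callback
 * for the traversal above.  Callers of the _nolock_ variant must already
 * hold th_base_lock, and the callback must not add or remove events on
 * the base; returning nonzero stops the walk and is passed back to the
 * caller.
 *
 *	static int
 *	count_event_fn(const struct event_base *base,
 *	    const struct event *e, void *arg)
 *	{
 *		int *np = arg;
 *		++*np;
 *		return 0;
 *	}
 */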

/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the inserted events. */
static int
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
		return 0;

	fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
	    (e->ev_events&EV_READ)?" Read":"",
	    (e->ev_events&EV_WRITE)?" Write":"",
	    (e->ev_events&EV_CLOSED)?" EOF":"",
	    (e->ev_events&EV_SIGNAL)?" Signal":"",
	    (e->ev_events&EV_PERSIST)?" Persist":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
	if (e->ev_flags & EVLIST_TIMEOUT) {
		struct timeval tv;
		tv.tv_sec = e->ev_timeout.tv_sec;
		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
		fprintf(output, " Timeout=%ld.%06d",
		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
	}
	fputc('\n', output);

	return 0;
}
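
/* Example of the kind of line produced above (illustrative; the pointer
 * value and descriptor are made up):
 *
 *	0x55f0c2a1b2c0 [fd  7] Read Persist Timeout=1500123456.042000
 */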

/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the active events. */
static int
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
		return 0;

	fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
	    (e->ev_res&EV_READ)?" Read":"",
	    (e->ev_res&EV_WRITE)?" Write":"",
	    (e->ev_res&EV_CLOSED)?" EOF":"",
	    (e->ev_res&EV_SIGNAL)?" Signal":"",
	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");

	return 0;
}

int
event_base_foreach_event(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r;
	if ((!fn) || (!base)) {
		return -1;
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_base_foreach_event_nolock_(base, fn, arg);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
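
/* Usage sketch (illustrative, not part of the original file), using the
 * hypothetical count_event_fn shown earlier; this public wrapper holds
 * the base lock for the duration of the walk:
 *
 *	int n = 0;
 *	if (event_base_foreach_event(base, count_event_fn, &n) == 0)
 *		printf("%d events\n", n);
 */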


void
event_base_dump_events(struct event_base *base, FILE *output)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	fprintf(output, "Inserted events:\n");
	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);

	fprintf(output, "Active events:\n");
	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
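
/* Debugging sketch (illustrative): dump the state of a base to stderr.
 *
 *	event_base_dump_events(base, stderr);
 */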

void
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
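
/* Usage sketch (illustrative): manually activate every event watching a
 * descriptor or a signal, as if the condition had been reported by the
 * backend.  Note the fd variant masks out everything except
 * EV_READ|EV_WRITE|EV_CLOSED.
 *
 *	event_base_active_by_fd(base, fd, EV_READ);
 *	event_base_active_by_signal(base, SIGHUP);
 */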


void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
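
/* Note (added commentary; my understanding, not original documentation):
 * a "virtual" event is a placeholder counted as a pending event so the
 * loop does not exit as idle while external machinery (e.g. IOCP-based
 * bufferevents) still has work outstanding; dropping the last one
 * notifies the loop so it can re-evaluate whether to exit. */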

static void
event_free_debug_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_map_lock_ != NULL) {
		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
		event_debug_map_lock_ = NULL;
		evthreadimpl_disable_lock_debugging_();
	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
	return;
}

static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}

static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}

static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}

static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}

void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}
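
/* Usage sketch (illustrative): release libevent's global state once, at
 * process exit, after every base and other libevent object has been
 * freed.
 *
 *	event_base_free(base);
 *	libevent_global_shutdown();
 */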

#ifndef EVENT__DISABLE_THREAD_SUPPORT
int
event_global_setup_locks_(const int enable_locks)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
#endif
	if (evsig_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
		return -1;
	return 0;
}
#endif

void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_assert_ok_nolock_(struct event_base *base)
{
	int i;
	int count;

	/* First do checks on the per-fd and per-signal lists */
	evmap_check_integrity_(base);

	/* Check the heap property */
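	/* Worked example (added for clarity): in the array layout of the
	 * binary min-heap, the children of slot k live at 2k+1 and 2k+2,
	 * so the parent of slot i is (i - 1) / 2; for i = 1..4 the parents
	 * are slots 0, 0, 1, 1, and each parent's deadline must be <= its
	 * child's. */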
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last=NULL, *ev;

		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);

		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	/* Check the active queues. */
	count = 0;
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
			EVUTIL_ASSERT(evcb->evcb_pri == i);
			++count;
		}
	}

	{
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
			++count;
		}
	}
	EVUTIL_ASSERT(count == base->event_count_active);
}