/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <limits.h>

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"


#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};

/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */

static void *event_self_cbarg_ptr_ = NULL;

/* Prototypes */
static void event_queue_insert_active(struct event_base *, struct event_callback *);
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void event_queue_insert_timeout(struct event_base *, struct event *);
static void event_queue_insert_inserted(struct event_base *, struct event *);
static void event_queue_remove_active(struct event_base *, struct event_callback *);
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void event_queue_remove_timeout(struct event_base *, struct event *);
static void event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void event_queue_reinsert_timeout(struct event_base *, struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int event_haveevents(struct event_base *);

static int event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);

static inline void event_signal_closure(struct event_base *, struct event *ev);
static inline void event_persist_closure(struct event_base *, struct event *ev);

static int evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);

#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;
	const struct event *ptr;
	unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
	/* We need to do this silliness to convince compilers that we
	 * honestly mean to cast e->ptr to an integer, and discard any
	 * part of it that doesn't fit in an unsigned.
	 */
	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
	/* Our hashtable implementation is pretty sensitive to low bits,
	 * and every struct event is over 64 bytes in size, so we can
	 * just say >>6. */
	return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
	return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;


#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief debug mode variable which is set for any function/structure that needs
 *        to be shared across threads (if thread support is enabled).
 *
 * When and if evthreads are initialized, this variable will be evaluated,
 * and if set to something other than zero, this means the evthread setup
 * functions were called out of order.
 *
 * See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)

/* Macro: record that ev is now setup (that is, ready for an add) */
#define event_debug_note_setup_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) { \
			dent->added = 0; \
		} else { \
			dent = mm_malloc(sizeof(*dent)); \
			if (!dent) \
				event_err(1, \
				    "Out of memory in debugging code"); \
			dent->ptr = (ev); \
			dent->added = 0; \
			HT_INSERT(event_debug_map, &global_debug_map, dent); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: record that ev is no longer setup */
#define event_debug_note_teardown_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
		if (dent) \
			mm_free(dent); \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: record that ev is now added */
#define event_debug_note_add_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) { \
			dent->added = 1; \
		} else { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s: noting an add on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT \
			    ", flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: record that ev is no longer added */
#define event_debug_note_del_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) { \
			dent->added = 0; \
		} else { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s: noting a del on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT \
			    ", flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	event_debug_mode_too_late = 1; \
	} while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define event_debug_assert_is_setup_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (!dent) { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s called on a non-initialized event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT \
			    ", flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	} while (0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up again) */
#define event_debug_assert_not_added_(ev) do { \
	if (event_debug_mode_on_) { \
		struct event_debug_entry *dent, find; \
		find.ptr = (ev); \
		EVLOCK_LOCK(event_debug_map_lock_, 0); \
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent && dent->added) { \
			event_errx(EVENT_ERR_ABORT_, \
			    "%s called on an already added event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT", " \
			    "flags: 0x%x)", \
			    __func__, (ev), (ev)->ev_events, \
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
		} \
		EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
	} \
	} while (0)
#else
#define event_debug_note_setup_(ev) \
	((void)0)
#define event_debug_note_teardown_(ev) \
	((void)0)
#define event_debug_note_add_(ev) \
	((void)0)
#define event_debug_note_del_(ev) \
	((void)0)
#define event_debug_assert_is_setup_(ev) \
	((void)0)
#define event_debug_assert_not_added_(ev) \
	((void)0)
#endif

#define EVENT_BASE_ASSERT_LOCKED(base) \
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv, NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}

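/** Fill 'tv' with the cached time for 'base' (converted back to wall-clock
 * time with the stored clock offset), avoiding a syscall while the loop is
 * running.  Falls back to evutil_gettimeofday() when there is no base or no
 * cached value.  Returns 0 on success, -1 on failure. */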
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		r = evutil_gettimeofday(tv, NULL);
	} else {
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}

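/** Re-read the current time into 'base's time cache, but only while the
 * event loop is running.  Uses current_base when 'base' is NULL; returns -1
 * if there is no base to update, 0 otherwise. */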
int
event_base_update_cache_time(struct event_base *base)
{

	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}

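/* Helpers to convert between a struct event and the event_callback embedded
 * inside it.  The upcast is only legal when the callback really belongs to a
 * struct event (checked via the EVLIST_INIT flag). */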
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new_with_config(NULL);

	if (base == NULL) {
		event_errx(1, "%s: Unable to construct event_base", __func__);
		return NULL;
	}

	current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	struct event_base *base = NULL;
	struct event_config *cfg = event_config_new();
	if (cfg) {
		base = event_base_new_with_config(cfg);
		event_config_free(cfg);
	}
	return base;
}

/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
	struct event_config_entry *entry;

	TAILQ_FOREACH(entry, &cfg->entries, next) {
		if (entry->avoid_method != NULL &&
		    strcmp(entry->avoid_method, method) == 0)
			return (1);
	}

	return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}

int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}

void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases", __func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}

void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

	event_debug_mode_on_ = 0;
#endif
}

struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
	int i;
	struct event_base *base;
	int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
	event_debug_mode_too_late = 1;
#endif

	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
		event_warn("%s: calloc", __func__);
		return NULL;
	}

	if (cfg)
		base->flags = cfg->flags;

	should_check_environment =
	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

	{
		struct timeval tmp;
		int precise_time =
		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
		int flags;
		if (should_check_environment && !precise_time) {
			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
			if (precise_time)
				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
		}
		flags = precise_time ? EV_MONOT_PRECISE : 0;
		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

		gettime(base, &tmp);
	}

	min_heap_ctor_(&base->timeheap);

	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;
	base->th_notify_fd[0] = -1;
	base->th_notify_fd[1] = -1;

	TAILQ_INIT(&base->active_later_queue);

	evmap_io_initmap_(&base->io);
	evmap_signal_initmap_(&base->sigmap);
	event_changelist_init_(&base->changelist);

	base->evbase = NULL;

	if (cfg) {
		memcpy(&base->max_dispatch_time,
		    &cfg->max_dispatch_interval, sizeof(struct timeval));
		base->limit_callbacks_after_prio =
		    cfg->limit_callbacks_after_prio;
	} else {
		base->max_dispatch_time.tv_sec = -1;
		base->limit_callbacks_after_prio = 1;
	}
	if (cfg && cfg->max_dispatch_callbacks >= 0) {
		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
	} else {
		base->max_dispatch_callbacks = INT_MAX;
	}
	if (base->max_dispatch_callbacks == INT_MAX &&
	    base->max_dispatch_time.tv_sec == -1)
		base->limit_callbacks_after_prio = INT_MAX;

	for (i = 0; eventops[i] && !base->evbase; i++) {
		if (cfg != NULL) {
			/* determine if this backend should be avoided */
			if (event_config_is_avoided_method(cfg,
			    eventops[i]->name))
				continue;
			if ((eventops[i]->features & cfg->require_features)
			    != cfg->require_features)
				continue;
		}

		/* also obey the environment variables */
		if (should_check_environment &&
		    event_is_method_disabled(eventops[i]->name))
			continue;

		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL) {
		event_warnx("%s: no event mechanism available",
		    __func__);
		base->evsel = NULL;
		event_base_free(base);
		return NULL;
	}

	if (evutil_getenv_("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	if (event_base_priority_init(base, 1) < 0) {
		event_base_free(base);
		return NULL;
	}

	/* prepare for threading */

#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
	event_debug_created_threadable_ctx_ = 1;
#endif

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (EVTHREAD_LOCKING_ENABLED() &&
	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
		int r;
		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
		EVTHREAD_ALLOC_COND(base->current_event_cond);
		r = evthread_make_base_notifiable(base);
		if (r < 0) {
			event_warnx("%s: Unable to make base notifiable.", __func__);
			event_base_free(base);
			return NULL;
		}
	}
#endif

#ifdef _WIN32
	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
		event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

	return (base);
}

int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}

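/** Cancel one pending callback while tearing down a base.  A callback that
 * belongs to a non-internal event is deleted outright; a bare callback is
 * cancelled under the lock.  When 'run_finalizers' is set, any pending
 * finalize closure is invoked as well.  Returns 1 if something was
 * cancelled, 0 otherwise. */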
static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}

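/** Flush every active and active-later callback queue of 'base', cancelling
 * each callback via event_base_cancel_single_callback_().  Returns how many
 * callbacks were cancelled so the caller can loop until nothing remains. */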
static int event_base_free_queues_(struct event_base *base, int run_finalizers)
{
	int deleted = 0, i;

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			next = TAILQ_NEXT(evcb, evcb_active_next);
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}

	{
		struct event_callback *evcb;
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}

	return deleted;
}

static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted = 0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (;;) {
		/* A finalizer can register yet another finalizer from inside a
		 * finalizer; if that one lands in active_later_queue it can be
		 * moved to activequeues, and then we would still have events in
		 * activequeues after this function returns, which is not what
		 * we want (we even have an assertion for this).
		 *
		 * A simple case is a bufferevent with an underlying bufferevent
		 * (i.e. filters).
		 */
		int i = event_base_free_queues_(base, run_finalizers);
		if (!i) {
			break;
		}
		n_deleted += i;
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}

void
event_base_free_nofinalize(struct event_base *base)
{
	event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
	event_base_free_(base, 1);
}

/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
	return 0;
}
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0
};

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, so we temporarily stub out
		 * the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
	base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process. For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure. If we didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			    "%s: could not reinitialize event mechanism",
			    __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}

/* Get the monotonic time for this event_base's timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
	int rv = -1;

	if (base && tv) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return rv;
}

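/* Build (and cache in a static pointer) a NULL-terminated array with the
 * name of every backend compiled into this libevent. */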
const char **
event_get_supported_methods(void)
{
	static const char **methods = NULL;
	const struct eventop **method;
	const char **tmp;
	int i = 0, k;

	/* count all methods */
	for (method = &eventops[0]; *method != NULL; ++method) {
		++i;
	}

	/* allocate one more than we need for the NULL pointer */
	tmp = mm_calloc((i + 1), sizeof(char *));
	if (tmp == NULL)
		return (NULL);

	/* populate the array with the supported methods */
	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
		tmp[i++] = eventops[k]->name;
	}
	tmp[i] = NULL;

	if (methods != NULL)
		mm_free((char**)methods);

	methods = tmp;

	return (methods);
}

struct event_config *
event_config_new(void)
{
	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

	if (cfg == NULL)
		return (NULL);

	TAILQ_INIT(&cfg->entries);
	cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks = INT_MAX;
	cfg->limit_callbacks_after_prio = 1;

	return (cfg);
}

static void
event_config_entry_free(struct event_config_entry *entry)
{
	if (entry->avoid_method != NULL)
		mm_free((char *)entry->avoid_method);
	mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
	struct event_config_entry *entry;

	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
		TAILQ_REMOVE(&cfg->entries, entry, next);
		event_config_entry_free(entry);
	}
	mm_free(cfg);
}

int
event_config_set_flag(struct event_config *cfg, int flag)
{
	if (!cfg)
		return -1;
	cfg->flags |= flag;
	return 0;
}

int
event_config_avoid_method(struct event_config *cfg, const char *method)
{
	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
	if (entry == NULL)
		return (-1);

	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
		mm_free(entry);
		return (-1);
	}

	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

	return (0);
}

int
event_config_require_features(struct event_config *cfg,
    int features)
{
	if (!cfg)
		return (-1);
	cfg->require_features = features;
	return (0);
}

int
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
{
	if (!cfg)
		return (-1);
	cfg->n_cpus_hint = cpus;
	return (0);
}

int
event_config_set_max_dispatch_interval(struct event_config *cfg,
    const struct timeval *max_interval, int max_callbacks, int min_priority)
{
	if (max_interval)
		memcpy(&cfg->max_dispatch_interval, max_interval,
		    sizeof(struct timeval));
	else
		cfg->max_dispatch_interval.tv_sec = -1;
	cfg->max_dispatch_callbacks =
	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
	if (min_priority < 0)
		min_priority = 0;
	cfg->limit_callbacks_after_prio = min_priority;
	return (0);
}

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

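/* Configure 'base' to use 'npriorities' queues of active callbacks.  Fails
 * if callbacks are currently active or the count is out of range; succeeds
 * trivially when the count is unchanged. */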
int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i, r;
	r = -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
	    || npriorities >= EVENT_MAX_PRIORITIES)
		goto err;

	if (npriorities == base->nactivequeues)
		goto ok;

	if (base->nactivequeues) {
		mm_free(base->activequeues);
		base->nactivequeues = 0;
	}

	/* Allocate our priority queues */
	base->activequeues = (struct evcallback_list *)
	    mm_calloc(npriorities, sizeof(struct evcallback_list));
	if (base->activequeues == NULL) {
		event_warn("%s: calloc", __func__);
		goto err;
	}
	base->nactivequeues = npriorities;

	for (i = 0; i < base->nactivequeues; ++i) {
		TAILQ_INIT(&base->activequeues[i]);
	}

ok:
	r = 0;
err:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (r);
}

int
event_base_get_npriorities(struct event_base *base)
{

	int n;
	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	n = base->nactivequeues;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (n);
}

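/* Sum the counters selected by 'type' (active, virtual, and/or added
 * events) for 'base', taking the base lock while reading them. */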
int
event_base_get_num_events(struct event_base *base, unsigned int type)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE)
		r += base->event_count_active;

	if (type & EVENT_BASE_COUNT_VIRTUAL)
		r += base->virtual_event_count;

	if (type & EVENT_BASE_COUNT_ADDED)
		r += base->event_count;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}

int
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
{
	int r = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (type & EVENT_BASE_COUNT_ACTIVE) {
		r += base->event_count_active_max;
		if (clear)
			base->event_count_active_max = 0;
	}

	if (type & EVENT_BASE_COUNT_VIRTUAL) {
		r += base->virtual_event_count_max;
		if (clear)
			base->virtual_event_count_max = 0;
	}

	if (type & EVENT_BASE_COUNT_ADDED) {
		r += base->event_count_max;
		if (clear)
			base->event_count_max = 0;
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return r;
}

/* Returns true iff we're currently watching any events. */
static int
event_haveevents(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	return (base->virtual_event_count > 0 || base->event_count > 0);
}

/* "closure" function called when processing active signal events */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
	short ncalls;
	int should_break;

	/* Allows deletes to work */
	ncalls = ev->ev_ncalls;
	if (ncalls != 0)
		ev->ev_pncalls = &ncalls;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	while (ncalls) {
		ncalls--;
		ev->ev_ncalls = ncalls;
		if (ncalls == 0)
			ev->ev_pncalls = NULL;
		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		should_break = base->event_break;
		EVBASE_RELEASE_LOCK(base, th_base_lock);

		if (should_break) {
			if (ncalls != 0)
				ev->ev_pncalls = NULL;
			return;
		}
	}
}

/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousands of timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use "magic pointer" to encode these common
 * timeouts.  Searching through a list to see if every timeout is common could
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */

#define MICROSECONDS_MASK COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK 0xf0000000
#define COMMON_TIMEOUT_MAGIC 0x50000000

#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)

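/* Worked example of the encoding (illustrative values): a common timeout at
 * index 3 with a duration of 5.250000 seconds is stored as
 *     tv_sec  = 5
 *     tv_usec = 250000 | COMMON_TIMEOUT_MAGIC | (3 << COMMON_TIMEOUT_IDX_SHIFT)
 *             = 0x5033d090
 * so COMMON_TIMEOUT_IDX() recovers the index 3, and masking with
 * MICROSECONDS_MASK recovers the 250000 microseconds. */
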
/** Return true iff 'tv' is a common timeout in 'base' */
1364static inline int
1365is_common_timeout(const struct timeval *tv,
1366 const struct event_base *base)
1367{
1368 int idx;
1369 if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
1370 return 0;
1371 idx = COMMON_TIMEOUT_IDX(tv);
1372 return idx < base->n_common_timeouts;
1373}
1374
1375/* True iff tv1 and tv2 have the same common-timeout index, or if neither
1376 * one is a common timeout. */
1377static inline int
1378is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
1379{
1380 return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
1381 (tv2->tv_usec & ~MICROSECONDS_MASK);
1382}
1383
1384/** Requires that 'tv' is a common timeout. Return the corresponding
1385 * common_timeout_list. */
1386static inline struct common_timeout_list *
1387get_common_timeout_list(struct event_base *base, const struct timeval *tv)
1388{
1389 return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
1390}
1391
1392#if 0
1393static inline int
1394common_timeout_ok(const struct timeval *tv,
1395 struct event_base *base)
1396{
1397 const struct timeval *expect =
1398 &get_common_timeout_list(base, tv)->duration;
1399 return tv->tv_sec == expect->tv_sec &&
1400 tv->tv_usec == expect->tv_usec;
1401}
1402#endif
1403
1404/* Add the timeout for the first event in given common timeout list to the
1405 * event_base's minheap. */
1406static void
1407common_timeout_schedule(struct common_timeout_list *ctl,
1408 const struct timeval *now, struct event *head)
1409{
1410 struct timeval timeout = head->ev_timeout;
1411 timeout.tv_usec &= MICROSECONDS_MASK;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001412 event_add_nolock_(&ctl->timeout_event, &timeout, 1);
Christopher Wileye8679812015-07-01 13:36:18 -07001413}
1414
1415/* Callback: invoked when the timeout for a common timeout queue triggers.
1416 * This means that (at least) the first event in that queue should be run,
1417 * and the timeout should be rescheduled if there are more events. */
1418static void
1419common_timeout_callback(evutil_socket_t fd, short what, void *arg)
1420{
1421 struct timeval now;
1422 struct common_timeout_list *ctl = arg;
1423 struct event_base *base = ctl->base;
1424 struct event *ev = NULL;
1425 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1426 gettime(base, &now);
1427 while (1) {
1428 ev = TAILQ_FIRST(&ctl->events);
1429 if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
1430 (ev->ev_timeout.tv_sec == now.tv_sec &&
1431 (ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
1432 break;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001433 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1434 event_active_nolock_(ev, EV_TIMEOUT, 1);
Christopher Wileye8679812015-07-01 13:36:18 -07001435 }
1436 if (ev)
1437 common_timeout_schedule(ctl, &now, ev);
1438 EVBASE_RELEASE_LOCK(base, th_base_lock);
1439}
1440
1441#define MAX_COMMON_TIMEOUTS 256
1442
1443const struct timeval *
1444event_base_init_common_timeout(struct event_base *base,
1445 const struct timeval *duration)
1446{
1447 int i;
1448 struct timeval tv;
1449 const struct timeval *result=NULL;
1450 struct common_timeout_list *new_ctl;
1451
1452 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1453 if (duration->tv_usec > 1000000) {
1454 memcpy(&tv, duration, sizeof(struct timeval));
1455 if (is_common_timeout(duration, base))
1456 tv.tv_usec &= MICROSECONDS_MASK;
1457 tv.tv_sec += tv.tv_usec / 1000000;
1458 tv.tv_usec %= 1000000;
1459 duration = &tv;
1460 }
1461 for (i = 0; i < base->n_common_timeouts; ++i) {
1462 const struct common_timeout_list *ctl =
1463 base->common_timeout_queues[i];
1464 if (duration->tv_sec == ctl->duration.tv_sec &&
1465 duration->tv_usec ==
1466 (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
1467 EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
1468 result = &ctl->duration;
1469 goto done;
1470 }
1471 }
1472 if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
1473 event_warnx("%s: Too many common timeouts already in use; "
1474 "we only support %d per event_base", __func__,
1475 MAX_COMMON_TIMEOUTS);
1476 goto done;
1477 }
1478 if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
1479 int n = base->n_common_timeouts < 16 ? 16 :
1480 base->n_common_timeouts*2;
1481 struct common_timeout_list **newqueues =
1482 mm_realloc(base->common_timeout_queues,
1483 n*sizeof(struct common_timeout_queue *));
1484 if (!newqueues) {
1485 event_warn("%s: realloc",__func__);
1486 goto done;
1487 }
1488 base->n_common_timeouts_allocated = n;
1489 base->common_timeout_queues = newqueues;
1490 }
1491 new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
1492 if (!new_ctl) {
1493 event_warn("%s: calloc",__func__);
1494 goto done;
1495 }
1496 TAILQ_INIT(&new_ctl->events);
1497 new_ctl->duration.tv_sec = duration->tv_sec;
1498 new_ctl->duration.tv_usec =
1499 duration->tv_usec | COMMON_TIMEOUT_MAGIC |
1500 (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
1501 evtimer_assign(&new_ctl->timeout_event, base,
1502 common_timeout_callback, new_ctl);
1503 new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
1504 event_priority_set(&new_ctl->timeout_event, 0);
1505 new_ctl->base = base;
1506 base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
1507 result = &new_ctl->duration;
1508
1509done:
1510 if (result)
1511 EVUTIL_ASSERT(is_common_timeout(result, base));
1512
1513 EVBASE_RELEASE_LOCK(base, th_base_lock);
1514 return result;
1515}
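
/* Usage sketch (illustrative only; "base" and "ev" are assumed to exist in
 * the caller): code that adds many events with the same interval can flag
 * that interval as a common timeout once and reuse the returned timeval for
 * every event_add(), so the duration is serviced by one shared queue instead
 * of one min-heap entry per event:
 *
 *	struct timeval ten_sec = { 10, 0 };
 *	const struct timeval *common =
 *	    event_base_init_common_timeout(base, &ten_sec);
 *	event_add(ev, common ? common : &ten_sec);
 */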
1516
1517/* Closure function invoked when we're activating a persistent event. */
1518static inline void
1519event_persist_closure(struct event_base *base, struct event *ev)
1520{
Christopher Wileye8679812015-07-01 13:36:18 -07001521 void (*evcb_callback)(evutil_socket_t, short, void *);
1522
1523 // Other fields of *ev that must be stored before executing
1524 evutil_socket_t evcb_fd;
1525 short evcb_res;
1526 void *evcb_arg;
1527
1528 /* reschedule the persistent event if we have a timeout. */
1529 if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
1530 /* If there was a timeout, we want it to run at an interval of
1531 * ev_io_timeout after the last time it was _scheduled_ for,
1532 * not ev_io_timeout after _now_. If it fired for another
1533 * reason, though, the timeout ought to start ticking _now_. */
1534 struct timeval run_at, relative_to, delay, now;
1535 ev_uint32_t usec_mask = 0;
1536 EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
1537 &ev->ev_io_timeout));
1538 gettime(base, &now);
1539 if (is_common_timeout(&ev->ev_timeout, base)) {
1540 delay = ev->ev_io_timeout;
1541 usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
1542 delay.tv_usec &= MICROSECONDS_MASK;
1543 if (ev->ev_res & EV_TIMEOUT) {
1544 relative_to = ev->ev_timeout;
1545 relative_to.tv_usec &= MICROSECONDS_MASK;
1546 } else {
1547 relative_to = now;
1548 }
1549 } else {
1550 delay = ev->ev_io_timeout;
1551 if (ev->ev_res & EV_TIMEOUT) {
1552 relative_to = ev->ev_timeout;
1553 } else {
1554 relative_to = now;
1555 }
1556 }
1557 evutil_timeradd(&relative_to, &delay, &run_at);
1558 if (evutil_timercmp(&run_at, &now, <)) {
1559 /* Looks like we missed at least one invocation due to
1560 * a clock jump, not running the event loop for a
1561 * while, really slow callbacks, or
1562 * something. Reschedule relative to now.
1563 */
1564 evutil_timeradd(&now, &delay, &run_at);
1565 }
1566 run_at.tv_usec |= usec_mask;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001567 event_add_nolock_(ev, &run_at, 1);
Christopher Wileye8679812015-07-01 13:36:18 -07001568 }
1569
1570 // Save our callback before we release the lock
1571 evcb_callback = ev->ev_callback;
1572 evcb_fd = ev->ev_fd;
1573 evcb_res = ev->ev_res;
1574 evcb_arg = ev->ev_arg;
1575
1576 // Release the lock
1577 EVBASE_RELEASE_LOCK(base, th_base_lock);
1578
1579 // Execute the callback
1580 (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
1581}
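
/* Illustrative sketch (caller-side names "base" and "on_tick" are assumed):
 * for a persistent timer added with a one-second interval, the closure above
 * re-adds the event relative to the time it was last _scheduled_ for, so the
 * callback fires on a steady one-second cadence instead of drifting by the
 * callback's own run time:
 *
 *	struct timeval one_sec = { 1, 0 };
 *	struct event *tick = event_new(base, -1, EV_PERSIST, on_tick, NULL);
 *	event_add(tick, &one_sec);
 */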
1582
1583/*
1584 Helper for event_process_active to process all the events in a single queue,
1585 releasing the lock as we go. This function requires that the lock be held
1586 when it's invoked. Returns -1 if we get a signal or an event_break that
1587 means we should stop processing any active events now. Otherwise returns
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001588 the number of non-internal event_callbacks that we processed.
Christopher Wileye8679812015-07-01 13:36:18 -07001589*/
1590static int
1591event_process_active_single_queue(struct event_base *base,
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001592 struct evcallback_list *activeq,
1593 int max_to_process, const struct timeval *endtime)
Christopher Wileye8679812015-07-01 13:36:18 -07001594{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001595 struct event_callback *evcb;
Christopher Wileye8679812015-07-01 13:36:18 -07001596 int count = 0;
1597
1598 EVUTIL_ASSERT(activeq != NULL);
1599
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001600 for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
1601 struct event *ev=NULL;
1602 if (evcb->evcb_flags & EVLIST_INIT) {
1603 ev = event_callback_to_event(evcb);
1604
1605 if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
1606 event_queue_remove_active(base, evcb);
1607 else
1608 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
1609 event_debug((
1610 "event_process_active: event: %p, %s%s%scall %p",
1611 ev,
1612 ev->ev_res & EV_READ ? "EV_READ " : " ",
1613 ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
1614 ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
1615 ev->ev_callback));
1616 } else {
1617 event_queue_remove_active(base, evcb);
1618 event_debug(("event_process_active: event_callback %p, "
1619 "closure %d, call %p",
1620 evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
1621 }
1622
1623 if (!(evcb->evcb_flags & EVLIST_INTERNAL))
Christopher Wileye8679812015-07-01 13:36:18 -07001624 ++count;
1625
Christopher Wileye8679812015-07-01 13:36:18 -07001626
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001627 base->current_event = evcb;
1628#ifndef EVENT__DISABLE_THREAD_SUPPORT
Christopher Wileye8679812015-07-01 13:36:18 -07001629 base->current_event_waiters = 0;
1630#endif
1631
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001632 switch (evcb->evcb_closure) {
1633 case EV_CLOSURE_EVENT_SIGNAL:
1634 EVUTIL_ASSERT(ev != NULL);
Christopher Wileye8679812015-07-01 13:36:18 -07001635 event_signal_closure(base, ev);
1636 break;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001637 case EV_CLOSURE_EVENT_PERSIST:
1638 EVUTIL_ASSERT(ev != NULL);
Christopher Wileye8679812015-07-01 13:36:18 -07001639 event_persist_closure(base, ev);
1640 break;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001641 case EV_CLOSURE_EVENT: {
1642 void (*evcb_callback)(evutil_socket_t, short, void *);
1643 EVUTIL_ASSERT(ev != NULL);
1644 evcb_callback = *ev->ev_callback;
Josh Gao83a0c9c2017-08-10 12:30:25 -07001645 EVBASE_RELEASE_LOCK(base, th_base_lock);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001646 evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
1647 }
1648 break;
1649 case EV_CLOSURE_CB_SELF: {
1650 void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
1651 EVBASE_RELEASE_LOCK(base, th_base_lock);
1652 evcb_selfcb(evcb, evcb->evcb_arg);
1653 }
1654 break;
1655 case EV_CLOSURE_EVENT_FINALIZE:
1656 case EV_CLOSURE_EVENT_FINALIZE_FREE: {
1657 void (*evcb_evfinalize)(struct event *, void *);
1658 int evcb_closure = evcb->evcb_closure;
1659 EVUTIL_ASSERT(ev != NULL);
1660 base->current_event = NULL;
1661 evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
1662 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1663 EVBASE_RELEASE_LOCK(base, th_base_lock);
1664 evcb_evfinalize(ev, ev->ev_arg);
1665 event_debug_note_teardown_(ev);
1666 if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
1667 mm_free(ev);
1668 }
1669 break;
1670 case EV_CLOSURE_CB_FINALIZE: {
1671 void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
1672 base->current_event = NULL;
1673 EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
1674 EVBASE_RELEASE_LOCK(base, th_base_lock);
1675 evcb_cbfinalize(evcb, evcb->evcb_arg);
1676 }
1677 break;
1678 default:
1679 EVUTIL_ASSERT(0);
Christopher Wileye8679812015-07-01 13:36:18 -07001680 }
1681
1682 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
Christopher Wileye8679812015-07-01 13:36:18 -07001683 base->current_event = NULL;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001684#ifndef EVENT__DISABLE_THREAD_SUPPORT
Christopher Wileye8679812015-07-01 13:36:18 -07001685 if (base->current_event_waiters) {
1686 base->current_event_waiters = 0;
1687 EVTHREAD_COND_BROADCAST(base->current_event_cond);
1688 }
1689#endif
1690
1691 if (base->event_break)
1692 return -1;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001693 if (count >= max_to_process)
1694 return count;
1695 if (count && endtime) {
1696 struct timeval now;
1697 update_time_cache(base);
1698 gettime(base, &now);
1699 if (evutil_timercmp(&now, endtime, >=))
1700 return count;
1701 }
Christopher Wileye8679812015-07-01 13:36:18 -07001702 if (base->event_continue)
1703 break;
1704 }
1705 return count;
1706}
1707
1708/*
Christopher Wileye8679812015-07-01 13:36:18 -07001709 * Active events are stored in priority queues. Lower priorities are always
1710	 * processed before higher priorities.  Low priority events can starve high
1711 * priority ones.
1712 */
1713
1714static int
1715event_process_active(struct event_base *base)
1716{
1717 /* Caller must hold th_base_lock */
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001718 struct evcallback_list *activeq = NULL;
Christopher Wileye8679812015-07-01 13:36:18 -07001719 int i, c = 0;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001720 const struct timeval *endtime;
1721 struct timeval tv;
1722 const int maxcb = base->max_dispatch_callbacks;
1723 const int limit_after_prio = base->limit_callbacks_after_prio;
1724 if (base->max_dispatch_time.tv_sec >= 0) {
1725 update_time_cache(base);
1726 gettime(base, &tv);
1727 evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
1728 endtime = &tv;
1729 } else {
1730 endtime = NULL;
1731 }
Christopher Wileye8679812015-07-01 13:36:18 -07001732
1733 for (i = 0; i < base->nactivequeues; ++i) {
1734 if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
1735 base->event_running_priority = i;
1736 activeq = &base->activequeues[i];
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001737 if (i < limit_after_prio)
1738 c = event_process_active_single_queue(base, activeq,
1739 INT_MAX, NULL);
1740 else
1741 c = event_process_active_single_queue(base, activeq,
1742 maxcb, endtime);
Christopher Wileye8679812015-07-01 13:36:18 -07001743 if (c < 0) {
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001744 goto done;
Christopher Wileye8679812015-07-01 13:36:18 -07001745 } else if (c > 0)
1746 break; /* Processed a real event; do not
1747 * consider lower-priority events */
1748 /* If we get here, all of the events we processed
1749 * were internal. Continue. */
1750 }
1751 }
1752
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001753done:
Christopher Wileye8679812015-07-01 13:36:18 -07001754 base->event_running_priority = -1;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001755
Christopher Wileye8679812015-07-01 13:36:18 -07001756 return c;
1757}
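
/* Hedged example of the priority machinery this function honors ("base",
 * "fd1", "fd2" and the callbacks are hypothetical): with two queues, events
 * at priority 0 are dispatched before anything at priority 1 on each pass,
 * which is also how a busy high-priority event can starve the rest:
 *
 *	event_base_priority_init(base, 2);
 *	struct event *urgent = event_new(base, fd1, EV_READ, urgent_cb, NULL);
 *	struct event *bulk   = event_new(base, fd2, EV_READ, bulk_cb, NULL);
 *	event_priority_set(urgent, 0);
 *	event_priority_set(bulk, 1);
 */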
1758
1759/*
1760 * Wait continuously for events. We exit only if no events are left.
1761 */
1762
1763int
1764event_dispatch(void)
1765{
1766 return (event_loop(0));
1767}
1768
1769int
1770event_base_dispatch(struct event_base *event_base)
1771{
1772 return (event_base_loop(event_base, 0));
1773}
1774
1775const char *
1776event_base_get_method(const struct event_base *base)
1777{
1778 EVUTIL_ASSERT(base);
1779 return (base->evsel->name);
1780}
1781
1782/** Callback: used to implement event_base_loopexit by telling the event_base
1783 * that it's time to exit its loop. */
1784static void
1785event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
1786{
1787 struct event_base *base = arg;
1788 base->event_gotterm = 1;
1789}
1790
1791int
1792event_loopexit(const struct timeval *tv)
1793{
1794 return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
1795 current_base, tv));
1796}
1797
1798int
1799event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
1800{
1801 return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
1802 event_base, tv));
1803}
1804
1805int
1806event_loopbreak(void)
1807{
1808 return (event_base_loopbreak(current_base));
1809}
1810
1811int
1812event_base_loopbreak(struct event_base *event_base)
1813{
1814 int r = 0;
1815 if (event_base == NULL)
1816 return (-1);
1817
1818 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1819 event_base->event_break = 1;
1820
1821 if (EVBASE_NEED_NOTIFY(event_base)) {
1822 r = evthread_notify_base(event_base);
1823 } else {
1824 r = (0);
1825 }
1826 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1827 return r;
1828}
1829
1830int
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001831event_base_loopcontinue(struct event_base *event_base)
1832{
1833 int r = 0;
1834 if (event_base == NULL)
1835 return (-1);
1836
1837 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1838 event_base->event_continue = 1;
1839
1840 if (EVBASE_NEED_NOTIFY(event_base)) {
1841 r = evthread_notify_base(event_base);
1842 } else {
1843 r = (0);
1844 }
1845 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1846 return r;
1847}
1848
1849int
Christopher Wileye8679812015-07-01 13:36:18 -07001850event_base_got_break(struct event_base *event_base)
1851{
1852 int res;
1853 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1854 res = event_base->event_break;
1855 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1856 return res;
1857}
1858
1859int
1860event_base_got_exit(struct event_base *event_base)
1861{
1862 int res;
1863 EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
1864 res = event_base->event_gotterm;
1865 EVBASE_RELEASE_LOCK(event_base, th_base_lock);
1866 return res;
1867}
1868
1869/* not thread safe */
1870
1871int
1872event_loop(int flags)
1873{
1874 return event_base_loop(current_base, flags);
1875}
1876
1877int
1878event_base_loop(struct event_base *base, int flags)
1879{
1880 const struct eventop *evsel = base->evsel;
1881 struct timeval tv;
1882 struct timeval *tv_p;
1883 int res, done, retval = 0;
1884
1885 /* Grab the lock. We will release it inside evsel.dispatch, and again
1886 * as we invoke user callbacks. */
1887 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
1888
1889 if (base->running_loop) {
1890 event_warnx("%s: reentrant invocation. Only one event_base_loop"
1891 " can run on each event_base at once.", __func__);
1892 EVBASE_RELEASE_LOCK(base, th_base_lock);
1893 return -1;
1894 }
1895
1896 base->running_loop = 1;
1897
1898 clear_time_cache(base);
1899
1900 if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001901 evsig_set_base_(base);
Christopher Wileye8679812015-07-01 13:36:18 -07001902
1903 done = 0;
1904
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001905#ifndef EVENT__DISABLE_THREAD_SUPPORT
Christopher Wileye8679812015-07-01 13:36:18 -07001906 base->th_owner_id = EVTHREAD_GET_ID();
1907#endif
1908
1909 base->event_gotterm = base->event_break = 0;
1910
1911 while (!done) {
1912 base->event_continue = 0;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001913 base->n_deferreds_queued = 0;
Christopher Wileye8679812015-07-01 13:36:18 -07001914
1915 /* Terminate the loop if we have been asked to */
1916 if (base->event_gotterm) {
1917 break;
1918 }
1919
1920 if (base->event_break) {
1921 break;
1922 }
1923
Christopher Wileye8679812015-07-01 13:36:18 -07001924 tv_p = &tv;
1925 if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
1926 timeout_next(base, &tv_p);
1927 } else {
1928 /*
1929 * if we have active events, we just poll new events
1930 * without waiting.
1931 */
1932 evutil_timerclear(&tv);
1933 }
1934
1935 /* If we have no events, we just exit */
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001936 if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
1937 !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
Christopher Wileye8679812015-07-01 13:36:18 -07001938 event_debug(("%s: no events registered.", __func__));
1939 retval = 1;
1940 goto done;
1941 }
1942
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001943 event_queue_make_later_events_active(base);
Christopher Wileye8679812015-07-01 13:36:18 -07001944
1945 clear_time_cache(base);
1946
1947 res = evsel->dispatch(base, tv_p);
1948
1949 if (res == -1) {
1950 event_debug(("%s: dispatch returned unsuccessfully.",
1951 __func__));
1952 retval = -1;
1953 goto done;
1954 }
1955
1956 update_time_cache(base);
1957
1958 timeout_process(base);
1959
1960 if (N_ACTIVE_CALLBACKS(base)) {
1961 int n = event_process_active(base);
1962 if ((flags & EVLOOP_ONCE)
1963 && N_ACTIVE_CALLBACKS(base) == 0
1964 && n != 0)
1965 done = 1;
1966 } else if (flags & EVLOOP_NONBLOCK)
1967 done = 1;
1968 }
1969 event_debug(("%s: asked to terminate loop.", __func__));
1970
1971done:
1972 clear_time_cache(base);
1973 base->running_loop = 0;
1974
1975 EVBASE_RELEASE_LOCK(base, th_base_lock);
1976
1977 return (retval);
1978}
1979
Christopher Wileye8679812015-07-01 13:36:18 -07001980/* One-time callback to implement event_base_once: invokes the user callback,
1981 * then deletes the allocated storage */
1982static void
1983event_once_cb(evutil_socket_t fd, short events, void *arg)
1984{
1985 struct event_once *eonce = arg;
1986
1987 (*eonce->cb)(fd, events, eonce->arg);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01001988 EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
1989 LIST_REMOVE(eonce, next_once);
1990 EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
Christopher Wileye8679812015-07-01 13:36:18 -07001991 event_debug_unassign(&eonce->ev);
1992 mm_free(eonce);
1993}
1994
1995/* Not threadsafe; schedules a one-shot event on current_base. */
1996int
1997event_once(evutil_socket_t fd, short events,
1998 void (*callback)(evutil_socket_t, short, void *),
1999 void *arg, const struct timeval *tv)
2000{
2001 return event_base_once(current_base, fd, events, callback, arg, tv);
2002}
2003
2004/* Schedules an event once */
2005int
2006event_base_once(struct event_base *base, evutil_socket_t fd, short events,
2007 void (*callback)(evutil_socket_t, short, void *),
2008 void *arg, const struct timeval *tv)
2009{
2010 struct event_once *eonce;
Christopher Wileye8679812015-07-01 13:36:18 -07002011 int res = 0;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002012 int activate = 0;
Christopher Wileye8679812015-07-01 13:36:18 -07002013
2014 /* We cannot support signals that just fire once, or persistent
2015 * events. */
2016 if (events & (EV_SIGNAL|EV_PERSIST))
2017 return (-1);
2018
2019 if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
2020 return (-1);
2021
2022 eonce->cb = callback;
2023 eonce->arg = arg;
2024
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002025 if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
Josh Gao83a0c9c2017-08-10 12:30:25 -07002026 evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002027
2028 if (tv == NULL || ! evutil_timerisset(tv)) {
2029 /* If the event is going to become active immediately,
2030 * don't put it on the timeout queue. This is one
2031 * idiom for scheduling a callback, so let's make
2032 * it fast (and order-preserving). */
2033 activate = 1;
2034 }
2035 } else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
2036 events &= EV_READ|EV_WRITE|EV_CLOSED;
Christopher Wileye8679812015-07-01 13:36:18 -07002037
2038 event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
2039 } else {
2040 /* Bad event combination */
2041 mm_free(eonce);
2042 return (-1);
2043 }
2044
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002045 if (res == 0) {
2046 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2047 if (activate)
2048 event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
2049 else
2050 res = event_add_nolock_(&eonce->ev, tv, 0);
2051
2052 if (res != 0) {
2053 mm_free(eonce);
2054 return (res);
2055 } else {
2056 LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
2057 }
2058 EVBASE_RELEASE_LOCK(base, th_base_lock);
Christopher Wileye8679812015-07-01 13:36:18 -07002059 }
2060
2061 return (0);
2062}
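
/* Usage sketch (the callback and "base" are assumptions, not code from this
 * file): schedule a one-shot callback after five seconds; the event_once
 * record is allocated and freed by the library, so the caller never owns an
 * event:
 *
 *	static void on_timeout(evutil_socket_t fd, short what, void *arg);
 *
 *	struct timeval five_sec = { 5, 0 };
 *	event_base_once(base, -1, EV_TIMEOUT, on_timeout, NULL, &five_sec);
 */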
2063
2064int
2065event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
2066{
2067 if (!base)
2068 base = current_base;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002069 if (arg == &event_self_cbarg_ptr_)
2070 arg = ev;
Christopher Wileye8679812015-07-01 13:36:18 -07002071
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002072 event_debug_assert_not_added_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002073
2074 ev->ev_base = base;
2075
2076 ev->ev_callback = callback;
2077 ev->ev_arg = arg;
2078 ev->ev_fd = fd;
2079 ev->ev_events = events;
2080 ev->ev_res = 0;
2081 ev->ev_flags = EVLIST_INIT;
2082 ev->ev_ncalls = 0;
2083 ev->ev_pncalls = NULL;
2084
2085 if (events & EV_SIGNAL) {
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002086 if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
Christopher Wileye8679812015-07-01 13:36:18 -07002087 event_warnx("%s: EV_SIGNAL is not compatible with "
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002088 "EV_READ, EV_WRITE or EV_CLOSED", __func__);
Christopher Wileye8679812015-07-01 13:36:18 -07002089 return -1;
2090 }
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002091 ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
Christopher Wileye8679812015-07-01 13:36:18 -07002092 } else {
2093 if (events & EV_PERSIST) {
2094 evutil_timerclear(&ev->ev_io_timeout);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002095 ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
Christopher Wileye8679812015-07-01 13:36:18 -07002096 } else {
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002097 ev->ev_closure = EV_CLOSURE_EVENT;
Christopher Wileye8679812015-07-01 13:36:18 -07002098 }
2099 }
2100
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002101 min_heap_elem_init_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002102
2103 if (base != NULL) {
2104 /* by default, we put new events into the middle priority */
2105 ev->ev_pri = base->nactivequeues / 2;
2106 }
2107
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002108 event_debug_note_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002109
2110 return 0;
2111}
2112
2113int
2114event_base_set(struct event_base *base, struct event *ev)
2115{
2116 /* Only innocent events may be assigned to a different base */
2117 if (ev->ev_flags != EVLIST_INIT)
2118 return (-1);
2119
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002120 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002121
2122 ev->ev_base = base;
2123 ev->ev_pri = base->nactivequeues/2;
2124
2125 return (0);
2126}
2127
2128void
2129event_set(struct event *ev, evutil_socket_t fd, short events,
2130 void (*callback)(evutil_socket_t, short, void *), void *arg)
2131{
2132 int r;
2133 r = event_assign(ev, current_base, fd, events, callback, arg);
2134 EVUTIL_ASSERT(r == 0);
2135}
2136
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002137void *
2138event_self_cbarg(void)
2139{
2140 return &event_self_cbarg_ptr_;
2141}
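
/* Hedged example ("base", "fd" and "read_cb" are assumed): event_self_cbarg()
 * returns a sentinel that event_assign()/event_new() replace with the event
 * itself, which is the usual way to let a callback delete or re-add its own
 * event:
 *
 *	struct event *ev = event_new(base, fd, EV_READ|EV_PERSIST,
 *	    read_cb, event_self_cbarg());
 */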
2142
2143struct event *
2144event_base_get_running_event(struct event_base *base)
2145{
2146 struct event *ev = NULL;
2147 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2148 if (EVBASE_IN_THREAD(base)) {
2149 struct event_callback *evcb = base->current_event;
2150 if (evcb->evcb_flags & EVLIST_INIT)
2151 ev = event_callback_to_event(evcb);
2152 }
2153 EVBASE_RELEASE_LOCK(base, th_base_lock);
2154 return ev;
2155}
2156
Christopher Wileye8679812015-07-01 13:36:18 -07002157struct event *
2158event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
2159{
2160 struct event *ev;
2161 ev = mm_malloc(sizeof(struct event));
2162 if (ev == NULL)
2163 return (NULL);
2164 if (event_assign(ev, base, fd, events, cb, arg) < 0) {
2165 mm_free(ev);
2166 return (NULL);
2167 }
2168
2169 return (ev);
2170}
2171
2172void
2173event_free(struct event *ev)
2174{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002175	/* This is disabled so that events which have been finalized remain a
2176	 * valid target for event_free(); that's why the assert below stays commented out. */
2177 // event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002178
2179 /* make sure that this event won't be coming back to haunt us. */
2180 event_del(ev);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002181 event_debug_note_teardown_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002182 mm_free(ev);
2183
2184}
2185
2186void
2187event_debug_unassign(struct event *ev)
2188{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002189 event_debug_assert_not_added_(ev);
2190 event_debug_note_teardown_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002191
2192 ev->ev_flags &= ~EVLIST_INIT;
2193}
2194
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002195#define EVENT_FINALIZE_FREE_ 0x10000
2196static int
2197event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2198{
2199 ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
2200 EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
2201
2202 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2203 ev->ev_closure = closure;
2204 ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
2205 event_active_nolock_(ev, EV_FINALIZE, 1);
2206 ev->ev_flags |= EVLIST_FINALIZING;
2207 return 0;
2208}
2209
2210static int
2211event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2212{
2213 int r;
2214 struct event_base *base = ev->ev_base;
2215 if (EVUTIL_FAILURE_CHECK(!base)) {
2216 event_warnx("%s: event has no event_base set.", __func__);
2217 return -1;
2218 }
2219
2220 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2221 r = event_finalize_nolock_(base, flags, ev, cb);
2222 EVBASE_RELEASE_LOCK(base, th_base_lock);
2223 return r;
2224}
2225
2226int
2227event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2228{
2229 return event_finalize_impl_(flags, ev, cb);
2230}
2231
2232int
2233event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
2234{
2235 return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
2236}
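
/* Sketch of the finalize path (the "discard_cb" name is hypothetical): the
 * intent, per event_finalize_nolock_() above, is to defer teardown to the
 * event loop so it cannot race with a callback that is still running; with
 * event_free_finalize() the base also frees the event after discard_cb
 * returns:
 *
 *	static void discard_cb(struct event *ev, void *arg);
 *
 *	event_free_finalize(0, ev, discard_cb);
 */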
2237
2238void
2239event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2240{
2241 struct event *ev = NULL;
2242 if (evcb->evcb_flags & EVLIST_INIT) {
2243 ev = event_callback_to_event(evcb);
2244 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
2245 } else {
2246 event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
2247 }
2248
2249 evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
2250 evcb->evcb_cb_union.evcb_cbfinalize = cb;
2251 event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
2252 evcb->evcb_flags |= EVLIST_FINALIZING;
2253}
2254
2255void
2256event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
2257{
2258 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2259 event_callback_finalize_nolock_(base, flags, evcb, cb);
2260 EVBASE_RELEASE_LOCK(base, th_base_lock);
2261}
2262
2263/** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided
2264 * callback will be invoked on *one of them*, after they have *all* been
2265 * finalized. */
2266int
2267event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
2268{
2269 int n_pending = 0, i;
2270
2271 if (base == NULL)
2272 base = current_base;
2273
2274 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2275
2276 event_debug(("%s: %d events finalizing", __func__, n_cbs));
2277
2278 /* At most one can be currently executing; the rest we just
2279 * cancel... But we always make sure that the finalize callback
2280 * runs. */
2281 for (i = 0; i < n_cbs; ++i) {
2282 struct event_callback *evcb = evcbs[i];
2283 if (evcb == base->current_event) {
2284 event_callback_finalize_nolock_(base, 0, evcb, cb);
2285 ++n_pending;
2286 } else {
2287 event_callback_cancel_nolock_(base, evcb, 0);
2288 }
2289 }
2290
2291 if (n_pending == 0) {
2292 /* Just do the first one. */
2293 event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
2294 }
2295
2296 EVBASE_RELEASE_LOCK(base, th_base_lock);
2297 return 0;
2298}
2299
Christopher Wileye8679812015-07-01 13:36:18 -07002300/*
2301 * Sets the priority of an event - if the event is already active,
2302 * changing the priority will fail.
2303 */
2304
2305int
2306event_priority_set(struct event *ev, int pri)
2307{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002308 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002309
2310 if (ev->ev_flags & EVLIST_ACTIVE)
2311 return (-1);
2312 if (pri < 0 || pri >= ev->ev_base->nactivequeues)
2313 return (-1);
2314
2315 ev->ev_pri = pri;
2316
2317 return (0);
2318}
2319
2320/*
2321 * Checks if a specific event is pending or scheduled.
2322 */
2323
2324int
2325event_pending(const struct event *ev, short event, struct timeval *tv)
2326{
2327 int flags = 0;
2328
2329 if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
2330 event_warnx("%s: event has no event_base set.", __func__);
2331 return 0;
2332 }
2333
2334 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002335 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002336
2337 if (ev->ev_flags & EVLIST_INSERTED)
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002338 flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
2339 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
Christopher Wileye8679812015-07-01 13:36:18 -07002340 flags |= ev->ev_res;
2341 if (ev->ev_flags & EVLIST_TIMEOUT)
2342 flags |= EV_TIMEOUT;
2343
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002344 event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
Christopher Wileye8679812015-07-01 13:36:18 -07002345
2346 /* See if there is a timeout that we should report */
2347 if (tv != NULL && (flags & event & EV_TIMEOUT)) {
2348 struct timeval tmp = ev->ev_timeout;
2349 tmp.tv_usec &= MICROSECONDS_MASK;
Christopher Wileye8679812015-07-01 13:36:18 -07002350	/* correctly remap to real time */
2351 evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
Christopher Wileye8679812015-07-01 13:36:18 -07002352 }
2353
2354 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2355
2356 return (flags & event);
2357}
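
/* Usage sketch (assumes "ev" was set up elsewhere): event_pending() reports
 * which of the requested flags are still armed or active, and the optional
 * timeval receives the absolute expiry time when EV_TIMEOUT is both
 * requested and pending:
 *
 *	struct timeval expires;
 *	int armed = event_pending(ev, EV_READ|EV_WRITE|EV_TIMEOUT, &expires);
 */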
2358
2359int
2360event_initialized(const struct event *ev)
2361{
2362 if (!(ev->ev_flags & EVLIST_INIT))
2363 return 0;
2364
2365 return 1;
2366}
2367
2368void
2369event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
2370{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002371 event_debug_assert_is_setup_(event);
Christopher Wileye8679812015-07-01 13:36:18 -07002372
2373 if (base_out)
2374 *base_out = event->ev_base;
2375 if (fd_out)
2376 *fd_out = event->ev_fd;
2377 if (events_out)
2378 *events_out = event->ev_events;
2379 if (callback_out)
2380 *callback_out = event->ev_callback;
2381 if (arg_out)
2382 *arg_out = event->ev_arg;
2383}
2384
2385size_t
2386event_get_struct_event_size(void)
2387{
2388 return sizeof(struct event);
2389}
2390
2391evutil_socket_t
2392event_get_fd(const struct event *ev)
2393{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002394 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002395 return ev->ev_fd;
2396}
2397
2398struct event_base *
2399event_get_base(const struct event *ev)
2400{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002401 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002402 return ev->ev_base;
2403}
2404
2405short
2406event_get_events(const struct event *ev)
2407{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002408 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002409 return ev->ev_events;
2410}
2411
2412event_callback_fn
2413event_get_callback(const struct event *ev)
2414{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002415 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002416 return ev->ev_callback;
2417}
2418
2419void *
2420event_get_callback_arg(const struct event *ev)
2421{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002422 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002423 return ev->ev_arg;
2424}
2425
2426int
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002427event_get_priority(const struct event *ev)
2428{
2429 event_debug_assert_is_setup_(ev);
2430 return ev->ev_pri;
2431}
2432
2433int
Christopher Wileye8679812015-07-01 13:36:18 -07002434event_add(struct event *ev, const struct timeval *tv)
2435{
2436 int res;
2437
2438 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2439 event_warnx("%s: event has no event_base set.", __func__);
2440 return -1;
2441 }
2442
2443 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2444
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002445 res = event_add_nolock_(ev, tv, 0);
Christopher Wileye8679812015-07-01 13:36:18 -07002446
2447 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2448
2449 return (res);
2450}
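
/* Minimal sketch (hypothetical "base", "fd" and "read_cb"): adding a
 * non-persistent read event with a timeval arms both the I/O wait and a
 * timeout, so read_cb fires with EV_READ or, if nothing arrives within two
 * seconds, with EV_TIMEOUT:
 *
 *	struct timeval two_sec = { 2, 0 };
 *	struct event *ev = event_new(base, fd, EV_READ, read_cb, NULL);
 *	event_add(ev, &two_sec);
 */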
2451
2452/* Helper callback: wake an event_base from another thread. This version
2453 * works by writing a byte to one end of a socketpair, so that the event_base
2454 * listening on the other end will wake up as the corresponding event
2455 * triggers */
2456static int
2457evthread_notify_base_default(struct event_base *base)
2458{
2459 char buf[1];
2460 int r;
2461 buf[0] = (char) 0;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002462#ifdef _WIN32
Christopher Wileye8679812015-07-01 13:36:18 -07002463 r = send(base->th_notify_fd[1], buf, 1, 0);
2464#else
2465 r = write(base->th_notify_fd[1], buf, 1);
2466#endif
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002467 return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
Christopher Wileye8679812015-07-01 13:36:18 -07002468}
2469
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002470#ifdef EVENT__HAVE_EVENTFD
Christopher Wileye8679812015-07-01 13:36:18 -07002471/* Helper callback: wake an event_base from another thread. This version
2472 * assumes that you have a working eventfd() implementation. */
2473static int
2474evthread_notify_base_eventfd(struct event_base *base)
2475{
2476 ev_uint64_t msg = 1;
2477 int r;
2478 do {
2479 r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
2480 } while (r < 0 && errno == EAGAIN);
2481
2482 return (r < 0) ? -1 : 0;
2483}
2484#endif
2485
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002486
Christopher Wileye8679812015-07-01 13:36:18 -07002487/** Tell the thread currently running the event_loop for base (if any) that it
2488 * needs to stop waiting in its dispatch function (if it is) and process all
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002489 * active callbacks. */
Christopher Wileye8679812015-07-01 13:36:18 -07002490static int
2491evthread_notify_base(struct event_base *base)
2492{
2493 EVENT_BASE_ASSERT_LOCKED(base);
2494 if (!base->th_notify_fn)
2495 return -1;
2496 if (base->is_notify_pending)
2497 return 0;
2498 base->is_notify_pending = 1;
2499 return base->th_notify_fn(base);
2500}
2501
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002502/* Implementation function to remove a timeout on a currently pending event.
2503 */
2504int
2505event_remove_timer_nolock_(struct event *ev)
2506{
2507 struct event_base *base = ev->ev_base;
2508
2509 EVENT_BASE_ASSERT_LOCKED(base);
2510 event_debug_assert_is_setup_(ev);
2511
2512 event_debug(("event_remove_timer_nolock: event: %p", ev));
2513
2514 /* If it's not pending on a timeout, we don't need to do anything. */
2515 if (ev->ev_flags & EVLIST_TIMEOUT) {
2516 event_queue_remove_timeout(base, ev);
2517 evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
2518 }
2519
2520 return (0);
2521}
2522
2523int
2524event_remove_timer(struct event *ev)
2525{
2526 int res;
2527
2528 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2529 event_warnx("%s: event has no event_base set.", __func__);
2530 return -1;
2531 }
2532
2533 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2534
2535 res = event_remove_timer_nolock_(ev);
2536
2537 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2538
2539 return (res);
2540}
2541
Christopher Wileye8679812015-07-01 13:36:18 -07002542/* Implementation function to add an event. Works just like event_add,
2543 * except: 1) it requires that we have the lock. 2) if tv_is_absolute is set,
2544 * we treat tv as an absolute time, not as an interval to add to the current
2545 * time */
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002546int
2547event_add_nolock_(struct event *ev, const struct timeval *tv,
Christopher Wileye8679812015-07-01 13:36:18 -07002548 int tv_is_absolute)
2549{
2550 struct event_base *base = ev->ev_base;
2551 int res = 0;
2552 int notify = 0;
2553
2554 EVENT_BASE_ASSERT_LOCKED(base);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002555 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002556
2557 event_debug((
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002558 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
Christopher Wileye8679812015-07-01 13:36:18 -07002559 ev,
2560 EV_SOCK_ARG(ev->ev_fd),
2561 ev->ev_events & EV_READ ? "EV_READ " : " ",
2562 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002563 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
Christopher Wileye8679812015-07-01 13:36:18 -07002564 tv ? "EV_TIMEOUT " : " ",
2565 ev->ev_callback));
2566
2567 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2568
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002569 if (ev->ev_flags & EVLIST_FINALIZING) {
2570 /* XXXX debug */
2571 return (-1);
2572 }
2573
Christopher Wileye8679812015-07-01 13:36:18 -07002574 /*
2575 * prepare for timeout insertion further below, if we get a
2576 * failure on any step, we should not change any state.
2577 */
2578 if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002579 if (min_heap_reserve_(&base->timeheap,
2580 1 + min_heap_size_(&base->timeheap)) == -1)
Christopher Wileye8679812015-07-01 13:36:18 -07002581 return (-1); /* ENOMEM == errno */
2582 }
2583
2584 /* If the main thread is currently executing a signal event's
2585 * callback, and we are not the main thread, then we want to wait
2586 * until the callback is done before we mess with the event, or else
2587 * we can race on ev_ncalls and ev_pncalls below. */
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002588#ifndef EVENT__DISABLE_THREAD_SUPPORT
2589 if (base->current_event == event_to_event_callback(ev) &&
2590 (ev->ev_events & EV_SIGNAL)
Christopher Wileye8679812015-07-01 13:36:18 -07002591 && !EVBASE_IN_THREAD(base)) {
2592 ++base->current_event_waiters;
2593 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2594 }
2595#endif
2596
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002597 if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
2598 !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2599 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2600 res = evmap_io_add_(base, ev->ev_fd, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002601 else if (ev->ev_events & EV_SIGNAL)
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002602 res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002603 if (res != -1)
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002604 event_queue_insert_inserted(base, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002605 if (res == 1) {
2606 /* evmap says we need to notify the main thread. */
2607 notify = 1;
2608 res = 0;
2609 }
2610 }
2611
2612 /*
2613 * we should change the timeout state only if the previous event
2614 * addition succeeded.
2615 */
2616 if (res != -1 && tv != NULL) {
2617 struct timeval now;
2618 int common_timeout;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002619#ifdef USE_REINSERT_TIMEOUT
2620 int was_common;
2621 int old_timeout_idx;
2622#endif
Christopher Wileye8679812015-07-01 13:36:18 -07002623
2624 /*
2625 * for persistent timeout events, we remember the
2626 * timeout value and re-add the event.
2627 *
2628 * If tv_is_absolute, this was already set.
2629 */
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002630 if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
Christopher Wileye8679812015-07-01 13:36:18 -07002631 ev->ev_io_timeout = *tv;
2632
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002633#ifndef USE_REINSERT_TIMEOUT
Christopher Wileye8679812015-07-01 13:36:18 -07002634 if (ev->ev_flags & EVLIST_TIMEOUT) {
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002635 event_queue_remove_timeout(base, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002636 }
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002637#endif
Christopher Wileye8679812015-07-01 13:36:18 -07002638
2639 /* Check if it is active due to a timeout. Rescheduling
2640 * this timeout before the callback can be executed
2641 * removes it from the active list. */
2642 if ((ev->ev_flags & EVLIST_ACTIVE) &&
2643 (ev->ev_res & EV_TIMEOUT)) {
2644 if (ev->ev_events & EV_SIGNAL) {
2645 /* See if we are just active executing
2646 * this event in a loop
2647 */
2648 if (ev->ev_ncalls && ev->ev_pncalls) {
2649 /* Abort loop */
2650 *ev->ev_pncalls = 0;
2651 }
2652 }
2653
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002654 event_queue_remove_active(base, event_to_event_callback(ev));
Christopher Wileye8679812015-07-01 13:36:18 -07002655 }
2656
2657 gettime(base, &now);
2658
2659 common_timeout = is_common_timeout(tv, base);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002660#ifdef USE_REINSERT_TIMEOUT
2661 was_common = is_common_timeout(&ev->ev_timeout, base);
2662 old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
2663#endif
2664
Christopher Wileye8679812015-07-01 13:36:18 -07002665 if (tv_is_absolute) {
2666 ev->ev_timeout = *tv;
2667 } else if (common_timeout) {
2668 struct timeval tmp = *tv;
2669 tmp.tv_usec &= MICROSECONDS_MASK;
2670 evutil_timeradd(&now, &tmp, &ev->ev_timeout);
2671 ev->ev_timeout.tv_usec |=
2672 (tv->tv_usec & ~MICROSECONDS_MASK);
2673 } else {
2674 evutil_timeradd(&now, tv, &ev->ev_timeout);
2675 }
2676
2677 event_debug((
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002678 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
2679 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
Christopher Wileye8679812015-07-01 13:36:18 -07002680
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002681#ifdef USE_REINSERT_TIMEOUT
2682 event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
2683#else
2684 event_queue_insert_timeout(base, ev);
2685#endif
2686
Christopher Wileye8679812015-07-01 13:36:18 -07002687 if (common_timeout) {
2688 struct common_timeout_list *ctl =
2689 get_common_timeout_list(base, &ev->ev_timeout);
2690 if (ev == TAILQ_FIRST(&ctl->events)) {
2691 common_timeout_schedule(ctl, &now, ev);
2692 }
2693 } else {
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002694 struct event* top = NULL;
Christopher Wileye8679812015-07-01 13:36:18 -07002695 /* See if the earliest timeout is now earlier than it
2696 * was before: if so, we will need to tell the main
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002697 * thread to wake up earlier than it would otherwise.
2698 * We double check the timeout of the top element to
2699 * handle time distortions due to system suspension.
2700 */
2701 if (min_heap_elt_is_top_(ev))
2702 notify = 1;
2703 else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
2704 evutil_timercmp(&top->ev_timeout, &now, <))
Christopher Wileye8679812015-07-01 13:36:18 -07002705 notify = 1;
2706 }
2707 }
2708
2709 /* if we are not in the right thread, we need to wake up the loop */
2710 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2711 evthread_notify_base(base);
2712
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002713 event_debug_note_add_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002714
2715 return (res);
2716}
2717
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002718static int
2719event_del_(struct event *ev, int blocking)
Christopher Wileye8679812015-07-01 13:36:18 -07002720{
2721 int res;
2722
2723 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2724 event_warnx("%s: event has no event_base set.", __func__);
2725 return -1;
2726 }
2727
2728 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2729
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002730 res = event_del_nolock_(ev, blocking);
Christopher Wileye8679812015-07-01 13:36:18 -07002731
2732 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2733
2734 return (res);
2735}
2736
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002737int
2738event_del(struct event *ev)
2739{
2740 return event_del_(ev, EVENT_DEL_AUTOBLOCK);
2741}
2742
2743int
2744event_del_block(struct event *ev)
2745{
2746 return event_del_(ev, EVENT_DEL_BLOCK);
2747}
2748
2749int
2750event_del_noblock(struct event *ev)
2751{
2752 return event_del_(ev, EVENT_DEL_NOBLOCK);
2753}
2754
2755/** Helper for event_del: always called with th_base_lock held.
2756 *
2757 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
2758 * EVEN_IF_FINALIZING} values. See those for more information.
2759 */
2760int
2761event_del_nolock_(struct event *ev, int blocking)
Christopher Wileye8679812015-07-01 13:36:18 -07002762{
2763 struct event_base *base;
2764 int res = 0, notify = 0;
2765
2766 event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
2767 ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));
2768
2769 /* An event without a base has not been added */
2770 if (ev->ev_base == NULL)
2771 return (-1);
2772
2773 EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
2774
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002775 if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
2776 if (ev->ev_flags & EVLIST_FINALIZING) {
2777 /* XXXX Debug */
2778 return 0;
2779 }
2780 }
2781
Christopher Wileye8679812015-07-01 13:36:18 -07002782 /* If the main thread is currently executing this event's callback,
2783 * and we are not the main thread, then we want to wait until the
2784 * callback is done before we start removing the event. That way,
2785 * when this function returns, it will be safe to free the
2786 * user-supplied argument. */
2787 base = ev->ev_base;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002788#ifndef EVENT__DISABLE_THREAD_SUPPORT
2789 if (blocking != EVENT_DEL_NOBLOCK &&
2790 base->current_event == event_to_event_callback(ev) &&
2791 !EVBASE_IN_THREAD(base) &&
2792 (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
Christopher Wileye8679812015-07-01 13:36:18 -07002793 ++base->current_event_waiters;
2794 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2795 }
2796#endif
2797
2798 EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
2799
2800 /* See if we are just active executing this event in a loop */
2801 if (ev->ev_events & EV_SIGNAL) {
2802 if (ev->ev_ncalls && ev->ev_pncalls) {
2803 /* Abort loop */
2804 *ev->ev_pncalls = 0;
2805 }
2806 }
2807
2808 if (ev->ev_flags & EVLIST_TIMEOUT) {
2809 /* NOTE: We never need to notify the main thread because of a
2810 * deleted timeout event: all that could happen if we don't is
2811 * that the dispatch loop might wake up too early. But the
2812 * point of notifying the main thread _is_ to wake up the
2813 * dispatch loop early anyway, so we wouldn't gain anything by
2814 * doing it.
2815 */
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002816 event_queue_remove_timeout(base, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002817 }
2818
2819 if (ev->ev_flags & EVLIST_ACTIVE)
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002820 event_queue_remove_active(base, event_to_event_callback(ev));
2821 else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
2822 event_queue_remove_active_later(base, event_to_event_callback(ev));
Christopher Wileye8679812015-07-01 13:36:18 -07002823
2824 if (ev->ev_flags & EVLIST_INSERTED) {
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002825 event_queue_remove_inserted(base, ev);
2826 if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
2827 res = evmap_io_del_(base, ev->ev_fd, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002828 else
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002829 res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002830 if (res == 1) {
2831 /* evmap says we need to notify the main thread. */
2832 notify = 1;
2833 res = 0;
2834 }
2835 }
2836
2837 /* if we are not in the right thread, we need to wake up the loop */
2838 if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
2839 evthread_notify_base(base);
2840
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002841 event_debug_note_del_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002842
2843 return (res);
2844}
2845
2846void
2847event_active(struct event *ev, int res, short ncalls)
2848{
2849 if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
2850 event_warnx("%s: event has no event_base set.", __func__);
2851 return;
2852 }
2853
2854 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2855
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002856 event_debug_assert_is_setup_(ev);
Christopher Wileye8679812015-07-01 13:36:18 -07002857
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002858 event_active_nolock_(ev, res, ncalls);
Christopher Wileye8679812015-07-01 13:36:18 -07002859
2860 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2861}
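
/* Illustrative use (names assumed): event_active() lets other code force an
 * event's callback to run from the loop without any real I/O, e.g. to wake a
 * connection handler; the ncalls argument only matters for signal events:
 *
 *	event_active(ev, EV_READ, 0);
 */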
2862
2863
2864void
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002865event_active_nolock_(struct event *ev, int res, short ncalls)
Christopher Wileye8679812015-07-01 13:36:18 -07002866{
2867 struct event_base *base;
2868
2869 event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
2870 ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
2871
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002872 base = ev->ev_base;
2873 EVENT_BASE_ASSERT_LOCKED(base);
Josh Gao83a0c9c2017-08-10 12:30:25 -07002874
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002875 if (ev->ev_flags & EVLIST_FINALIZING) {
2876 /* XXXX debug */
Josh Gao83a0c9c2017-08-10 12:30:25 -07002877 return;
2878 }
2879
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002880 switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
2881 default:
2882 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
2883 EVUTIL_ASSERT(0);
2884 break;
2885 case EVLIST_ACTIVE:
2886 /* We get different kinds of events, add them together */
2887 ev->ev_res |= res;
2888 return;
2889 case EVLIST_ACTIVE_LATER:
2890 ev->ev_res |= res;
2891 break;
2892 case 0:
2893 ev->ev_res = res;
2894 break;
2895 }
Christopher Wileye8679812015-07-01 13:36:18 -07002896
2897 if (ev->ev_pri < base->event_running_priority)
2898 base->event_continue = 1;
2899
2900 if (ev->ev_events & EV_SIGNAL) {
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002901#ifndef EVENT__DISABLE_THREAD_SUPPORT
2902 if (base->current_event == event_to_event_callback(ev) &&
2903 !EVBASE_IN_THREAD(base)) {
Christopher Wileye8679812015-07-01 13:36:18 -07002904 ++base->current_event_waiters;
2905 EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
2906 }
2907#endif
2908 ev->ev_ncalls = ncalls;
2909 ev->ev_pncalls = NULL;
2910 }
2911
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002912 event_callback_activate_nolock_(base, event_to_event_callback(ev));
2913}
2914
2915void
2916event_active_later_(struct event *ev, int res)
2917{
2918 EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
2919 event_active_later_nolock_(ev, res);
2920 EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
2921}
2922
2923void
2924event_active_later_nolock_(struct event *ev, int res)
2925{
2926 struct event_base *base = ev->ev_base;
2927 EVENT_BASE_ASSERT_LOCKED(base);
2928
2929 if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2930 /* We get different kinds of events, add them together */
2931 ev->ev_res |= res;
2932 return;
2933 }
2934
2935 ev->ev_res = res;
2936
2937 event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
2938}
2939
2940int
2941event_callback_activate_(struct event_base *base,
2942 struct event_callback *evcb)
2943{
2944 int r;
2945 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
2946 r = event_callback_activate_nolock_(base, evcb);
2947 EVBASE_RELEASE_LOCK(base, th_base_lock);
2948 return r;
2949}
2950
2951int
2952event_callback_activate_nolock_(struct event_base *base,
2953 struct event_callback *evcb)
2954{
2955 int r = 1;
2956
2957 if (evcb->evcb_flags & EVLIST_FINALIZING)
2958 return 0;
2959
2960 switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
2961 default:
2962 EVUTIL_ASSERT(0);
2963 case EVLIST_ACTIVE_LATER:
2964 event_queue_remove_active_later(base, evcb);
2965 r = 0;
2966 break;
2967 case EVLIST_ACTIVE:
2968 return 0;
2969 case 0:
2970 break;
2971 }
2972
2973 event_queue_insert_active(base, evcb);
Christopher Wileye8679812015-07-01 13:36:18 -07002974
2975 if (EVBASE_NEED_NOTIFY(base))
2976 evthread_notify_base(base);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002977
2978 return r;
2979}
2980
2981int
2982event_callback_activate_later_nolock_(struct event_base *base,
2983 struct event_callback *evcb)
2984{
2985 if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
2986 return 0;
2987
2988 event_queue_insert_active_later(base, evcb);
2989 if (EVBASE_NEED_NOTIFY(base))
2990 evthread_notify_base(base);
2991 return 1;
Christopher Wileye8679812015-07-01 13:36:18 -07002992}
2993
2994void
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002995event_callback_init_(struct event_base *base,
2996 struct event_callback *cb)
Christopher Wileye8679812015-07-01 13:36:18 -07002997{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01002998 memset(cb, 0, sizeof(*cb));
2999 cb->evcb_pri = base->nactivequeues - 1;
3000}
3001
3002int
3003event_callback_cancel_(struct event_base *base,
3004 struct event_callback *evcb)
3005{
3006 int r;
3007 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3008 r = event_callback_cancel_nolock_(base, evcb, 0);
3009 EVBASE_RELEASE_LOCK(base, th_base_lock);
3010 return r;
3011}
3012
3013int
3014event_callback_cancel_nolock_(struct event_base *base,
3015 struct event_callback *evcb, int even_if_finalizing)
3016{
3017 if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
3018 return 0;
3019
3020 if (evcb->evcb_flags & EVLIST_INIT)
3021 return event_del_nolock_(event_callback_to_event(evcb),
3022 even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
3023
3024 switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
3025 default:
3026 case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
3027 EVUTIL_ASSERT(0);
3028 break;
3029 case EVLIST_ACTIVE:
3030 /* We get different kinds of events, add them together */
3031 event_queue_remove_active(base, evcb);
3032 return 0;
3033 case EVLIST_ACTIVE_LATER:
3034 event_queue_remove_active_later(base, evcb);
3035 break;
3036 case 0:
3037 break;
3038 }
3039
3040 return 0;
Elliott Hughes2a572d12017-08-07 14:18:18 -07003041}
3042
Josh Gao83a0c9c2017-08-10 12:30:25 -07003043void
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003044event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
Elliott Hughes2a572d12017-08-07 14:18:18 -07003045{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003046 memset(cb, 0, sizeof(*cb));
3047 cb->evcb_cb_union.evcb_selfcb = fn;
3048 cb->evcb_arg = arg;
3049 cb->evcb_pri = priority;
3050 cb->evcb_closure = EV_CLOSURE_CB_SELF;
Josh Gao83a0c9c2017-08-10 12:30:25 -07003051}
3052
3053void
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003054event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
Josh Gao83a0c9c2017-08-10 12:30:25 -07003055{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003056 cb->evcb_pri = priority;
3057}
Josh Gao83a0c9c2017-08-10 12:30:25 -07003058
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003059void
3060event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
3061{
3062 if (!base)
3063 base = current_base;
3064 event_callback_cancel_(base, cb);
3065}
3066
3067#define MAX_DEFERREDS_QUEUED 32
3068int
3069event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
3070{
3071 int r = 1;
3072 if (!base)
3073 base = current_base;
3074 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3075 if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
3076 r = event_callback_activate_later_nolock_(base, cb);
3077 } else {
3078 r = event_callback_activate_nolock_(base, cb);
3079 if (r) {
3080 ++base->n_deferreds_queued;
3081 }
Josh Gao83a0c9c2017-08-10 12:30:25 -07003082 }
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003083 EVBASE_RELEASE_LOCK(base, th_base_lock);
3084 return r;
Christopher Wileye8679812015-07-01 13:36:18 -07003085}
3086
3087static int
3088timeout_next(struct event_base *base, struct timeval **tv_p)
3089{
3090 /* Caller must hold th_base_lock */
3091 struct timeval now;
3092 struct event *ev;
3093 struct timeval *tv = *tv_p;
3094 int res = 0;
3095
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003096 ev = min_heap_top_(&base->timeheap);
Christopher Wileye8679812015-07-01 13:36:18 -07003097
3098 if (ev == NULL) {
3099 /* if no time-based events are active wait for I/O */
3100 *tv_p = NULL;
3101 goto out;
3102 }
3103
3104 if (gettime(base, &now) == -1) {
3105 res = -1;
3106 goto out;
3107 }
3108
3109 if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
3110 evutil_timerclear(tv);
3111 goto out;
3112 }
3113
3114 evutil_timersub(&ev->ev_timeout, &now, tv);
3115
3116 EVUTIL_ASSERT(tv->tv_sec >= 0);
3117 EVUTIL_ASSERT(tv->tv_usec >= 0);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003118 event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
Christopher Wileye8679812015-07-01 13:36:18 -07003119
3120out:
3121 return (res);
3122}
3123
Christopher Wileye8679812015-07-01 13:36:18 -07003124/* Activate every event whose timeout has elapsed. */
3125static void
3126timeout_process(struct event_base *base)
3127{
3128 /* Caller must hold lock. */
3129 struct timeval now;
3130 struct event *ev;
3131
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003132 if (min_heap_empty_(&base->timeheap)) {
Christopher Wileye8679812015-07-01 13:36:18 -07003133 return;
3134 }
3135
3136 gettime(base, &now);
3137
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003138 while ((ev = min_heap_top_(&base->timeheap))) {
Christopher Wileye8679812015-07-01 13:36:18 -07003139 if (evutil_timercmp(&ev->ev_timeout, &now, >))
3140 break;
3141
3142 /* delete this event from the I/O queues */
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003143 event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
Christopher Wileye8679812015-07-01 13:36:18 -07003144
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003145 event_debug(("timeout_process: event: %p, call %p",
3146 ev, ev->ev_callback));
3147 event_active_nolock_(ev, EV_TIMEOUT, 1);
Christopher Wileye8679812015-07-01 13:36:18 -07003148 }
3149}
3150
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003151#if (EVLIST_INTERNAL >> 4) != 1
3152#error "Mismatch for value of EVLIST_INTERNAL"
3153#endif
3154
3155#ifndef MAX
3156#define MAX(a,b) (((a)>(b))?(a):(b))
3157#endif
3158
3159#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
3160
3161/* These are a fancy way to spell
3162 if (!(flags & EVLIST_INTERNAL))
3163 base->event_count--/++;
3164*/
3165#define DECR_EVENT_COUNT(base,flags) \
3166 ((base)->event_count -= (~((flags) >> 4) & 1))
3167#define INCR_EVENT_COUNT(base,flags) do { \
3168 ((base)->event_count += (~((flags) >> 4) & 1)); \
3169 MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count); \
3170} while (0)
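/*
 * Worked example for the macros above (illustrative only).  The #if check
 * above ensures that EVLIST_INTERNAL >> 4 == 1, so ((flags) >> 4) & 1 is 1
 * exactly when the EVLIST_INTERNAL bit is set, and ~...&1 inverts that:
 *
 *     internal event (bit set):    ~1 & 1 == 0  ->  event_count unchanged
 *     ordinary event (bit clear):  ~0 & 1 == 1  ->  event_count -=/+= 1
 *
 * so only non-internal events are reflected in event_count.
 */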
3171
Christopher Wileye8679812015-07-01 13:36:18 -07003172static void
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003173event_queue_remove_inserted(struct event_base *base, struct event *ev)
Christopher Wileye8679812015-07-01 13:36:18 -07003174{
3175 EVENT_BASE_ASSERT_LOCKED(base);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003176 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
Christopher Wileye8679812015-07-01 13:36:18 -07003177 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003178 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
3179 return;
3180 }
3181 DECR_EVENT_COUNT(base, ev->ev_flags);
3182 ev->ev_flags &= ~EVLIST_INSERTED;
3183}
3184static void
3185event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
3186{
3187 EVENT_BASE_ASSERT_LOCKED(base);
3188 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
3189 event_errx(1, "%s: %p not on queue %x", __func__,
3190 evcb, EVLIST_ACTIVE);
3191 return;
3192 }
3193 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3194 evcb->evcb_flags &= ~EVLIST_ACTIVE;
3195 base->event_count_active--;
3196
3197 TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
3198 evcb, evcb_active_next);
3199}
3200static void
3201event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
3202{
3203 EVENT_BASE_ASSERT_LOCKED(base);
3204 if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
3205 event_errx(1, "%s: %p not on queue %x", __func__,
3206 evcb, EVLIST_ACTIVE_LATER);
3207 return;
3208 }
3209 DECR_EVENT_COUNT(base, evcb->evcb_flags);
3210 evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
3211 base->event_count_active--;
3212
3213 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3214}
3215static void
3216event_queue_remove_timeout(struct event_base *base, struct event *ev)
3217{
3218 EVENT_BASE_ASSERT_LOCKED(base);
3219 if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
3220 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
3221 ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
3222 return;
3223 }
3224 DECR_EVENT_COUNT(base, ev->ev_flags);
3225 ev->ev_flags &= ~EVLIST_TIMEOUT;
3226
3227 if (is_common_timeout(&ev->ev_timeout, base)) {
3228 struct common_timeout_list *ctl =
3229 get_common_timeout_list(base, &ev->ev_timeout);
3230 TAILQ_REMOVE(&ctl->events, ev,
3231 ev_timeout_pos.ev_next_with_common_timeout);
3232 } else {
3233 min_heap_erase_(&base->timeheap, ev);
3234 }
3235}
3236
3237#ifdef USE_REINSERT_TIMEOUT
3238/* Remove and reinsert 'ev' into the timeout queue. */
3239static void
3240event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
3241 int was_common, int is_common, int old_timeout_idx)
3242{
3243 struct common_timeout_list *ctl;
3244 if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
3245 event_queue_insert_timeout(base, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07003246 return;
3247 }
3248
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003249 switch ((was_common<<1) | is_common) {
3250 case 3: /* Changing from one common timeout to another */
3251 ctl = base->common_timeout_queues[old_timeout_idx];
3252 TAILQ_REMOVE(&ctl->events, ev,
3253 ev_timeout_pos.ev_next_with_common_timeout);
3254 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3255 insert_common_timeout_inorder(ctl, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07003256 break;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003257 case 2: /* Was common; is no longer common */
3258 ctl = base->common_timeout_queues[old_timeout_idx];
3259 TAILQ_REMOVE(&ctl->events, ev,
3260 ev_timeout_pos.ev_next_with_common_timeout);
3261 min_heap_push_(&base->timeheap, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07003262 break;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003263 case 1: /* Wasn't common; has become common. */
3264 min_heap_erase_(&base->timeheap, ev);
3265 ctl = get_common_timeout_list(base, &ev->ev_timeout);
3266 insert_common_timeout_inorder(ctl, ev);
3267 break;
3268 case 0: /* was in heap; is still on heap. */
3269 min_heap_adjust_(&base->timeheap, ev);
Christopher Wileye8679812015-07-01 13:36:18 -07003270 break;
3271 default:
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003272 EVUTIL_ASSERT(0); /* unreachable */
3273 break;
Christopher Wileye8679812015-07-01 13:36:18 -07003274 }
3275}
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003276#endif
Christopher Wileye8679812015-07-01 13:36:18 -07003277
3278/* Add 'ev' to the common timeout list 'ctl', keeping it sorted by timeout. */
3279static void
3280insert_common_timeout_inorder(struct common_timeout_list *ctl,
3281 struct event *ev)
3282{
3283 struct event *e;
3284 /* By all logic, we should just be able to append 'ev' to the end of
3285 * ctl->events, since the timeout on each 'ev' is set to {the common
3286 * timeout} + {the time when we add the event}, and so the events
3287 * should arrive in order of their timeouts. But just in case
3288 * there's some wacky threading issue going on, we do a search from
3289 * the end of the list to find the right insertion point.
3290 */
3291 TAILQ_FOREACH_REVERSE(e, &ctl->events,
3292 event_list, ev_timeout_pos.ev_next_with_common_timeout) {
3293 /* This timercmp is a little sneaky, since both ev and e have
3294 * magic values in tv_usec. Fortunately, they ought to have
3295 * the _same_ magic values in tv_usec. Let's assert for that.
3296 */
3297 EVUTIL_ASSERT(
3298 is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
3299 if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
3300 TAILQ_INSERT_AFTER(&ctl->events, e, ev,
3301 ev_timeout_pos.ev_next_with_common_timeout);
3302 return;
3303 }
3304 }
3305 TAILQ_INSERT_HEAD(&ctl->events, ev,
3306 ev_timeout_pos.ev_next_with_common_timeout);
3307}
3308
3309static void
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003310event_queue_insert_inserted(struct event_base *base, struct event *ev)
Christopher Wileye8679812015-07-01 13:36:18 -07003311{
3312 EVENT_BASE_ASSERT_LOCKED(base);
3313
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003314 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
3315 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
3316 ev, EV_SOCK_ARG(ev->ev_fd));
Elliott Hughes2a572d12017-08-07 14:18:18 -07003317 return;
Christopher Wileye8679812015-07-01 13:36:18 -07003318 }
Elliott Hughes2a572d12017-08-07 14:18:18 -07003319
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003320 INCR_EVENT_COUNT(base, ev->ev_flags);
Elliott Hughes2a572d12017-08-07 14:18:18 -07003321
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003322 ev->ev_flags |= EVLIST_INSERTED;
3323}
3324
3325static void
3326event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
3327{
3328 EVENT_BASE_ASSERT_LOCKED(base);
3329
3330 if (evcb->evcb_flags & EVLIST_ACTIVE) {
3331 /* Double insertion is possible for active events */
3332 return;
Elliott Hughes2a572d12017-08-07 14:18:18 -07003333 }
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003334
3335 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3336
3337 evcb->evcb_flags |= EVLIST_ACTIVE;
3338
3339 base->event_count_active++;
3340 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3341 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3342 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
3343 evcb, evcb_active_next);
3344}
3345
3346static void
3347event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
3348{
3349 EVENT_BASE_ASSERT_LOCKED(base);
3350 if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
3351 /* Double insertion is possible */
3352 return;
3353 }
3354
3355 INCR_EVENT_COUNT(base, evcb->evcb_flags);
3356 evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
3357 base->event_count_active++;
3358 MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
3359 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3360 TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
3361}
3362
3363static void
3364event_queue_insert_timeout(struct event_base *base, struct event *ev)
3365{
3366 EVENT_BASE_ASSERT_LOCKED(base);
3367
3368 if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
3369 event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
3370 ev, EV_SOCK_ARG(ev->ev_fd));
3371 return;
3372 }
3373
3374 INCR_EVENT_COUNT(base, ev->ev_flags);
3375
3376 ev->ev_flags |= EVLIST_TIMEOUT;
3377
3378 if (is_common_timeout(&ev->ev_timeout, base)) {
3379 struct common_timeout_list *ctl =
3380 get_common_timeout_list(base, &ev->ev_timeout);
3381 insert_common_timeout_inorder(ctl, ev);
3382 } else {
3383 min_heap_push_(&base->timeheap, ev);
3384 }
3385}
3386
3387static void
3388event_queue_make_later_events_active(struct event_base *base)
3389{
3390 struct event_callback *evcb;
3391 EVENT_BASE_ASSERT_LOCKED(base);
3392
3393 while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
3394 TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
3395 evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
3396 EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
3397 TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
3398 base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
Christopher Wileye8679812015-07-01 13:36:18 -07003399 }
3400}
3401
3402/* Functions for debugging */
3403
3404const char *
3405event_get_version(void)
3406{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003407 return (EVENT__VERSION);
Christopher Wileye8679812015-07-01 13:36:18 -07003408}
3409
3410ev_uint32_t
3411event_get_version_number(void)
3412{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003413 return (EVENT__NUMERIC_VERSION);
Christopher Wileye8679812015-07-01 13:36:18 -07003414}
3415
3416/*
3417 * No thread-safe interface needed - the information should be the same
3418 * for all threads.
3419 */
3420
3421const char *
3422event_get_method(void)
3423{
3424 return (current_base->evsel->name);
3425}
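/*
 * Example (illustrative; event_get_method() assumes a current base has
 * already been set up):
 *
 *     printf("libevent %s (0x%08x), backend: %s\n",
 *         event_get_version(), (unsigned)event_get_version_number(),
 *         event_get_method());
 */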
3426
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003427#ifndef EVENT__DISABLE_MM_REPLACEMENT
3428static void *(*mm_malloc_fn_)(size_t sz) = NULL;
3429static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
3430static void (*mm_free_fn_)(void *p) = NULL;
Christopher Wileye8679812015-07-01 13:36:18 -07003431
3432void *
3433event_mm_malloc_(size_t sz)
3434{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003435 if (sz == 0)
3436 return NULL;
3437
3438 if (mm_malloc_fn_)
3439 return mm_malloc_fn_(sz);
Christopher Wileye8679812015-07-01 13:36:18 -07003440 else
3441 return malloc(sz);
3442}
3443
3444void *
3445event_mm_calloc_(size_t count, size_t size)
3446{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003447 if (count == 0 || size == 0)
3448 return NULL;
3449
3450 if (mm_malloc_fn_) {
Christopher Wileye8679812015-07-01 13:36:18 -07003451 size_t sz = count * size;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003452 void *p = NULL;
3453 if (count > EV_SIZE_MAX / size)
3454 goto error;
3455 p = mm_malloc_fn_(sz);
Christopher Wileye8679812015-07-01 13:36:18 -07003456 if (p)
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003457 return memset(p, 0, sz);
3458 } else {
3459 void *p = calloc(count, size);
3460#ifdef _WIN32
3461 /* Windows calloc doesn't reliably set ENOMEM */
3462 if (p == NULL)
3463 goto error;
3464#endif
Christopher Wileye8679812015-07-01 13:36:18 -07003465 return p;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003466 }
3467
3468error:
3469 errno = ENOMEM;
3470 return NULL;
Christopher Wileye8679812015-07-01 13:36:18 -07003471}
3472
3473char *
3474event_mm_strdup_(const char *str)
3475{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003476 if (!str) {
3477 errno = EINVAL;
3478 return NULL;
3479 }
3480
3481 if (mm_malloc_fn_) {
Christopher Wileye8679812015-07-01 13:36:18 -07003482 size_t ln = strlen(str);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003483 void *p = NULL;
3484 if (ln == EV_SIZE_MAX)
3485 goto error;
3486 p = mm_malloc_fn_(ln+1);
Christopher Wileye8679812015-07-01 13:36:18 -07003487 if (p)
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003488 return memcpy(p, str, ln+1);
Christopher Wileye8679812015-07-01 13:36:18 -07003489 } else
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003490#ifdef _WIN32
Christopher Wileye8679812015-07-01 13:36:18 -07003491 return _strdup(str);
3492#else
3493 return strdup(str);
3494#endif
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003495
3496error:
3497 errno = ENOMEM;
3498 return NULL;
Christopher Wileye8679812015-07-01 13:36:18 -07003499}
3500
3501void *
3502event_mm_realloc_(void *ptr, size_t sz)
3503{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003504 if (mm_realloc_fn_)
3505 return mm_realloc_fn_(ptr, sz);
Christopher Wileye8679812015-07-01 13:36:18 -07003506 else
3507 return realloc(ptr, sz);
3508}
3509
3510void
3511event_mm_free_(void *ptr)
3512{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003513 if (mm_free_fn_)
3514 mm_free_fn_(ptr);
Christopher Wileye8679812015-07-01 13:36:18 -07003515 else
3516 free(ptr);
3517}
3518
3519void
3520event_set_mem_functions(void *(*malloc_fn)(size_t sz),
3521 void *(*realloc_fn)(void *ptr, size_t sz),
3522 void (*free_fn)(void *ptr))
3523{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003524 mm_malloc_fn_ = malloc_fn;
3525 mm_realloc_fn_ = realloc_fn;
3526 mm_free_fn_ = free_fn;
Christopher Wileye8679812015-07-01 13:36:18 -07003527}
3528#endif
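/*
 * Usage sketch for event_set_mem_functions() (illustrative; the my_*
 * wrappers are hypothetical application code).  Replacement allocators
 * should be installed before any other libevent allocation takes place:
 *
 *     static void *my_malloc(size_t n) { return malloc(n); }
 *     static void *my_realloc(void *p, size_t n) { return realloc(p, n); }
 *     static void my_free(void *p) { free(p); }
 *
 *     event_set_mem_functions(my_malloc, my_realloc, my_free);
 */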
3529
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003530#ifdef EVENT__HAVE_EVENTFD
Christopher Wileye8679812015-07-01 13:36:18 -07003531static void
3532evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
3533{
3534 ev_uint64_t msg;
3535 ev_ssize_t r;
3536 struct event_base *base = arg;
3537
3538 r = read(fd, (void*) &msg, sizeof(msg));
3539 if (r<0 && errno != EAGAIN) {
3540 event_sock_warn(fd, "Error reading from eventfd");
3541 }
3542 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3543 base->is_notify_pending = 0;
3544 EVBASE_RELEASE_LOCK(base, th_base_lock);
3545}
3546#endif
3547
3548static void
3549evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
3550{
3551 unsigned char buf[1024];
3552 struct event_base *base = arg;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003553#ifdef _WIN32
Christopher Wileye8679812015-07-01 13:36:18 -07003554 while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
3555 ;
3556#else
3557 while (read(fd, (char*)buf, sizeof(buf)) > 0)
3558 ;
3559#endif
3560
3561 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3562 base->is_notify_pending = 0;
3563 EVBASE_RELEASE_LOCK(base, th_base_lock);
3564}
3565
3566int
3567evthread_make_base_notifiable(struct event_base *base)
3568{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003569 int r;
Christopher Wileye8679812015-07-01 13:36:18 -07003570 if (!base)
3571 return -1;
3572
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003573 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3574 r = evthread_make_base_notifiable_nolock_(base);
3575 EVBASE_RELEASE_LOCK(base, th_base_lock);
3576 return r;
3577}
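/*
 * Usage sketch (illustrative; handle_error is hypothetical).  Making a base
 * explicitly notifiable lets other threads wake its loop; a negative return
 * value indicates failure:
 *
 *     if (evthread_make_base_notifiable(base) < 0)
 *         handle_error();
 */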
Elliott Hughes2a572d12017-08-07 14:18:18 -07003578
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003579static int
3580evthread_make_base_notifiable_nolock_(struct event_base *base)
3581{
3582 void (*cb)(evutil_socket_t, short, void *);
3583 int (*notify)(struct event_base *);
3584
3585 if (base->th_notify_fn != NULL) {
3586 /* The base is already notifiable: we're doing fine. */
3587 return 0;
3588 }
3589
3590#if defined(EVENT__HAVE_WORKING_KQUEUE)
3591 if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
3592 base->th_notify_fn = event_kq_notify_base_;
3593 /* No need to add an event here; the backend can wake
3594 * itself up just fine. */
3595 return 0;
3596 }
Christopher Wileye8679812015-07-01 13:36:18 -07003597#endif
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003598
3599#ifdef EVENT__HAVE_EVENTFD
3600 base->th_notify_fd[0] = evutil_eventfd_(0,
3601 EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
Christopher Wileye8679812015-07-01 13:36:18 -07003602 if (base->th_notify_fd[0] >= 0) {
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003603 base->th_notify_fd[1] = -1;
Christopher Wileye8679812015-07-01 13:36:18 -07003604 notify = evthread_notify_base_eventfd;
3605 cb = evthread_notify_drain_eventfd;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003606 } else
Christopher Wileye8679812015-07-01 13:36:18 -07003607#endif
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003608 if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
3609 notify = evthread_notify_base_default;
3610 cb = evthread_notify_drain_default;
3611 } else {
3612 return -1;
Josh Gao83a0c9c2017-08-10 12:30:25 -07003613 }
Josh Gao83a0c9c2017-08-10 12:30:25 -07003614
Christopher Wileye8679812015-07-01 13:36:18 -07003615 base->th_notify_fn = notify;
3616
Christopher Wileye8679812015-07-01 13:36:18 -07003617 /* prepare an event that we can use for wakeup */
3618 event_assign(&base->th_notify, base, base->th_notify_fd[0],
3619 EV_READ|EV_PERSIST, cb, base);
3620
3621 /* we need to mark this as an internal event */
3622 base->th_notify.ev_flags |= EVLIST_INTERNAL;
3623 event_priority_set(&base->th_notify, 0);
3624
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003625 return event_add_nolock_(&base->th_notify, NULL, 0);
Christopher Wileye8679812015-07-01 13:36:18 -07003626}
3627
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003628int
3629event_base_foreach_event_nolock_(struct event_base *base,
3630 event_base_foreach_event_cb fn, void *arg)
3631{
3632 int r, i;
3633 unsigned u;
3634 struct event *ev;
3635
3636 /* Start out with all the EVLIST_INSERTED events. */
3637 if ((r = evmap_foreach_event_(base, fn, arg)))
3638 return r;
3639
3640 /* Okay, now we deal with those events that have timeouts and are in
3641 * the min-heap. */
3642 for (u = 0; u < base->timeheap.n; ++u) {
3643 ev = base->timeheap.p[u];
3644 if (ev->ev_flags & EVLIST_INSERTED) {
3645 /* we already processed this one */
3646 continue;
3647 }
3648 if ((r = fn(base, ev, arg)))
3649 return r;
3650 }
3651
3652 /* Now for the events in one of the common-timeout queues (these
3653 * are kept out of the min-heap). */
3654 for (i = 0; i < base->n_common_timeouts; ++i) {
3655 struct common_timeout_list *ctl =
3656 base->common_timeout_queues[i];
3657 TAILQ_FOREACH(ev, &ctl->events,
3658 ev_timeout_pos.ev_next_with_common_timeout) {
3659 if (ev->ev_flags & EVLIST_INSERTED) {
3660 /* we already processed this one */
3661 continue;
3662 }
3663 if ((r = fn(base, ev, arg)))
3664 return r;
3665 }
3666 }
3667
3668 /* Finally, we deal with all the active events that we haven't touched
3669 * yet. */
3670 for (i = 0; i < base->nactivequeues; ++i) {
3671 struct event_callback *evcb;
3672 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3673 if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
3674 /* This isn't an event (EVLIST_INIT clear), or
3675 * we already processed it (EVLIST_INSERTED or
3676 * EVLIST_TIMEOUT set). */
3677 continue;
3678 }
3679 ev = event_callback_to_event(evcb);
3680 if ((r = fn(base, ev, arg)))
3681 return r;
3682 }
3683 }
3684
3685 return 0;
3686}
3687
3688/* Helper for event_base_dump_events: called on each event in the event base;
3689 * dumps only the inserted events. */
3690static int
3691dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
3692{
3693 FILE *output = arg;
3694 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3695 "sig" : "fd ";
3696
3697 if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
3698 return 0;
3699
3700 fprintf(output, " %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
3701 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
3702 (e->ev_events&EV_READ)?" Read":"",
3703 (e->ev_events&EV_WRITE)?" Write":"",
3704 (e->ev_events&EV_CLOSED)?" EOF":"",
3705 (e->ev_events&EV_SIGNAL)?" Signal":"",
3706 (e->ev_events&EV_PERSIST)?" Persist":"",
3707 (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
3708 if (e->ev_flags & EVLIST_TIMEOUT) {
3709 struct timeval tv;
3710 tv.tv_sec = e->ev_timeout.tv_sec;
3711 tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
3712 evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
3713 fprintf(output, " Timeout=%ld.%06d",
3714 (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
3715 }
3716 fputc('\n', output);
3717
3718 return 0;
3719}
3720
3721/* Helper for event_base_dump_events: called on each event in the event base;
3722 * dumps only the active events. */
3723static int
3724dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
3725{
3726 FILE *output = arg;
3727 const char *gloss = (e->ev_events & EV_SIGNAL) ?
3728 "sig" : "fd ";
3729
3730 if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
3731 return 0;
3732
3733 fprintf(output, " %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
3734 (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
3735 (e->ev_res&EV_READ)?" Read":"",
3736 (e->ev_res&EV_WRITE)?" Write":"",
3737 (e->ev_res&EV_CLOSED)?" EOF":"",
3738 (e->ev_res&EV_SIGNAL)?" Signal":"",
3739 (e->ev_res&EV_TIMEOUT)?" Timeout":"",
3740 (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
3741 (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
3742
3743 return 0;
3744}
3745
3746int
3747event_base_foreach_event(struct event_base *base,
3748 event_base_foreach_event_cb fn, void *arg)
3749{
3750 int r;
3751 if ((!fn) || (!base)) {
3752 return -1;
3753 }
3754 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3755 r = event_base_foreach_event_nolock_(base, fn, arg);
3756 EVBASE_RELEASE_LOCK(base, th_base_lock);
3757 return r;
3758}
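/*
 * Usage sketch (illustrative; count_cb is hypothetical).  The callback is
 * invoked once per event; returning non-zero stops the iteration early:
 *
 *     static int
 *     count_cb(const struct event_base *b, const struct event *e, void *arg)
 *     {
 *         (void)b; (void)e;
 *         ++*(int *)arg;
 *         return 0;
 *     }
 *
 *     int n = 0;
 *     event_base_foreach_event(base, count_cb, &n);
 */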
3759
3760
Christopher Wileye8679812015-07-01 13:36:18 -07003761void
3762event_base_dump_events(struct event_base *base, FILE *output)
3763{
Elliott Hughes2a572d12017-08-07 14:18:18 -07003764 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003765 fprintf(output, "Inserted events:\n");
3766 event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
3767
3768 fprintf(output, "Active events:\n");
3769 event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
Elliott Hughes2a572d12017-08-07 14:18:18 -07003770 EVBASE_RELEASE_LOCK(base, th_base_lock);
3771}
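/*
 * Example (illustrative): dump the currently inserted and active events to
 * stderr while debugging:
 *
 *     event_base_dump_events(base, stderr);
 */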
3772
3773void
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003774event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
3775{
3776 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3777 evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
3778 EVBASE_RELEASE_LOCK(base, th_base_lock);
3779}
3780
3781void
3782event_base_active_by_signal(struct event_base *base, int sig)
3783{
3784 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3785 evmap_signal_active_(base, sig, 1);
3786 EVBASE_RELEASE_LOCK(base, th_base_lock);
3787}
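/*
 * Usage sketch (illustrative): make every read event registered on 'fd'
 * pending, and every handler registered for SIGINT, without the fd or the
 * signal actually triggering:
 *
 *     event_base_active_by_fd(base, fd, EV_READ);
 *     event_base_active_by_signal(base, SIGINT);
 */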
3788
3789
3790void
3791event_base_add_virtual_(struct event_base *base)
3792{
3793 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3794 base->virtual_event_count++;
3795 MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
3796 EVBASE_RELEASE_LOCK(base, th_base_lock);
3797}
3798
3799void
3800event_base_del_virtual_(struct event_base *base)
Christopher Wileye8679812015-07-01 13:36:18 -07003801{
3802 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3803 EVUTIL_ASSERT(base->virtual_event_count > 0);
3804 base->virtual_event_count--;
3805 if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
3806 evthread_notify_base(base);
3807 EVBASE_RELEASE_LOCK(base, th_base_lock);
3808}
3809
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003810static void
3811event_free_debug_globals_locks(void)
3812{
3813#ifndef EVENT__DISABLE_THREAD_SUPPORT
3814#ifndef EVENT__DISABLE_DEBUG_MODE
3815 if (event_debug_map_lock_ != NULL) {
3816 EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
3817 event_debug_map_lock_ = NULL;
3818 evthreadimpl_disable_lock_debugging_();
3819 }
3820#endif /* EVENT__DISABLE_DEBUG_MODE */
3821#endif /* EVENT__DISABLE_THREAD_SUPPORT */
3822 return;
3823}
3824
3825static void
3826event_free_debug_globals(void)
3827{
3828 event_free_debug_globals_locks();
3829}
3830
3831static void
3832event_free_evsig_globals(void)
3833{
3834 evsig_free_globals_();
3835}
3836
3837static void
3838event_free_evutil_globals(void)
3839{
3840 evutil_free_globals_();
3841}
3842
3843static void
3844event_free_globals(void)
3845{
3846 event_free_debug_globals();
3847 event_free_evsig_globals();
3848 event_free_evutil_globals();
3849}
3850
3851void
3852libevent_global_shutdown(void)
3853{
3854 event_disable_debug_mode();
3855 event_free_globals();
3856}
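/*
 * Usage note (illustrative): libevent_global_shutdown() is meant to be
 * called at most once, at process shutdown, after all bases and events
 * have been freed:
 *
 *     event_base_free(base);
 *     libevent_global_shutdown();
 */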
3857
3858#ifndef EVENT__DISABLE_THREAD_SUPPORT
Christopher Wileye8679812015-07-01 13:36:18 -07003859int
3860event_global_setup_locks_(const int enable_locks)
3861{
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003862#ifndef EVENT__DISABLE_DEBUG_MODE
3863 EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
Christopher Wileye8679812015-07-01 13:36:18 -07003864#endif
3865 if (evsig_global_setup_locks_(enable_locks) < 0)
3866 return -1;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003867 if (evutil_global_setup_locks_(enable_locks) < 0)
3868 return -1;
Christopher Wileye8679812015-07-01 13:36:18 -07003869 if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
3870 return -1;
3871 return 0;
3872}
3873#endif
3874
3875void
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003876event_base_assert_ok_(struct event_base *base)
3877{
3878 EVBASE_ACQUIRE_LOCK(base, th_base_lock);
3879 event_base_assert_ok_nolock_(base);
3880 EVBASE_RELEASE_LOCK(base, th_base_lock);
3881}
3882
3883void
3884event_base_assert_ok_nolock_(struct event_base *base)
Christopher Wileye8679812015-07-01 13:36:18 -07003885{
3886 int i;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003887 int count;
3888
3889 /* First do checks on the per-fd and per-signal lists */
3890 evmap_check_integrity_(base);
Christopher Wileye8679812015-07-01 13:36:18 -07003891
3892 /* Check the heap property */
3893 for (i = 1; i < (int)base->timeheap.n; ++i) {
3894 int parent = (i - 1) / 2;
3895 struct event *ev, *p_ev;
3896 ev = base->timeheap.p[i];
3897 p_ev = base->timeheap.p[parent];
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003898 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
Christopher Wileye8679812015-07-01 13:36:18 -07003899 EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
3900 EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
3901 }
3902
3903 /* Check that the common timeouts are fine */
3904 for (i = 0; i < base->n_common_timeouts; ++i) {
3905 struct common_timeout_list *ctl = base->common_timeout_queues[i];
3906 struct event *last=NULL, *ev;
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003907
3908 EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
3909
Christopher Wileye8679812015-07-01 13:36:18 -07003910 TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
3911 if (last)
3912 EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003913 EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
Christopher Wileye8679812015-07-01 13:36:18 -07003914 EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
3915 EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
3916 last = ev;
3917 }
3918 }
3919
Narayan Kamathfc74cb42017-09-13 12:53:52 +01003920 /* Check the active queues. */
3921 count = 0;
3922 for (i = 0; i < base->nactivequeues; ++i) {
3923 struct event_callback *evcb;
3924 EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
3925 TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
3926 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
3927 EVUTIL_ASSERT(evcb->evcb_pri == i);
3928 ++count;
3929 }
3930 }
3931
3932 {
3933 struct event_callback *evcb;
3934 TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
3935 EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
3936 ++count;
3937 }
3938 }
3939 EVUTIL_ASSERT(count == base->event_count_active);
Christopher Wileye8679812015-07-01 13:36:18 -07003940}