Upgrade to 2.1.8-stable (2017-01-22).

Bug: N/A
Test: builds
Change-Id: Idbbdc1db3d01984a4f4b60f8fdf455140b6b7ca6
diff --git a/event.c b/event.c
index fab419a..503003e 100644
--- a/event.c
+++ b/event.c
@@ -25,34 +25,33 @@
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include "event2/event-config.h"
+#include "evconfig-private.h"
 
-#ifdef WIN32
+#ifdef _WIN32
 #include <winsock2.h>
 #define WIN32_LEAN_AND_MEAN
 #include <windows.h>
 #undef WIN32_LEAN_AND_MEAN
 #endif
 #include <sys/types.h>
-#if !defined(WIN32) && defined(_EVENT_HAVE_SYS_TIME_H)
+#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
 #include <sys/time.h>
 #endif
 #include <sys/queue.h>
-#ifdef _EVENT_HAVE_SYS_SOCKET_H
+#ifdef EVENT__HAVE_SYS_SOCKET_H
 #include <sys/socket.h>
 #endif
 #include <stdio.h>
 #include <stdlib.h>
-#ifdef _EVENT_HAVE_UNISTD_H
+#ifdef EVENT__HAVE_UNISTD_H
 #include <unistd.h>
 #endif
-#ifdef _EVENT_HAVE_SYS_EVENTFD_H
-#include <sys/eventfd.h>
-#endif
 #include <ctype.h>
 #include <errno.h>
 #include <signal.h>
 #include <string.h>
 #include <time.h>
+#include <limits.h>
 
 #include "event2/event.h"
 #include "event2/event_struct.h"
@@ -66,52 +65,58 @@
 #include "evmap-internal.h"
 #include "iocp-internal.h"
 #include "changelist-internal.h"
+#define HT_NO_CACHE_HASH_VALUES
 #include "ht-internal.h"
 #include "util-internal.h"
 
-#ifdef _EVENT_HAVE_EVENT_PORTS
+
+#ifdef EVENT__HAVE_WORKING_KQUEUE
+#include "kqueue-internal.h"
+#endif
+
+#ifdef EVENT__HAVE_EVENT_PORTS
 extern const struct eventop evportops;
 #endif
-#ifdef _EVENT_HAVE_SELECT
+#ifdef EVENT__HAVE_SELECT
 extern const struct eventop selectops;
 #endif
-#ifdef _EVENT_HAVE_POLL
+#ifdef EVENT__HAVE_POLL
 extern const struct eventop pollops;
 #endif
-#ifdef _EVENT_HAVE_EPOLL
+#ifdef EVENT__HAVE_EPOLL
 extern const struct eventop epollops;
 #endif
-#ifdef _EVENT_HAVE_WORKING_KQUEUE
+#ifdef EVENT__HAVE_WORKING_KQUEUE
 extern const struct eventop kqops;
 #endif
-#ifdef _EVENT_HAVE_DEVPOLL
+#ifdef EVENT__HAVE_DEVPOLL
 extern const struct eventop devpollops;
 #endif
-#ifdef WIN32
+#ifdef _WIN32
 extern const struct eventop win32ops;
 #endif
 
 /* Array of backends in order of preference. */
 static const struct eventop *eventops[] = {
-#ifdef _EVENT_HAVE_EVENT_PORTS
+#ifdef EVENT__HAVE_EVENT_PORTS
 	&evportops,
 #endif
-#ifdef _EVENT_HAVE_WORKING_KQUEUE
+#ifdef EVENT__HAVE_WORKING_KQUEUE
 	&kqops,
 #endif
-#ifdef _EVENT_HAVE_EPOLL
+#ifdef EVENT__HAVE_EPOLL
 	&epollops,
 #endif
-#ifdef _EVENT_HAVE_DEVPOLL
+#ifdef EVENT__HAVE_DEVPOLL
 	&devpollops,
 #endif
-#ifdef _EVENT_HAVE_POLL
+#ifdef EVENT__HAVE_POLL
 	&pollops,
 #endif
-#ifdef _EVENT_HAVE_SELECT
+#ifdef EVENT__HAVE_SELECT
 	&selectops,
 #endif
-#ifdef WIN32
+#ifdef _WIN32
 	&win32ops,
 #endif
 	NULL
@@ -123,29 +128,43 @@
 
 /* Global state */
 
-static int use_monotonic;
+static void *event_self_cbarg_ptr_ = NULL;
 
 /* Prototypes */
-static inline int event_add_internal(struct event *ev,
-    const struct timeval *tv, int tv_is_absolute);
-static inline int event_del_internal(struct event *ev);
+static void	event_queue_insert_active(struct event_base *, struct event_callback *);
+static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
+static void	event_queue_insert_timeout(struct event_base *, struct event *);
+static void	event_queue_insert_inserted(struct event_base *, struct event *);
+static void	event_queue_remove_active(struct event_base *, struct event_callback *);
+static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
+static void	event_queue_remove_timeout(struct event_base *, struct event *);
+static void	event_queue_remove_inserted(struct event_base *, struct event *);
+static void event_queue_make_later_events_active(struct event_base *base);
 
-static void	event_queue_insert(struct event_base *, struct event *, int);
-static void	event_queue_remove(struct event_base *, struct event *, int);
+static int evthread_make_base_notifiable_nolock_(struct event_base *base);
+static int event_del_(struct event *ev, int blocking);
+
+#ifdef USE_REINSERT_TIMEOUT
+/* This code seems buggy; only turn it on if we find out what the trouble is. */
+static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
+#endif
+
 static int	event_haveevents(struct event_base *);
 
 static int	event_process_active(struct event_base *);
 
 static int	timeout_next(struct event_base *, struct timeval **);
 static void	timeout_process(struct event_base *);
-static void	timeout_correct(struct event_base *, struct timeval *);
 
 static inline void	event_signal_closure(struct event_base *, struct event *ev);
 static inline void	event_persist_closure(struct event_base *, struct event *ev);
 
 static int	evthread_notify_base(struct event_base *base);
 
-#ifndef _EVENT_DISABLE_DEBUG_MODE
+static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
+    struct event *ev);
+
+#ifndef EVENT__DISABLE_DEBUG_MODE
 /* These functions implement a hashtable of which 'struct event *' structures
  * have been setup or added.  We don't want to trust the content of the struct
  * event itself, since we're trying to work through cases where an event gets
@@ -179,11 +198,27 @@
 	return a->ptr == b->ptr;
 }
 
-int _event_debug_mode_on = 0;
+int event_debug_mode_on_ = 0;
+
+
+#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
+/**
+ * @brief debug mode variable which is set whenever a function/structure that
+ *        needs to be shared across threads is created (if thread support is enabled).
+ *
+ *        When and if evthreads are initialized, this variable will be evaluated,
+ *        and if it is set to something other than zero, it means the evthread setup
+ *        functions were called out of order.
+ *
+ *        See: "Locks and threading" in the documentation.
+ */
+int event_debug_created_threadable_ctx_ = 0;
+#endif
+
 /* Set if it's too late to enable event_debug_mode. */
 static int event_debug_mode_too_late = 0;
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
-static void *_event_debug_map_lock = NULL;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+static void *event_debug_map_lock_ = NULL;
 #endif
 static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
 	HT_INITIALIZER();
@@ -194,11 +229,11 @@
     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
 
 /* Macro: record that ev is now setup (that is, ready for an add) */
-#define _event_debug_note_setup(ev) do {				\
-	if (_event_debug_mode_on) {					\
+#define event_debug_note_setup_(ev) do {				\
+	if (event_debug_mode_on_) {					\
 		struct event_debug_entry *dent,find;			\
 		find.ptr = (ev);					\
-		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
+		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
 		if (dent) {						\
 			dent->added = 0;				\
@@ -211,142 +246,123 @@
 			dent->added = 0;				\
 			HT_INSERT(event_debug_map, &global_debug_map, dent); \
 		}							\
-		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
+		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
 	}								\
 	event_debug_mode_too_late = 1;					\
 	} while (0)
 /* Macro: record that ev is no longer setup */
-#define _event_debug_note_teardown(ev) do {				\
-	if (_event_debug_mode_on) {					\
+#define event_debug_note_teardown_(ev) do {				\
+	if (event_debug_mode_on_) {					\
 		struct event_debug_entry *dent,find;			\
 		find.ptr = (ev);					\
-		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
+		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
 		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
 		if (dent)						\
 			mm_free(dent);					\
-		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
+		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
 	}								\
 	event_debug_mode_too_late = 1;					\
 	} while (0)
 /* Macro: record that ev is now added */
-#define _event_debug_note_add(ev)	do {				\
-	if (_event_debug_mode_on) {					\
+#define event_debug_note_add_(ev)	do {				\
+	if (event_debug_mode_on_) {					\
 		struct event_debug_entry *dent,find;			\
 		find.ptr = (ev);					\
-		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
+		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
 		if (dent) {						\
 			dent->added = 1;				\
 		} else {						\
-			event_errx(_EVENT_ERR_ABORT,			\
+			event_errx(EVENT_ERR_ABORT_,			\
 			    "%s: noting an add on a non-setup event %p" \
 			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
 			    ", flags: 0x%x)",				\
 			    __func__, (ev), (ev)->ev_events,		\
 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
 		}							\
-		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
+		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
 	}								\
 	event_debug_mode_too_late = 1;					\
 	} while (0)
 /* Macro: record that ev is no longer added */
-#define _event_debug_note_del(ev) do {					\
-	if (_event_debug_mode_on) {					\
+#define event_debug_note_del_(ev) do {					\
+	if (event_debug_mode_on_) {					\
 		struct event_debug_entry *dent,find;			\
 		find.ptr = (ev);					\
-		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
+		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
 		if (dent) {						\
 			dent->added = 0;				\
 		} else {						\
-			event_errx(_EVENT_ERR_ABORT,			\
+			event_errx(EVENT_ERR_ABORT_,			\
 			    "%s: noting a del on a non-setup event %p"	\
 			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
 			    ", flags: 0x%x)",				\
 			    __func__, (ev), (ev)->ev_events,		\
 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
 		}							\
-		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
+		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
 	}								\
 	event_debug_mode_too_late = 1;					\
 	} while (0)
 /* Macro: assert that ev is setup (i.e., okay to add or inspect) */
-#define _event_debug_assert_is_setup(ev) do {				\
-	if (_event_debug_mode_on) {					\
+#define event_debug_assert_is_setup_(ev) do {				\
+	if (event_debug_mode_on_) {					\
 		struct event_debug_entry *dent,find;			\
 		find.ptr = (ev);					\
-		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
+		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
 		if (!dent) {						\
-			event_errx(_EVENT_ERR_ABORT,			\
+			event_errx(EVENT_ERR_ABORT_,			\
 			    "%s called on a non-initialized event %p"	\
 			    " (events: 0x%x, fd: "EV_SOCK_FMT\
 			    ", flags: 0x%x)",				\
 			    __func__, (ev), (ev)->ev_events,		\
 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
 		}							\
-		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
+		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
 	}								\
 	} while (0)
 /* Macro: assert that ev is not added (i.e., okay to tear down or set
  * up again) */
-#define _event_debug_assert_not_added(ev) do {				\
-	if (_event_debug_mode_on) {					\
+#define event_debug_assert_not_added_(ev) do {				\
+	if (event_debug_mode_on_) {					\
 		struct event_debug_entry *dent,find;			\
 		find.ptr = (ev);					\
-		EVLOCK_LOCK(_event_debug_map_lock, 0);			\
+		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
 		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
 		if (dent && dent->added) {				\
-			event_errx(_EVENT_ERR_ABORT,			\
+			event_errx(EVENT_ERR_ABORT_,			\
 			    "%s called on an already added event %p"	\
 			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
 			    "flags: 0x%x)",				\
 			    __func__, (ev), (ev)->ev_events,		\
 			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
 		}							\
-		EVLOCK_UNLOCK(_event_debug_map_lock, 0);		\
+		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
 	}								\
 	} while (0)
 #else
-#define _event_debug_note_setup(ev) \
+#define event_debug_note_setup_(ev) \
 	((void)0)
-#define _event_debug_note_teardown(ev) \
+#define event_debug_note_teardown_(ev) \
 	((void)0)
-#define _event_debug_note_add(ev) \
+#define event_debug_note_add_(ev) \
 	((void)0)
-#define _event_debug_note_del(ev) \
+#define event_debug_note_del_(ev) \
 	((void)0)
-#define _event_debug_assert_is_setup(ev) \
+#define event_debug_assert_is_setup_(ev) \
 	((void)0)
-#define _event_debug_assert_not_added(ev) \
+#define event_debug_assert_not_added_(ev) \
 	((void)0)
 #endif
 
 #define EVENT_BASE_ASSERT_LOCKED(base)		\
 	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
 
-/* The first time this function is called, it sets use_monotonic to 1
- * if we have a clock function that supports monotonic time */
-static void
-detect_monotonic(void)
-{
-#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
-	struct timespec	ts;
-	static int use_monotonic_initialized = 0;
-
-	if (use_monotonic_initialized)
-		return;
-
-	if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
-		use_monotonic = 1;
-
-	use_monotonic_initialized = 1;
-#endif
-}
-
 /* How often (in seconds) do we check for changes in wall clock time relative
  * to monotonic time?  Set this to -1 for 'never.' */
-#define CLOCK_SYNC_INTERVAL -1
+#define CLOCK_SYNC_INTERVAL 5
 
 /** Set 'tp' to the current time according to 'base'.  We must hold the lock
  * on 'base'.  If there is a cached time, return it.  Otherwise, use
@@ -363,28 +379,19 @@
 		return (0);
 	}
 
-#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
-	if (use_monotonic) {
-		struct timespec	ts;
-
-		if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
-			return (-1);
-
-		tp->tv_sec = ts.tv_sec;
-		tp->tv_usec = ts.tv_nsec / 1000;
-		if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
-		    < ts.tv_sec) {
-			struct timeval tv;
-			evutil_gettimeofday(&tv,NULL);
-			evutil_timersub(&tv, tp, &base->tv_clock_diff);
-			base->last_updated_clock_diff = ts.tv_sec;
-		}
-
-		return (0);
+	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
+		return -1;
 	}
-#endif
 
-	return (evutil_gettimeofday(tp, NULL));
+	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
+	    < tp->tv_sec) {
+		struct timeval tv;
+		evutil_gettimeofday(&tv,NULL);
+		evutil_timersub(&tv, tp, &base->tv_clock_diff);
+		base->last_updated_clock_diff = tp->tv_sec;
+	}
+
+	return 0;
 }
 
 int
@@ -401,11 +408,7 @@
 	if (base->tv_cache.tv_sec == 0) {
 		r = evutil_gettimeofday(tv, NULL);
 	} else {
-#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
 		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
-#else
-		*tv = base->tv_cache;
-#endif
 		r = 0;
 	}
 	EVBASE_RELEASE_LOCK(base, th_base_lock);
@@ -428,6 +431,36 @@
 	    gettime(base, &base->tv_cache);
 }
 
+int
+event_base_update_cache_time(struct event_base *base)
+{
+
+	if (!base) {
+		base = current_base;
+		if (!current_base)
+			return -1;
+	}
+
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	if (base->running_loop)
+		update_time_cache(base);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return 0;
+}
+
+static inline struct event *
+event_callback_to_event(struct event_callback *evcb)
+{
+	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
+	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
+}
+
+static inline struct event_callback *
+event_to_event_callback(struct event *ev)
+{
+	return &ev->ev_evcallback;
+}
+
 struct event_base *
 event_init(void)
 {
@@ -481,10 +514,10 @@
 
 	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
 	for (i = 8; environment[i] != '\0'; ++i)
-		environment[i] = EVUTIL_TOUPPER(environment[i]);
-	/* Note that evutil_getenv() ignores the environment entirely if
+		environment[i] = EVUTIL_TOUPPER_(environment[i]);
+	/* Note that evutil_getenv_() ignores the environment entirely if
 	 * we're setuid */
-	return (evutil_getenv(environment) != NULL);
+	return (evutil_getenv_(environment) != NULL);
 }
 
 int
@@ -494,59 +527,39 @@
 }
 
 void
-event_deferred_cb_queue_init(struct deferred_cb_queue *cb)
-{
-	memset(cb, 0, sizeof(struct deferred_cb_queue));
-	TAILQ_INIT(&cb->deferred_cb_list);
-}
-
-/** Helper for the deferred_cb queue: wake up the event base. */
-static void
-notify_base_cbq_callback(struct deferred_cb_queue *cb, void *baseptr)
-{
-	struct event_base *base = baseptr;
-	if (EVBASE_NEED_NOTIFY(base))
-		evthread_notify_base(base);
-}
-
-struct deferred_cb_queue *
-event_base_get_deferred_cb_queue(struct event_base *base)
-{
-	return base ? &base->defer_queue : NULL;
-}
-
-void
 event_enable_debug_mode(void)
 {
-#ifndef _EVENT_DISABLE_DEBUG_MODE
-	if (_event_debug_mode_on)
+#ifndef EVENT__DISABLE_DEBUG_MODE
+	if (event_debug_mode_on_)
 		event_errx(1, "%s was called twice!", __func__);
 	if (event_debug_mode_too_late)
 		event_errx(1, "%s must be called *before* creating any events "
 		    "or event_bases",__func__);
 
-	_event_debug_mode_on = 1;
+	event_debug_mode_on_ = 1;
 
 	HT_INIT(event_debug_map, &global_debug_map);
 #endif
 }
 
-#if 0
 void
 event_disable_debug_mode(void)
 {
+#ifndef EVENT__DISABLE_DEBUG_MODE
 	struct event_debug_entry **ent, *victim;
 
-	EVLOCK_LOCK(_event_debug_map_lock, 0);
+	EVLOCK_LOCK(event_debug_map_lock_, 0);
 	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
 		victim = *ent;
-		ent = HT_NEXT_RMV(event_debug_map,&global_debug_map, ent);
+		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
 		mm_free(victim);
 	}
 	HT_CLEAR(event_debug_map, &global_debug_map);
-	EVLOCK_UNLOCK(_event_debug_map_lock , 0);
-}
+	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);
+
+	event_debug_mode_on_  = 0;
 #endif
+}
 
 struct event_base *
 event_base_new_with_config(const struct event_config *cfg)
@@ -555,7 +568,7 @@
 	struct event_base *base;
 	int should_check_environment;
 
-#ifndef _EVENT_DISABLE_DEBUG_MODE
+#ifndef EVENT__DISABLE_DEBUG_MODE
 	event_debug_mode_too_late = 1;
 #endif
 
@@ -563,30 +576,60 @@
 		event_warn("%s: calloc", __func__);
 		return NULL;
 	}
-	detect_monotonic();
-	gettime(base, &base->event_tv);
 
-	min_heap_ctor(&base->timeheap);
-	TAILQ_INIT(&base->eventqueue);
+	if (cfg)
+		base->flags = cfg->flags;
+
+	should_check_environment =
+	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
+
+	{
+		struct timeval tmp;
+		int precise_time =
+		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
+		int flags;
+		if (should_check_environment && !precise_time) {
+			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
+			base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
+		}
+		flags = precise_time ? EV_MONOT_PRECISE : 0;
+		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
+
+		gettime(base, &tmp);
+	}
+
+	min_heap_ctor_(&base->timeheap);
+
 	base->sig.ev_signal_pair[0] = -1;
 	base->sig.ev_signal_pair[1] = -1;
 	base->th_notify_fd[0] = -1;
 	base->th_notify_fd[1] = -1;
 
-	event_deferred_cb_queue_init(&base->defer_queue);
-	base->defer_queue.notify_fn = notify_base_cbq_callback;
-	base->defer_queue.notify_arg = base;
-	if (cfg)
-		base->flags = cfg->flags;
+	TAILQ_INIT(&base->active_later_queue);
 
-	evmap_io_initmap(&base->io);
-	evmap_signal_initmap(&base->sigmap);
-	event_changelist_init(&base->changelist);
+	evmap_io_initmap_(&base->io);
+	evmap_signal_initmap_(&base->sigmap);
+	event_changelist_init_(&base->changelist);
 
 	base->evbase = NULL;
 
-	should_check_environment =
-	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
+	if (cfg) {
+		memcpy(&base->max_dispatch_time,
+		    &cfg->max_dispatch_interval, sizeof(struct timeval));
+		base->limit_callbacks_after_prio =
+		    cfg->limit_callbacks_after_prio;
+	} else {
+		base->max_dispatch_time.tv_sec = -1;
+		base->limit_callbacks_after_prio = 1;
+	}
+	if (cfg && cfg->max_dispatch_callbacks >= 0) {
+		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
+	} else {
+		base->max_dispatch_callbacks = INT_MAX;
+	}
+	if (base->max_dispatch_callbacks == INT_MAX &&
+	    base->max_dispatch_time.tv_sec == -1)
+		base->limit_callbacks_after_prio = INT_MAX;
 
 	for (i = 0; eventops[i] && !base->evbase; i++) {
 		if (cfg != NULL) {
@@ -617,7 +660,7 @@
 		return NULL;
 	}
 
-	if (evutil_getenv("EVENT_SHOW_METHOD"))
+	if (evutil_getenv_("EVENT_SHOW_METHOD"))
 		event_msgx("libevent using: %s", base->evsel->name);
 
 	/* allocate a single active event queue */
@@ -628,13 +671,15 @@
 
 	/* prepare for threading */
 
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
+	event_debug_created_threadable_ctx_ = 1;
+#endif
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
 	if (EVTHREAD_LOCKING_ENABLED() &&
 	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
 		int r;
-		EVTHREAD_ALLOC_LOCK(base->th_base_lock,
-		    EVTHREAD_LOCKTYPE_RECURSIVE);
-		base->defer_queue.lock = base->th_base_lock;
+		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
 		EVTHREAD_ALLOC_COND(base->current_event_cond);
 		r = evthread_make_base_notifiable(base);
 		if (r<0) {
@@ -645,21 +690,21 @@
 	}
 #endif
 
-#ifdef WIN32
+#ifdef _WIN32
 	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
-		event_base_start_iocp(base, cfg->n_cpus_hint);
+		event_base_start_iocp_(base, cfg->n_cpus_hint);
 #endif
 
 	return (base);
 }
 
 int
-event_base_start_iocp(struct event_base *base, int n_cpus)
+event_base_start_iocp_(struct event_base *base, int n_cpus)
 {
-#ifdef WIN32
+#ifdef _WIN32
 	if (base->iocp)
 		return 0;
-	base->iocp = event_iocp_port_launch(n_cpus);
+	base->iocp = event_iocp_port_launch_(n_cpus);
 	if (!base->iocp) {
 		event_warnx("%s: Couldn't launch IOCP", __func__);
 		return -1;
@@ -671,21 +716,84 @@
 }
 
 void
-event_base_stop_iocp(struct event_base *base)
+event_base_stop_iocp_(struct event_base *base)
 {
-#ifdef WIN32
+#ifdef _WIN32
 	int rv;
 
 	if (!base->iocp)
 		return;
-	rv = event_iocp_shutdown(base->iocp, -1);
+	rv = event_iocp_shutdown_(base->iocp, -1);
 	EVUTIL_ASSERT(rv >= 0);
 	base->iocp = NULL;
 #endif
 }
 
-void
-event_base_free(struct event_base *base)
+static int
+event_base_cancel_single_callback_(struct event_base *base,
+    struct event_callback *evcb,
+    int run_finalizers)
+{
+	int result = 0;
+
+	if (evcb->evcb_flags & EVLIST_INIT) {
+		struct event *ev = event_callback_to_event(evcb);
+		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
+			result = 1;
+		}
+	} else {
+		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+		event_callback_cancel_nolock_(base, evcb, 1);
+		EVBASE_RELEASE_LOCK(base, th_base_lock);
+		result = 1;
+	}
+
+	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
+		switch (evcb->evcb_closure) {
+		case EV_CLOSURE_EVENT_FINALIZE:
+		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
+			struct event *ev = event_callback_to_event(evcb);
+			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
+			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
+				mm_free(ev);
+			break;
+		}
+		case EV_CLOSURE_CB_FINALIZE:
+			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
+			break;
+		default:
+			break;
+		}
+	}
+	return result;
+}
+
+static int event_base_free_queues_(struct event_base *base, int run_finalizers)
+{
+	int deleted = 0, i;
+
+	for (i = 0; i < base->nactivequeues; ++i) {
+		struct event_callback *evcb, *next;
+		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
+			next = TAILQ_NEXT(evcb, evcb_active_next);
+			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
+			evcb = next;
+		}
+	}
+
+	{
+		struct event_callback *evcb;
+		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
+			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
+		}
+	}
+
+	return deleted;
+}
+
+static void
+event_base_free_(struct event_base *base, int run_finalizers)
 {
 	int i, n_deleted=0;
 	struct event *ev;
@@ -696,9 +804,6 @@
 	 * made it with event_init and forgot to hold a reference to it. */
 	if (base == NULL && current_base)
 		base = current_base;
-	/* If we're freeing current_base, there won't be a current_base. */
-	if (base == current_base)
-		current_base = NULL;
 	/* Don't actually free NULL. */
 	if (base == NULL) {
 		event_warnx("%s: no base to free", __func__);
@@ -706,8 +811,8 @@
 	}
 	/* XXX(niels) - check for internal events first */
 
-#ifdef WIN32
-	event_base_stop_iocp(base);
+#ifdef _WIN32
+	event_base_stop_iocp_(base);
 #endif
 
 	/* threading fds if we have them */
@@ -722,15 +827,9 @@
 	}
 
 	/* Delete all non-internal events. */
-	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
-		struct event *next = TAILQ_NEXT(ev, ev_next);
-		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
-			event_del(ev);
-			++n_deleted;
-		}
-		ev = next;
-	}
-	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
+	evmap_delete_all_(base);
+
+	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
 		event_del(ev);
 		++n_deleted;
 	}
@@ -753,93 +852,136 @@
 	if (base->common_timeout_queues)
 		mm_free(base->common_timeout_queues);
 
-	for (i = 0; i < base->nactivequeues; ++i) {
-		for (ev = TAILQ_FIRST(&base->activequeues[i]); ev; ) {
-			struct event *next = TAILQ_NEXT(ev, ev_active_next);
-			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
-				event_del(ev);
-				++n_deleted;
-			}
-			ev = next;
+	for (;;) {
+		/* A finalizer can register yet another finalizer from within
+		 * itself; if that finalizer ends up in active_later_queue it can
+		 * be moved to activequeues, and then we would still have events in
+		 * activequeues after this function returns, which is not what we want
+		 * (we even have an assertion for this).
+		 *
+		 * A simple case is a bufferevent with an underlying bufferevent (i.e. filters).
+		 */
+		int i = event_base_free_queues_(base, run_finalizers);
+		if (!i) {
+			break;
 		}
+		n_deleted += i;
 	}
 
 	if (n_deleted)
 		event_debug(("%s: %d events were still set in base",
 			__func__, n_deleted));
 
+	while (LIST_FIRST(&base->once_events)) {
+		struct event_once *eonce = LIST_FIRST(&base->once_events);
+		LIST_REMOVE(eonce, next_once);
+		mm_free(eonce);
+	}
+
 	if (base->evsel != NULL && base->evsel->dealloc != NULL)
 		base->evsel->dealloc(base);
 
 	for (i = 0; i < base->nactivequeues; ++i)
 		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
 
-	EVUTIL_ASSERT(min_heap_empty(&base->timeheap));
-	min_heap_dtor(&base->timeheap);
+	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
+	min_heap_dtor_(&base->timeheap);
 
 	mm_free(base->activequeues);
 
-	EVUTIL_ASSERT(TAILQ_EMPTY(&base->eventqueue));
+	evmap_io_clear_(&base->io);
+	evmap_signal_clear_(&base->sigmap);
+	event_changelist_freemem_(&base->changelist);
 
-	evmap_io_clear(&base->io);
-	evmap_signal_clear(&base->sigmap);
-	event_changelist_freemem(&base->changelist);
-
-	EVTHREAD_FREE_LOCK(base->th_base_lock, EVTHREAD_LOCKTYPE_RECURSIVE);
+	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
 	EVTHREAD_FREE_COND(base->current_event_cond);
 
+	/* If we're freeing current_base, there won't be a current_base. */
+	if (base == current_base)
+		current_base = NULL;
 	mm_free(base);
 }
 
+void
+event_base_free_nofinalize(struct event_base *base)
+{
+	event_base_free_(base, 0);
+}
+
+void
+event_base_free(struct event_base *base)
+{
+	event_base_free_(base, 1);
+}
+
+/* Fake eventop; used to disable the backend temporarily inside event_reinit
+ * so that we can call event_del() on an event without telling the backend.
+ */
+static int
+nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
+    short events, void *fdinfo)
+{
+	return 0;
+}
+const struct eventop nil_eventop = {
+	"nil",
+	NULL, /* init: unused. */
+	NULL, /* add: unused. */
+	nil_backend_del, /* del: used, so needs to be killed. */
+	NULL, /* dispatch: unused. */
+	NULL, /* dealloc: unused. */
+	0, 0, 0
+};
+
 /* reinitialize the event base after a fork */
 int
 event_reinit(struct event_base *base)
 {
 	const struct eventop *evsel;
 	int res = 0;
-	struct event *ev;
 	int was_notifiable = 0;
+	int had_signal_added = 0;
 
 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 
 	evsel = base->evsel;
 
-#if 0
-	/* Right now, reinit always takes effect, since even if the
-	   backend doesn't require it, the signal socketpair code does.
+	/* check if this event mechanism requires reinit on the backend */
+	if (evsel->need_reinit) {
+		/* We're going to call event_del() on our notify events (the
+		 * ones that tell about signals and wakeup events).  But we
+		 * don't actually want to tell the backend to change its
+		 * state, since it might still share some resource (a kqueue,
+		 * an epoll fd) with the parent process, and we don't want to
+		 * delete the fds from _that_ backend, we temporarily stub out
+		 * delete the fds from _that_ backend, so we temporarily stub out
+		 */
+		base->evsel = &nil_eventop;
+	}
 
-	   XXX
+	/* We need to re-create a new signal-notification fd and a new
+	 * thread-notification fd.  Otherwise, we'll still share those with
+	 * the parent process, which would make any notification sent to them
+	 * get received by one or both of the event loops, more or less at
+	 * random.
 	 */
-	/* check if this event mechanism requires reinit */
-	if (!evsel->need_reinit)
-		goto done;
-#endif
-
-	/* prevent internal delete */
 	if (base->sig.ev_signal_added) {
-		/* we cannot call event_del here because the base has
-		 * not been reinitialized yet. */
-		event_queue_remove(base, &base->sig.ev_signal,
-		    EVLIST_INSERTED);
-		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
-			event_queue_remove(base, &base->sig.ev_signal,
-			    EVLIST_ACTIVE);
-		if (base->sig.ev_signal_pair[0] != -1)
-			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
-		if (base->sig.ev_signal_pair[1] != -1)
-			EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
+		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
+		event_debug_unassign(&base->sig.ev_signal);
+		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
+		had_signal_added = 1;
 		base->sig.ev_signal_added = 0;
 	}
-	if (base->th_notify_fd[0] != -1) {
-		/* we cannot call event_del here because the base has
-		 * not been reinitialized yet. */
+	if (base->sig.ev_signal_pair[0] != -1)
+		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
+	if (base->sig.ev_signal_pair[1] != -1)
+		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
+	if (base->th_notify_fn != NULL) {
 		was_notifiable = 1;
-		event_queue_remove(base, &base->th_notify,
-		    EVLIST_INSERTED);
-		if (base->th_notify.ev_flags & EVLIST_ACTIVE)
-			event_queue_remove(base, &base->th_notify,
-			    EVLIST_ACTIVE);
-		base->sig.ev_signal_added = 0;
+		base->th_notify_fn = NULL;
+	}
+	if (base->th_notify_fd[0] != -1) {
+		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
 		if (base->th_notify_fd[1] != -1)
 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
@@ -848,45 +990,73 @@
 		event_debug_unassign(&base->th_notify);
 	}
 
-	if (base->evsel->dealloc != NULL)
-		base->evsel->dealloc(base);
-	base->evbase = evsel->init(base);
-	if (base->evbase == NULL) {
-		event_errx(1, "%s: could not reinitialize event mechanism",
-		    __func__);
-		res = -1;
-		goto done;
-	}
+	/* Replace the original evsel. */
+	base->evsel = evsel;
 
-	event_changelist_freemem(&base->changelist); /* XXX */
-	evmap_io_clear(&base->io);
-	evmap_signal_clear(&base->sigmap);
+	if (evsel->need_reinit) {
+		/* Reconstruct the backend through brute-force, so that we do
+		 * not share any structures with the parent process. For some
+		 * backends, this is necessary: epoll and kqueue, for
+		 * instance, have events associated with a kernel
+		 * structure. If we didn't reinitialize, we'd share that
+		 * structure with the parent process, and any changes made by
+		 * the parent would affect our backend's behavior (and vice
+		 * versa).
+		 */
+		if (base->evsel->dealloc != NULL)
+			base->evsel->dealloc(base);
+		base->evbase = evsel->init(base);
+		if (base->evbase == NULL) {
+			event_errx(1,
+			   "%s: could not reinitialize event mechanism",
+			   __func__);
+			res = -1;
+			goto done;
+		}
 
-	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
-		if (ev->ev_events & (EV_READ|EV_WRITE)) {
-			if (ev == &base->sig.ev_signal) {
-				/* If we run into the ev_signal event, it's only
-				 * in eventqueue because some signal event was
-				 * added, which made evsig_add re-add ev_signal.
-				 * So don't double-add it. */
-				continue;
-			}
-			if (evmap_io_add(base, ev->ev_fd, ev) == -1)
-				res = -1;
-		} else if (ev->ev_events & EV_SIGNAL) {
-			if (evmap_signal_add(base, (int)ev->ev_fd, ev) == -1)
-				res = -1;
+		/* Empty out the changelist (if any): we are starting from a
+		 * blank slate. */
+		event_changelist_freemem_(&base->changelist);
+
+		/* Tell the event maps to re-inform the backend about all
+		 * pending events. This will make the signal notification
+		 * event get re-created if necessary. */
+		if (evmap_reinit_(base) < 0)
+			res = -1;
+	} else {
+		res = evsig_init_(base);
+		if (res == 0 && had_signal_added) {
+			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
+			if (res == 0)
+				base->sig.ev_signal_added = 1;
 		}
 	}
 
+	/* If we were notifiable before, and nothing just exploded, become
+	 * notifiable again. */
 	if (was_notifiable && res == 0)
-		res = evthread_make_base_notifiable(base);
+		res = evthread_make_base_notifiable_nolock_(base);
 
 done:
 	EVBASE_RELEASE_LOCK(base, th_base_lock);
 	return (res);
 }
 
+/* Get the monotonic time for this event_base' timer */
+int
+event_gettime_monotonic(struct event_base *base, struct timeval *tv)
+{
+  int rv = -1;
+
+  if (base && tv) {
+    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+    rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
+    EVBASE_RELEASE_LOCK(base, th_base_lock);
+  }
+
+  return rv;
+}
+
 const char **
 event_get_supported_methods(void)
 {
@@ -928,6 +1098,9 @@
 		return (NULL);
 
 	TAILQ_INIT(&cfg->entries);
+	cfg->max_dispatch_interval.tv_sec = -1;
+	cfg->max_dispatch_callbacks = INT_MAX;
+	cfg->limit_callbacks_after_prio = 1;
 
 	return (cfg);
 }
@@ -998,6 +1171,23 @@
 }
 
 int
+event_config_set_max_dispatch_interval(struct event_config *cfg,
+    const struct timeval *max_interval, int max_callbacks, int min_priority)
+{
+	if (max_interval)
+		memcpy(&cfg->max_dispatch_interval, max_interval,
+		    sizeof(struct timeval));
+	else
+		cfg->max_dispatch_interval.tv_sec = -1;
+	cfg->max_dispatch_callbacks =
+	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
+	if (min_priority < 0)
+		min_priority = 0;
+	cfg->limit_callbacks_after_prio = min_priority;
+	return (0);
+}
+
+int
 event_priority_init(int npriorities)
 {
 	return event_base_priority_init(current_base, npriorities);
@@ -1006,14 +1196,17 @@
 int
 event_base_priority_init(struct event_base *base, int npriorities)
 {
-	int i;
+	int i, r;
+	r = -1;
+
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 
 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
 	    || npriorities >= EVENT_MAX_PRIORITIES)
-		return (-1);
+		goto err;
 
 	if (npriorities == base->nactivequeues)
-		return (0);
+		goto ok;
 
 	if (base->nactivequeues) {
 		mm_free(base->activequeues);
@@ -1021,11 +1214,11 @@
 	}
 
 	/* Allocate our priority queues */
-	base->activequeues = (struct event_list *)
-	  mm_calloc(npriorities, sizeof(struct event_list));
+	base->activequeues = (struct evcallback_list *)
+	  mm_calloc(npriorities, sizeof(struct evcallback_list));
 	if (base->activequeues == NULL) {
 		event_warn("%s: calloc", __func__);
-		return (-1);
+		goto err;
 	}
 	base->nactivequeues = npriorities;
 
@@ -1033,7 +1226,76 @@
 		TAILQ_INIT(&base->activequeues[i]);
 	}
 
-	return (0);
+ok:
+	r = 0;
+err:
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return (r);
+}
+
+int
+event_base_get_npriorities(struct event_base *base)
+{
+
+	int n;
+	if (base == NULL)
+		base = current_base;
+
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	n = base->nactivequeues;
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return (n);
+}
+
+int
+event_base_get_num_events(struct event_base *base, unsigned int type)
+{
+	int r = 0;
+
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+	if (type & EVENT_BASE_COUNT_ACTIVE)
+		r += base->event_count_active;
+
+	if (type & EVENT_BASE_COUNT_VIRTUAL)
+		r += base->virtual_event_count;
+
+	if (type & EVENT_BASE_COUNT_ADDED)
+		r += base->event_count;
+
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+	return r;
+}
+
+int
+event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
+{
+	int r = 0;
+
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+	if (type & EVENT_BASE_COUNT_ACTIVE) {
+		r += base->event_count_active_max;
+		if (clear)
+			base->event_count_active_max = 0;
+	}
+
+	if (type & EVENT_BASE_COUNT_VIRTUAL) {
+		r += base->virtual_event_count_max;
+		if (clear)
+			base->virtual_event_count_max = 0;
+	}
+
+	if (type & EVENT_BASE_COUNT_ADDED) {
+		r += base->event_count_max;
+		if (clear)
+			base->event_count_max = 0;
+	}
+
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+
+	return r;
 }
 
 /* Returns true iff we're currently watching any events. */
@@ -1147,7 +1409,7 @@
 {
 	struct timeval timeout = head->ev_timeout;
 	timeout.tv_usec &= MICROSECONDS_MASK;
-	event_add_internal(&ctl->timeout_event, &timeout, 1);
+	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
 }
 
 /* Callback: invoked when the timeout for a common timeout queue triggers.
@@ -1168,8 +1430,8 @@
 		    (ev->ev_timeout.tv_sec == now.tv_sec &&
 			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
 			break;
-		event_del_internal(ev);
-		event_active_nolock(ev, EV_TIMEOUT, 1);
+		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+		event_active_nolock_(ev, EV_TIMEOUT, 1);
 	}
 	if (ev)
 		common_timeout_schedule(ctl, &now, ev);
@@ -1256,7 +1518,6 @@
 static inline void
 event_persist_closure(struct event_base *base, struct event *ev)
 {
-	// Define our callback, we use this to store our callback before it's executed
 	void (*evcb_callback)(evutil_socket_t, short, void *);
 
         // Other fields of *ev that must be stored before executing
@@ -1303,7 +1564,7 @@
 			evutil_timeradd(&now, &delay, &run_at);
 		}
 		run_at.tv_usec |= usec_mask;
-		event_add_internal(ev, &run_at, 1);
+		event_add_nolock_(ev, &run_at, 1);
 	}
 
 	// Save our callback before we release the lock
@@ -1324,55 +1585,103 @@
   releasing the lock as we go.  This function requires that the lock be held
   when it's invoked.  Returns -1 if we get a signal or an event_break that
   means we should stop processing any active events now.  Otherwise returns
-  the number of non-internal events that we processed.
+  the number of non-internal event_callbacks that we processed.
 */
 static int
 event_process_active_single_queue(struct event_base *base,
-    struct event_list *activeq)
+    struct evcallback_list *activeq,
+    int max_to_process, const struct timeval *endtime)
 {
-	struct event *ev;
+	struct event_callback *evcb;
 	int count = 0;
 
 	EVUTIL_ASSERT(activeq != NULL);
 
-	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
-		if (ev->ev_events & EV_PERSIST)
-			event_queue_remove(base, ev, EVLIST_ACTIVE);
-		else
-			event_del_internal(ev);
-		if (!(ev->ev_flags & EVLIST_INTERNAL))
+	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
+		struct event *ev=NULL;
+		if (evcb->evcb_flags & EVLIST_INIT) {
+			ev = event_callback_to_event(evcb);
+
+			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
+				event_queue_remove_active(base, evcb);
+			else
+				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+			event_debug((
+			    "event_process_active: event: %p, %s%s%scall %p",
+			    ev,
+			    ev->ev_res & EV_READ ? "EV_READ " : " ",
+			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
+			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
+			    ev->ev_callback));
+		} else {
+			event_queue_remove_active(base, evcb);
+			event_debug(("event_process_active: event_callback %p, "
+				"closure %d, call %p",
+				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
+		}
+
+		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
 			++count;
 
-		event_debug((
-			 "event_process_active: event: %p, %s%scall %p",
-			ev,
-			ev->ev_res & EV_READ ? "EV_READ " : " ",
-			ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
-			ev->ev_callback));
 
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
-		base->current_event = ev;
+		base->current_event = evcb;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
 		base->current_event_waiters = 0;
 #endif
 
-		switch (ev->ev_closure) {
-		case EV_CLOSURE_SIGNAL:
+		switch (evcb->evcb_closure) {
+		case EV_CLOSURE_EVENT_SIGNAL:
+			EVUTIL_ASSERT(ev != NULL);
 			event_signal_closure(base, ev);
 			break;
-		case EV_CLOSURE_PERSIST:
+		case EV_CLOSURE_EVENT_PERSIST:
+			EVUTIL_ASSERT(ev != NULL);
 			event_persist_closure(base, ev);
 			break;
-		default:
-		case EV_CLOSURE_NONE:
+		case EV_CLOSURE_EVENT: {
+			void (*evcb_callback)(evutil_socket_t, short, void *);
+			EVUTIL_ASSERT(ev != NULL);
+			evcb_callback = *ev->ev_callback;
 			EVBASE_RELEASE_LOCK(base, th_base_lock);
-			(*ev->ev_callback)(
-				ev->ev_fd, ev->ev_res, ev->ev_arg);
-			break;
+			evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
+		}
+		break;
+		case EV_CLOSURE_CB_SELF: {
+			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
+			EVBASE_RELEASE_LOCK(base, th_base_lock);
+			evcb_selfcb(evcb, evcb->evcb_arg);
+		}
+		break;
+		case EV_CLOSURE_EVENT_FINALIZE:
+		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
+			void (*evcb_evfinalize)(struct event *, void *);
+			int evcb_closure = evcb->evcb_closure;
+			EVUTIL_ASSERT(ev != NULL);
+			base->current_event = NULL;
+			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
+			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
+			EVBASE_RELEASE_LOCK(base, th_base_lock);
+			evcb_evfinalize(ev, ev->ev_arg);
+			event_debug_note_teardown_(ev);
+			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
+				mm_free(ev);
+		}
+		break;
+		case EV_CLOSURE_CB_FINALIZE: {
+			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
+			base->current_event = NULL;
+			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
+			EVBASE_RELEASE_LOCK(base, th_base_lock);
+			evcb_cbfinalize(evcb, evcb->evcb_arg);
+		}
+		break;
+		default:
+			EVUTIL_ASSERT(0);
 		}
 
 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
 		base->current_event = NULL;
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
 		if (base->current_event_waiters) {
 			base->current_event_waiters = 0;
 			EVTHREAD_COND_BROADCAST(base->current_event_cond);
@@ -1381,6 +1690,15 @@
 
 		if (base->event_break)
 			return -1;
+		if (count >= max_to_process)
+			return count;
+		if (count && endtime) {
+			struct timeval now;
+			update_time_cache(base);
+			gettime(base, &now);
+			if (evutil_timercmp(&now, endtime, >=))
+				return count;
+		}
 		if (base->event_continue)
 			break;
 	}
@@ -1388,37 +1706,6 @@
 }
 
 /*
-   Process up to MAX_DEFERRED of the defered_cb entries in 'queue'.  If
-   *breakptr becomes set to 1, stop.  Requires that we start out holding
-   the lock on 'queue'; releases the lock around 'queue' for each deferred_cb
-   we process.
- */
-static int
-event_process_deferred_callbacks(struct deferred_cb_queue *queue, int *breakptr)
-{
-	int count = 0;
-	struct deferred_cb *cb;
-
-#define MAX_DEFERRED 16
-	while ((cb = TAILQ_FIRST(&queue->deferred_cb_list))) {
-		cb->queued = 0;
-		TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
-		--queue->active_count;
-		UNLOCK_DEFERRED_QUEUE(queue);
-
-		cb->cb(cb, cb->arg);
-
-		LOCK_DEFERRED_QUEUE(queue);
-		if (*breakptr)
-			return -1;
-		if (++count == MAX_DEFERRED)
-			break;
-	}
-#undef MAX_DEFERRED
-	return count;
-}
-
-/*
  * Active events are stored in priority queues.  Lower priorities are always
  * process before higher priorities.  Low priority events can starve high
  * priority ones.
@@ -1428,17 +1715,33 @@
 event_process_active(struct event_base *base)
 {
 	/* Caller must hold th_base_lock */
-	struct event_list *activeq = NULL;
+	struct evcallback_list *activeq = NULL;
 	int i, c = 0;
+	const struct timeval *endtime;
+	struct timeval tv;
+	const int maxcb = base->max_dispatch_callbacks;
+	const int limit_after_prio = base->limit_callbacks_after_prio;
+	if (base->max_dispatch_time.tv_sec >= 0) {
+		update_time_cache(base);
+		gettime(base, &tv);
+		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
+		endtime = &tv;
+	} else {
+		endtime = NULL;
+	}
 
 	for (i = 0; i < base->nactivequeues; ++i) {
 		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
 			base->event_running_priority = i;
 			activeq = &base->activequeues[i];
-			c = event_process_active_single_queue(base, activeq);
+			if (i < limit_after_prio)
+				c = event_process_active_single_queue(base, activeq,
+				    INT_MAX, NULL);
+			else
+				c = event_process_active_single_queue(base, activeq,
+				    maxcb, endtime);
 			if (c < 0) {
-				base->event_running_priority = -1;
-				return -1;
+				goto done;
 			} else if (c > 0)
 				break; /* Processed a real event; do not
 					* consider lower-priority events */
@@ -1447,8 +1750,9 @@
 		}
 	}
 
-	event_process_deferred_callbacks(&base->defer_queue,&base->event_break);
+done:
 	base->event_running_priority = -1;
+
 	return c;
 }
 
@@ -1524,6 +1828,25 @@
 }
 
 int
+event_base_loopcontinue(struct event_base *event_base)
+{
+	int r = 0;
+	if (event_base == NULL)
+		return (-1);
+
+	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
+	event_base->event_continue = 1;
+
+	if (EVBASE_NEED_NOTIFY(event_base)) {
+		r = evthread_notify_base(event_base);
+	} else {
+		r = (0);
+	}
+	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
+	return r;
+}
+
+int
 event_base_got_break(struct event_base *event_base)
 {
 	int res;
@@ -1575,11 +1898,11 @@
 	clear_time_cache(base);
 
 	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
-		evsig_set_base(base);
+		evsig_set_base_(base);
 
 	done = 0;
 
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
 	base->th_owner_id = EVTHREAD_GET_ID();
 #endif
 
@@ -1587,6 +1910,7 @@
 
 	while (!done) {
 		base->event_continue = 0;
+		base->n_deferreds_queued = 0;
 
 		/* Terminate the loop if we have been asked to */
 		if (base->event_gotterm) {
@@ -1597,8 +1921,6 @@
 			break;
 		}
 
-		timeout_correct(base, &tv);
-
 		tv_p = &tv;
 		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
 			timeout_next(base, &tv_p);
@@ -1611,14 +1933,14 @@
 		}
 
 		/* If we have no events, we just exit */
-		if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
+		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
+		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
 			event_debug(("%s: no events registered.", __func__));
 			retval = 1;
 			goto done;
 		}
 
-		/* update last old time */
-		gettime(base, &base->event_tv);
+		event_queue_make_later_events_active(base);
 
 		clear_time_cache(base);
 
@@ -1655,14 +1977,6 @@
 	return (retval);
 }
 
-/* Sets up an event for processing once */
-struct event_once {
-	struct event ev;
-
-	void (*cb)(evutil_socket_t, short, void *);
-	void *arg;
-};
-
 /* One-time callback to implement event_base_once: invokes the user callback,
  * then deletes the allocated storage */
 static void
@@ -1671,6 +1985,9 @@
 	struct event_once *eonce = arg;
 
 	(*eonce->cb)(fd, events, eonce->arg);
+	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
+	LIST_REMOVE(eonce, next_once);
+	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
 	event_debug_unassign(&eonce->ev);
 	mm_free(eonce);
 }
@@ -1691,8 +2008,8 @@
     void *arg, const struct timeval *tv)
 {
 	struct event_once *eonce;
-	struct timeval etv;
 	int res = 0;
+	int activate = 0;
 
 	/* We cannot support signals that just fire once, or persistent
 	 * events. */
@@ -1705,15 +2022,18 @@
 	eonce->cb = callback;
 	eonce->arg = arg;
 
-	if (events == EV_TIMEOUT) {
-		if (tv == NULL) {
-			evutil_timerclear(&etv);
-			tv = &etv;
-		}
-
+	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
-	} else if (events & (EV_READ|EV_WRITE)) {
-		events &= EV_READ|EV_WRITE;
+
+		if (tv == NULL || ! evutil_timerisset(tv)) {
+			/* If the event is going to become active immediately,
+			 * don't put it on the timeout queue.  This is one
+			 * idiom for scheduling a callback, so let's make
+			 * it fast (and order-preserving). */
+			activate = 1;
+		}
+	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
+		events &= EV_READ|EV_WRITE|EV_CLOSED;
 
 		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
 	} else {
@@ -1722,11 +2042,20 @@
 		return (-1);
 	}
 
-	if (res == 0)
-		res = event_add(&eonce->ev, tv);
-	if (res != 0) {
-		mm_free(eonce);
-		return (res);
+	if (res == 0) {
+		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+		if (activate)
+			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
+		else
+			res = event_add_nolock_(&eonce->ev, tv, 0);
+
+		if (res != 0) {
+			mm_free(eonce);
+			return (res);
+		} else {
+			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
+		}
+		EVBASE_RELEASE_LOCK(base, th_base_lock);
 	}
 
 	return (0);
@@ -1737,8 +2066,10 @@
 {
 	if (!base)
 		base = current_base;
+	if (arg == &event_self_cbarg_ptr_)
+		arg = ev;
 
-	_event_debug_assert_not_added(ev);
+	event_debug_assert_not_added_(ev);
 
 	ev->ev_base = base;
 
@@ -1752,29 +2083,29 @@
 	ev->ev_pncalls = NULL;
 
 	if (events & EV_SIGNAL) {
-		if ((events & (EV_READ|EV_WRITE)) != 0) {
+		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
 			event_warnx("%s: EV_SIGNAL is not compatible with "
-			    "EV_READ or EV_WRITE", __func__);
+			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
 			return -1;
 		}
-		ev->ev_closure = EV_CLOSURE_SIGNAL;
+		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
 	} else {
 		if (events & EV_PERSIST) {
 			evutil_timerclear(&ev->ev_io_timeout);
-			ev->ev_closure = EV_CLOSURE_PERSIST;
+			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
 		} else {
-			ev->ev_closure = EV_CLOSURE_NONE;
+			ev->ev_closure = EV_CLOSURE_EVENT;
 		}
 	}
 
-	min_heap_elem_init(ev);
+	min_heap_elem_init_(ev);
 
 	if (base != NULL) {
 		/* by default, we put new events into the middle priority */
 		ev->ev_pri = base->nactivequeues / 2;
 	}
 
-	_event_debug_note_setup(ev);
+	event_debug_note_setup_(ev);
 
 	return 0;
 }
@@ -1786,7 +2117,7 @@
 	if (ev->ev_flags != EVLIST_INIT)
 		return (-1);
 
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 
 	ev->ev_base = base;
 	ev->ev_pri = base->nactivequeues/2;
@@ -1803,6 +2134,26 @@
 	EVUTIL_ASSERT(r == 0);
 }
 
+void *
+event_self_cbarg(void)
+{
+	return &event_self_cbarg_ptr_;
+}
+
+struct event *
+event_base_get_running_event(struct event_base *base)
+{
+	struct event *ev = NULL;
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	if (EVBASE_IN_THREAD(base)) {
+		struct event_callback *evcb = base->current_event;
+		if (evcb->evcb_flags & EVLIST_INIT)
+			ev = event_callback_to_event(evcb);
+	}
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return ev;
+}
+
 struct event *
 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
 {
@@ -1821,11 +2172,13 @@
 void
 event_free(struct event *ev)
 {
-	_event_debug_assert_is_setup(ev);
+	/* This is disabled, so that events which have been finalized can
+	 * still be a valid target for event_free(). */
+	// event_debug_assert_is_setup_(ev);
 
 	/* make sure that this event won't be coming back to haunt us. */
 	event_del(ev);
-	_event_debug_note_teardown(ev);
+	event_debug_note_teardown_(ev);
 	mm_free(ev);
 
 }
@@ -1833,12 +2186,117 @@
 void
 event_debug_unassign(struct event *ev)
 {
-	_event_debug_assert_not_added(ev);
-	_event_debug_note_teardown(ev);
+	event_debug_assert_not_added_(ev);
+	event_debug_note_teardown_(ev);
 
 	ev->ev_flags &= ~EVLIST_INIT;
 }
 
+#define EVENT_FINALIZE_FREE_ 0x10000
+static int
+event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
+	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;
+
+	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+	ev->ev_closure = closure;
+	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
+	event_active_nolock_(ev, EV_FINALIZE, 1);
+	ev->ev_flags |= EVLIST_FINALIZING;
+	return 0;
+}
+
+static int
+event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+	int r;
+	struct event_base *base = ev->ev_base;
+	if (EVUTIL_FAILURE_CHECK(!base)) {
+		event_warnx("%s: event has no event_base set.", __func__);
+		return -1;
+	}
+
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	r = event_finalize_nolock_(base, flags, ev, cb);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return r;
+}
+
+int
+event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+	return event_finalize_impl_(flags, ev, cb);
+}
+
+int
+event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
+{
+	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
+}
+
+void
+event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
+{
+	struct event *ev = NULL;
+	if (evcb->evcb_flags & EVLIST_INIT) {
+		ev = event_callback_to_event(evcb);
+		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
+	} else {
+		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
+	}
+
+	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
+	evcb->evcb_cb_union.evcb_cbfinalize = cb;
+	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
+	evcb->evcb_flags |= EVLIST_FINALIZING;
+}
+
+void
+event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
+{
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	event_callback_finalize_nolock_(base, flags, evcb, cb);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
+ * callback will be invoked on *one of them*, after they have *all* been
+ * finalized. */
+int
+event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
+{
+	int n_pending = 0, i;
+
+	if (base == NULL)
+		base = current_base;
+
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+
+	event_debug(("%s: %d events finalizing", __func__, n_cbs));
+
+	/* At most one can be currently executing; the rest we just
+	 * cancel... But we always make sure that the finalize callback
+	 * runs. */
+	for (i = 0; i < n_cbs; ++i) {
+		struct event_callback *evcb = evcbs[i];
+		if (evcb == base->current_event) {
+			event_callback_finalize_nolock_(base, 0, evcb, cb);
+			++n_pending;
+		} else {
+			event_callback_cancel_nolock_(base, evcb, 0);
+		}
+	}
+
+	if (n_pending == 0) {
+		/* Just do the first one. */
+		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
+	}
+
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return 0;
+}
+
 /*
  * Set's the priority of an event - if an event is already scheduled
  * changing the priority is going to fail.
@@ -1847,7 +2305,7 @@
 int
 event_priority_set(struct event *ev, int pri)
 {
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 
 	if (ev->ev_flags & EVLIST_ACTIVE)
 		return (-1);
@@ -1874,27 +2332,23 @@
 	}
 
 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 
 	if (ev->ev_flags & EVLIST_INSERTED)
-		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
-	if (ev->ev_flags & EVLIST_ACTIVE)
+		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
+	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
 		flags |= ev->ev_res;
 	if (ev->ev_flags & EVLIST_TIMEOUT)
 		flags |= EV_TIMEOUT;
 
-	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
+	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);
 
 	/* See if there is a timeout that we should report */
 	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
 		struct timeval tmp = ev->ev_timeout;
 		tmp.tv_usec &= MICROSECONDS_MASK;
-#if defined(_EVENT_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
 		/* correctly remamp to real time */
 		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
-#else
-		*tv = tmp;
-#endif
 	}
 
 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
@@ -1914,7 +2368,7 @@
 void
 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
 {
-	_event_debug_assert_is_setup(event);
+	event_debug_assert_is_setup_(event);
 
 	if (base_out)
 		*base_out = event->ev_base;
@@ -1937,39 +2391,46 @@
 evutil_socket_t
 event_get_fd(const struct event *ev)
 {
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 	return ev->ev_fd;
 }
 
 struct event_base *
 event_get_base(const struct event *ev)
 {
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 	return ev->ev_base;
 }
 
 short
 event_get_events(const struct event *ev)
 {
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 	return ev->ev_events;
 }
 
 event_callback_fn
 event_get_callback(const struct event *ev)
 {
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 	return ev->ev_callback;
 }
 
 void *
 event_get_callback_arg(const struct event *ev)
 {
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 	return ev->ev_arg;
 }
 
 int
+event_get_priority(const struct event *ev)
+{
+	event_debug_assert_is_setup_(ev);
+	return ev->ev_pri;
+}
+
+int
 event_add(struct event *ev, const struct timeval *tv)
 {
 	int res;
@@ -1981,7 +2442,7 @@
 
 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
 
-	res = event_add_internal(ev, tv, 0);
+	res = event_add_nolock_(ev, tv, 0);
 
 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
 
@@ -1998,15 +2459,15 @@
 	char buf[1];
 	int r;
 	buf[0] = (char) 0;
-#ifdef WIN32
+#ifdef _WIN32
 	r = send(base->th_notify_fd[1], buf, 1, 0);
 #else
 	r = write(base->th_notify_fd[1], buf, 1);
 #endif
-	return (r < 0 && errno != EAGAIN) ? -1 : 0;
+	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
 }
 
-#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
+#ifdef EVENT__HAVE_EVENTFD
 /* Helper callback: wake an event_base from another thread.  This version
  * assumes that you have a working eventfd() implementation. */
 static int
@@ -2022,9 +2483,10 @@
 }
 #endif
 
+
 /** Tell the thread currently running the event_loop for base (if any) that it
  * needs to stop waiting in its dispatch function (if it is) and process all
- * active events and deferred callbacks (if there are any).  */
+ * active callbacks. */
 static int
 evthread_notify_base(struct event_base *base)
 {
@@ -2037,12 +2499,52 @@
 	return base->th_notify_fn(base);
 }
 
+/* Implementation function to remove a timeout on a currently pending event.
+ */
+int
+event_remove_timer_nolock_(struct event *ev)
+{
+	struct event_base *base = ev->ev_base;
+
+	EVENT_BASE_ASSERT_LOCKED(base);
+	event_debug_assert_is_setup_(ev);
+
+	event_debug(("event_remove_timer_nolock: event: %p", ev));
+
+	/* If it's not pending on a timeout, we don't need to do anything. */
+	if (ev->ev_flags & EVLIST_TIMEOUT) {
+		event_queue_remove_timeout(base, ev);
+		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
+	}
+
+	return (0);
+}
+
+int
+event_remove_timer(struct event *ev)
+{
+	int res;
+
+	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
+		event_warnx("%s: event has no event_base set.", __func__);
+		return -1;
+	}
+
+	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+
+	res = event_remove_timer_nolock_(ev);
+
+	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+
+	return (res);
+}
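event_remove_timer() is the public wrapper; it clears only the timeout and leaves any I/O or signal registration pending. A minimal usage sketch, assuming base, fd and read_cb already exist:

	struct timeval five_sec = { 5, 0 };
	struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, read_cb, NULL);

	event_add(ev, &five_sec);	/* pending on EV_READ and on a 5-second timeout */
	/* ... later, once the timeout is no longer wanted ... */
	event_remove_timer(ev);		/* still pending on EV_READ, timeout cleared */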
+
 /* Implementation function to add an event.  Works just like event_add,
  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
  * we treat tv as an absolute time, not as an interval to add to the current
  * time */
-static inline int
-event_add_internal(struct event *ev, const struct timeval *tv,
+int
+event_add_nolock_(struct event *ev, const struct timeval *tv,
     int tv_is_absolute)
 {
 	struct event_base *base = ev->ev_base;
@@ -2050,26 +2552,32 @@
 	int notify = 0;
 
 	EVENT_BASE_ASSERT_LOCKED(base);
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 
 	event_debug((
-		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%scall %p",
+		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
 		 ev,
 		 EV_SOCK_ARG(ev->ev_fd),
 		 ev->ev_events & EV_READ ? "EV_READ " : " ",
 		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
+		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
 		 tv ? "EV_TIMEOUT " : " ",
 		 ev->ev_callback));
 
 	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));
 
+	if (ev->ev_flags & EVLIST_FINALIZING) {
+		/* XXXX debug */
+		return (-1);
+	}
+
 	/*
 	 * prepare for timeout insertion further below, if we get a
 	 * failure on any step, we should not change any state.
 	 */
 	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
-		if (min_heap_reserve(&base->timeheap,
-			1 + min_heap_size(&base->timeheap)) == -1)
+		if (min_heap_reserve_(&base->timeheap,
+			1 + min_heap_size_(&base->timeheap)) == -1)
 			return (-1);  /* ENOMEM == errno */
 	}
 
@@ -2077,22 +2585,23 @@
 	 * callback, and we are not the main thread, then we want to wait
 	 * until the callback is done before we mess with the event, or else
 	 * we can race on ev_ncalls and ev_pncalls below. */
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
-	if (base->current_event == ev && (ev->ev_events & EV_SIGNAL)
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+	if (base->current_event == event_to_event_callback(ev) &&
+	    (ev->ev_events & EV_SIGNAL)
 	    && !EVBASE_IN_THREAD(base)) {
 		++base->current_event_waiters;
 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
 	}
 #endif
 
-	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
-	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
-		if (ev->ev_events & (EV_READ|EV_WRITE))
-			res = evmap_io_add(base, ev->ev_fd, ev);
+	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
+	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
+		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
+			res = evmap_io_add_(base, ev->ev_fd, ev);
 		else if (ev->ev_events & EV_SIGNAL)
-			res = evmap_signal_add(base, (int)ev->ev_fd, ev);
+			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
 		if (res != -1)
-			event_queue_insert(base, ev, EVLIST_INSERTED);
+			event_queue_insert_inserted(base, ev);
 		if (res == 1) {
 			/* evmap says we need to notify the main thread. */
 			notify = 1;
@@ -2107,6 +2616,10 @@
 	if (res != -1 && tv != NULL) {
 		struct timeval now;
 		int common_timeout;
+#ifdef USE_REINSERT_TIMEOUT
+		int was_common;
+		int old_timeout_idx;
+#endif
 
 		/*
 		 * for persistent timeout events, we remember the
@@ -2114,19 +2627,14 @@
 		 *
 		 * If tv_is_absolute, this was already set.
 		 */
-		if (ev->ev_closure == EV_CLOSURE_PERSIST && !tv_is_absolute)
+		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
 			ev->ev_io_timeout = *tv;
 
-		/*
-		 * we already reserved memory above for the case where we
-		 * are not replacing an existing timeout.
-		 */
+#ifndef USE_REINSERT_TIMEOUT
 		if (ev->ev_flags & EVLIST_TIMEOUT) {
-			/* XXX I believe this is needless. */
-			if (min_heap_elt_is_top(ev))
-				notify = 1;
-			event_queue_remove(base, ev, EVLIST_TIMEOUT);
+			event_queue_remove_timeout(base, ev);
 		}
+#endif
 
 		/* Check if it is active due to a timeout.  Rescheduling
 		 * this timeout before the callback can be executed
@@ -2143,12 +2651,17 @@
 				}
 			}
 
-			event_queue_remove(base, ev, EVLIST_ACTIVE);
+			event_queue_remove_active(base, event_to_event_callback(ev));
 		}
 
 		gettime(base, &now);
 
 		common_timeout = is_common_timeout(tv, base);
+#ifdef USE_REINSERT_TIMEOUT
+		was_common = is_common_timeout(&ev->ev_timeout, base);
+		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
+#endif
+
 		if (tv_is_absolute) {
 			ev->ev_timeout = *tv;
 		} else if (common_timeout) {
@@ -2162,10 +2675,15 @@
 		}
 
 		event_debug((
-			 "event_add: timeout in %d seconds, call %p",
-			 (int)tv->tv_sec, ev->ev_callback));
+			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
+			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));
 
-		event_queue_insert(base, ev, EVLIST_TIMEOUT);
+#ifdef USE_REINSERT_TIMEOUT
+		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
+#else
+		event_queue_insert_timeout(base, ev);
+#endif
+
 		if (common_timeout) {
 			struct common_timeout_list *ctl =
 			    get_common_timeout_list(base, &ev->ev_timeout);
@@ -2173,11 +2691,17 @@
 				common_timeout_schedule(ctl, &now, ev);
 			}
 		} else {
+			struct event* top = NULL;
 			/* See if the earliest timeout is now earlier than it
 			 * was before: if so, we will need to tell the main
-			 * thread to wake up earlier than it would
-			 * otherwise. */
-			if (min_heap_elt_is_top(ev))
+			 * thread to wake up earlier than it would otherwise.
+			 * We double check the timeout of the top element to
+			 * handle time distortions due to system suspension.
+			 */
+			if (min_heap_elt_is_top_(ev))
+				notify = 1;
+			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
+					 evutil_timercmp(&top->ev_timeout, &now, <))
 				notify = 1;
 		}
 	}
@@ -2186,13 +2710,13 @@
 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
 		evthread_notify_base(base);
 
-	_event_debug_note_add(ev);
+	event_debug_note_add_(ev);
 
 	return (res);
 }
 
-int
-event_del(struct event *ev)
+static int
+event_del_(struct event *ev, int blocking)
 {
 	int res;
 
@@ -2203,16 +2727,38 @@
 
 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
 
-	res = event_del_internal(ev);
+	res = event_del_nolock_(ev, blocking);
 
 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
 
 	return (res);
 }
 
-/* Helper for event_del: always called with th_base_lock held. */
-static inline int
-event_del_internal(struct event *ev)
+int
+event_del(struct event *ev)
+{
+	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
+}
+
+int
+event_del_block(struct event *ev)
+{
+	return event_del_(ev, EVENT_DEL_BLOCK);
+}
+
+int
+event_del_noblock(struct event *ev)
+{
+	return event_del_(ev, EVENT_DEL_NOBLOCK);
+}
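The three variants differ only in the blocking argument they pass down. A short sketch of when each one is appropriate, assuming ev's callback may be running in the loop thread while another thread deletes it:

	/* Default: wait for a running callback to finish, unless the event was
	 * created with EV_FINALIZE (then it behaves like event_del_noblock()). */
	event_del(ev);

	/* Always wait for a callback running in another thread to return, so it
	 * is safe to free the callback argument as soon as this returns. */
	event_del_block(ev);

	/* Never wait: returns at once even if the callback is still running, so
	 * the callback argument must stay valid until the callback finishes. */
	event_del_noblock(ev);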
+
+/** Helper for event_del: always called with th_base_lock held.
+ *
+ * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
+ * EVEN_IF_FINALIZING} values. See those for more information.
+ */
+int
+event_del_nolock_(struct event *ev, int blocking)
 {
 	struct event_base *base;
 	int res = 0, notify = 0;
@@ -2226,14 +2772,24 @@
 
 	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);
 
+	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
+		if (ev->ev_flags & EVLIST_FINALIZING) {
+			/* XXXX Debug */
+			return 0;
+		}
+	}
+
 	/* If the main thread is currently executing this event's callback,
 	 * and we are not the main thread, then we want to wait until the
 	 * callback is done before we start removing the event.  That way,
 	 * when this function returns, it will be safe to free the
 	 * user-supplied argument. */
 	base = ev->ev_base;
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
-	if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+	if (blocking != EVENT_DEL_NOBLOCK &&
+	    base->current_event == event_to_event_callback(ev) &&
+	    !EVBASE_IN_THREAD(base) &&
+	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
 		++base->current_event_waiters;
 		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
 	}
@@ -2257,18 +2813,20 @@
 		 * dispatch loop early anyway, so we wouldn't gain anything by
 		 * doing it.
 		 */
-		event_queue_remove(base, ev, EVLIST_TIMEOUT);
+		event_queue_remove_timeout(base, ev);
 	}
 
 	if (ev->ev_flags & EVLIST_ACTIVE)
-		event_queue_remove(base, ev, EVLIST_ACTIVE);
+		event_queue_remove_active(base, event_to_event_callback(ev));
+	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
+		event_queue_remove_active_later(base, event_to_event_callback(ev));
 
 	if (ev->ev_flags & EVLIST_INSERTED) {
-		event_queue_remove(base, ev, EVLIST_INSERTED);
-		if (ev->ev_events & (EV_READ|EV_WRITE))
-			res = evmap_io_del(base, ev->ev_fd, ev);
+		event_queue_remove_inserted(base, ev);
+		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
+			res = evmap_io_del_(base, ev->ev_fd, ev);
 		else
-			res = evmap_signal_del(base, (int)ev->ev_fd, ev);
+			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
 		if (res == 1) {
 			/* evmap says we need to notify the main thread. */
 			notify = 1;
@@ -2280,7 +2838,7 @@
 	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
 		evthread_notify_base(base);
 
-	_event_debug_note_del(ev);
+	event_debug_note_del_(ev);
 
 	return (res);
 }
@@ -2295,41 +2853,54 @@
 
 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
 
-	_event_debug_assert_is_setup(ev);
+	event_debug_assert_is_setup_(ev);
 
-	event_active_nolock(ev, res, ncalls);
+	event_active_nolock_(ev, res, ncalls);
 
 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
 }
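event_active() remains the public way to force a callback to run without the underlying condition occurring; a one-line usage sketch, assuming ev was created with event_new() (the ncalls argument only matters for signal events):

	/* From any thread: queue ev's callback with EV_READ as its result. */
	event_active(ev, EV_READ, 0);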
 
 
 void
-event_active_nolock(struct event *ev, int res, short ncalls)
+event_active_nolock_(struct event *ev, int res, short ncalls)
 {
 	struct event_base *base;
 
 	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
 		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));
 
+	base = ev->ev_base;
+	EVENT_BASE_ASSERT_LOCKED(base);
 
-	/* We get different kinds of events, add them together */
-	if (ev->ev_flags & EVLIST_ACTIVE) {
-		ev->ev_res |= res;
+	if (ev->ev_flags & EVLIST_FINALIZING) {
+		/* XXXX debug */
 		return;
 	}
 
-	base = ev->ev_base;
-
-	EVENT_BASE_ASSERT_LOCKED(base);
-
-	ev->ev_res = res;
+	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
+	default:
+	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
+		EVUTIL_ASSERT(0);
+		break;
+	case EVLIST_ACTIVE:
+		/* We get different kinds of events, add them together */
+		ev->ev_res |= res;
+		return;
+	case EVLIST_ACTIVE_LATER:
+		ev->ev_res |= res;
+		break;
+	case 0:
+		ev->ev_res = res;
+		break;
+	}
 
 	if (ev->ev_pri < base->event_running_priority)
 		base->event_continue = 1;
 
 	if (ev->ev_events & EV_SIGNAL) {
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
-		if (base->current_event == ev && !EVBASE_IN_THREAD(base)) {
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+		if (base->current_event == event_to_event_callback(ev) &&
+		    !EVBASE_IN_THREAD(base)) {
 			++base->current_event_waiters;
 			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
 		}
@@ -2338,60 +2909,179 @@
 		ev->ev_pncalls = NULL;
 	}
 
-	event_queue_insert(base, ev, EVLIST_ACTIVE);
+	event_callback_activate_nolock_(base, event_to_event_callback(ev));
+}
+
+void
+event_active_later_(struct event *ev, int res)
+{
+	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
+	event_active_later_nolock_(ev, res);
+	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
+}
+
+void
+event_active_later_nolock_(struct event *ev, int res)
+{
+	struct event_base *base = ev->ev_base;
+	EVENT_BASE_ASSERT_LOCKED(base);
+
+	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
+		/* We get different kinds of events, add them together */
+		ev->ev_res |= res;
+		return;
+	}
+
+	ev->ev_res = res;
+
+	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
+}
+
+int
+event_callback_activate_(struct event_base *base,
+    struct event_callback *evcb)
+{
+	int r;
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	r = event_callback_activate_nolock_(base, evcb);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return r;
+}
+
+int
+event_callback_activate_nolock_(struct event_base *base,
+    struct event_callback *evcb)
+{
+	int r = 1;
+
+	if (evcb->evcb_flags & EVLIST_FINALIZING)
+		return 0;
+
+	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
+	default:
+		EVUTIL_ASSERT(0);
+	case EVLIST_ACTIVE_LATER:
+		event_queue_remove_active_later(base, evcb);
+		r = 0;
+		break;
+	case EVLIST_ACTIVE:
+		return 0;
+	case 0:
+		break;
+	}
+
+	event_queue_insert_active(base, evcb);
 
 	if (EVBASE_NEED_NOTIFY(base))
 		evthread_notify_base(base);
+
+	return r;
+}
+
+int
+event_callback_activate_later_nolock_(struct event_base *base,
+    struct event_callback *evcb)
+{
+	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
+		return 0;
+
+	event_queue_insert_active_later(base, evcb);
+	if (EVBASE_NEED_NOTIFY(base))
+		evthread_notify_base(base);
+	return 1;
 }
 
 void
-event_deferred_cb_init(struct deferred_cb *cb, deferred_cb_fn fn, void *arg)
+event_callback_init_(struct event_base *base,
+    struct event_callback *cb)
 {
-	memset(cb, 0, sizeof(struct deferred_cb));
-	cb->cb = fn;
-	cb->arg = arg;
+	memset(cb, 0, sizeof(*cb));
+	cb->evcb_pri = base->nactivequeues - 1;
+}
+
+int
+event_callback_cancel_(struct event_base *base,
+    struct event_callback *evcb)
+{
+	int r;
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	r = event_callback_cancel_nolock_(base, evcb, 0);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return r;
+}
+
+int
+event_callback_cancel_nolock_(struct event_base *base,
+    struct event_callback *evcb, int even_if_finalizing)
+{
+	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
+		return 0;
+
+	if (evcb->evcb_flags & EVLIST_INIT)
+		return event_del_nolock_(event_callback_to_event(evcb),
+		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);
+
+	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
+	default:
+	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
+		EVUTIL_ASSERT(0);
+		break;
+	case EVLIST_ACTIVE:
+		/* The callback is currently active: just remove it from its queue. */
+		event_queue_remove_active(base, evcb);
+		return 0;
+	case EVLIST_ACTIVE_LATER:
+		event_queue_remove_active_later(base, evcb);
+		break;
+	case 0:
+		break;
+	}
+
+	return 0;
 }
 
 void
-event_deferred_cb_cancel(struct deferred_cb_queue *queue,
-    struct deferred_cb *cb)
+event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
 {
-	if (!queue) {
-		if (current_base)
-			queue = &current_base->defer_queue;
-		else
-			return;
-	}
-
-	LOCK_DEFERRED_QUEUE(queue);
-	if (cb->queued) {
-		TAILQ_REMOVE(&queue->deferred_cb_list, cb, cb_next);
-		--queue->active_count;
-		cb->queued = 0;
-	}
-	UNLOCK_DEFERRED_QUEUE(queue);
+	memset(cb, 0, sizeof(*cb));
+	cb->evcb_cb_union.evcb_selfcb = fn;
+	cb->evcb_arg = arg;
+	cb->evcb_pri = priority;
+	cb->evcb_closure = EV_CLOSURE_CB_SELF;
 }
 
 void
-event_deferred_cb_schedule(struct deferred_cb_queue *queue,
-    struct deferred_cb *cb)
+event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
 {
-	if (!queue) {
-		if (current_base)
-			queue = &current_base->defer_queue;
-		else
-			return;
-	}
+	cb->evcb_pri = priority;
+}
 
-	LOCK_DEFERRED_QUEUE(queue);
-	if (!cb->queued) {
-		cb->queued = 1;
-		TAILQ_INSERT_TAIL(&queue->deferred_cb_list, cb, cb_next);
-		++queue->active_count;
-		if (queue->notify_fn)
-			queue->notify_fn(queue, queue->notify_arg);
+void
+event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
+{
+	if (!base)
+		base = current_base;
+	event_callback_cancel_(base, cb);
+}
+
+#define MAX_DEFERREDS_QUEUED 32
+int
+event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
+{
+	int r = 1;
+	if (!base)
+		base = current_base;
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
+		r = event_callback_activate_later_nolock_(base, cb);
+	} else {
+		r = event_callback_activate_nolock_(base, cb);
+		if (r) {
+			++base->n_deferreds_queued;
+		}
 	}
-	UNLOCK_DEFERRED_QUEUE(queue);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return r;
 }
 
 static int
@@ -2403,7 +3093,7 @@
 	struct timeval *tv = *tv_p;
 	int res = 0;
 
-	ev = min_heap_top(&base->timeheap);
+	ev = min_heap_top_(&base->timeheap);
 
 	if (ev == NULL) {
 		/* if no time-based events are active wait for I/O */
@@ -2425,70 +3115,12 @@
 
 	EVUTIL_ASSERT(tv->tv_sec >= 0);
 	EVUTIL_ASSERT(tv->tv_usec >= 0);
-	event_debug(("timeout_next: in %d seconds", (int)tv->tv_sec));
+	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
 
 out:
 	return (res);
 }
 
-/*
- * Determines if the time is running backwards by comparing the current time
- * against the last time we checked.  Not needed when using clock monotonic.
- * If time is running backwards, we adjust the firing time of every event by
- * the amount that time seems to have jumped.
- */
-static void
-timeout_correct(struct event_base *base, struct timeval *tv)
-{
-	/* Caller must hold th_base_lock. */
-	struct event **pev;
-	unsigned int size;
-	struct timeval off;
-	int i;
-
-	if (use_monotonic)
-		return;
-
-	/* Check if time is running backwards */
-	gettime(base, tv);
-
-	if (evutil_timercmp(tv, &base->event_tv, >=)) {
-		base->event_tv = *tv;
-		return;
-	}
-
-	event_debug(("%s: time is running backwards, corrected",
-		    __func__));
-	evutil_timersub(&base->event_tv, tv, &off);
-
-	/*
-	 * We can modify the key element of the node without destroying
-	 * the minheap property, because we change every element.
-	 */
-	pev = base->timeheap.p;
-	size = base->timeheap.n;
-	for (; size-- > 0; ++pev) {
-		struct timeval *ev_tv = &(**pev).ev_timeout;
-		evutil_timersub(ev_tv, &off, ev_tv);
-	}
-	for (i=0; i<base->n_common_timeouts; ++i) {
-		struct event *ev;
-		struct common_timeout_list *ctl =
-		    base->common_timeout_queues[i];
-		TAILQ_FOREACH(ev, &ctl->events,
-		    ev_timeout_pos.ev_next_with_common_timeout) {
-			struct timeval *ev_tv = &ev->ev_timeout;
-			ev_tv->tv_usec &= MICROSECONDS_MASK;
-			evutil_timersub(ev_tv, &off, ev_tv);
-			ev_tv->tv_usec |= COMMON_TIMEOUT_MAGIC |
-			    (i<<COMMON_TIMEOUT_IDX_SHIFT);
-		}
-	}
-
-	/* Now remember what the new time turned out to be. */
-	base->event_tv = *tv;
-}
-
 /* Activate every event whose timeout has elapsed. */
 static void
 timeout_process(struct event_base *base)
@@ -2497,64 +3129,151 @@
 	struct timeval now;
 	struct event *ev;
 
-	if (min_heap_empty(&base->timeheap)) {
+	if (min_heap_empty_(&base->timeheap)) {
 		return;
 	}
 
 	gettime(base, &now);
 
-	while ((ev = min_heap_top(&base->timeheap))) {
+	while ((ev = min_heap_top_(&base->timeheap))) {
 		if (evutil_timercmp(&ev->ev_timeout, &now, >))
 			break;
 
 		/* delete this event from the I/O queues */
-		event_del_internal(ev);
+		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
 
-		event_debug(("timeout_process: call %p",
-			 ev->ev_callback));
-		event_active_nolock(ev, EV_TIMEOUT, 1);
+		event_debug(("timeout_process: event: %p, call %p",
+			 ev, ev->ev_callback));
+		event_active_nolock_(ev, EV_TIMEOUT, 1);
 	}
 }
 
-/* Remove 'ev' from 'queue' (EVLIST_...) in base. */
+#if (EVLIST_INTERNAL >> 4) != 1
+#error "Mismatch for value of EVLIST_INTERNAL"
+#endif
+
+#ifndef MAX
+#define MAX(a,b) (((a)>(b))?(a):(b))
+#endif
+
+#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)
+
+/* These are a fancy way to spell
+     if (!(flags & EVLIST_INTERNAL))
+         base->event_count--/++;
+*/
+#define DECR_EVENT_COUNT(base,flags) \
+	((base)->event_count -= (~((flags) >> 4) & 1))
+#define INCR_EVENT_COUNT(base,flags) do {					\
+	((base)->event_count += (~((flags) >> 4) & 1));				\
+	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
+} while (0)
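The compile-time check above pins EVLIST_INTERNAL at 0x10, which is what makes the shift trick work: bit 0 of (flags >> 4) is exactly the internal flag, so the count only moves for non-internal events. A tiny standalone check of the arithmetic (delta() is a hypothetical helper, not in the source):

	#include <assert.h>

	static int delta(int flags) { return ~(flags >> 4) & 1; }

	int main(void)
	{
		assert(delta(0x00) == 1);	/* ordinary event: counted */
		assert(delta(0x10) == 0);	/* EVLIST_INTERNAL set: not counted */
		return 0;
	}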
+
 static void
-event_queue_remove(struct event_base *base, struct event *ev, int queue)
+event_queue_remove_inserted(struct event_base *base, struct event *ev)
 {
 	EVENT_BASE_ASSERT_LOCKED(base);
-
-	if (!(ev->ev_flags & queue)) {
+	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
 		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
-		    ev, EV_SOCK_ARG(ev->ev_fd), queue);
+		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
+		return;
+	}
+	DECR_EVENT_COUNT(base, ev->ev_flags);
+	ev->ev_flags &= ~EVLIST_INSERTED;
+}
+static void
+event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
+{
+	EVENT_BASE_ASSERT_LOCKED(base);
+	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
+		event_errx(1, "%s: %p not on queue %x", __func__,
+			   evcb, EVLIST_ACTIVE);
+		return;
+	}
+	DECR_EVENT_COUNT(base, evcb->evcb_flags);
+	evcb->evcb_flags &= ~EVLIST_ACTIVE;
+	base->event_count_active--;
+
+	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
+	    evcb, evcb_active_next);
+}
+static void
+event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
+{
+	EVENT_BASE_ASSERT_LOCKED(base);
+	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
+		event_errx(1, "%s: %p not on queue %x", __func__,
+			   evcb, EVLIST_ACTIVE_LATER);
+		return;
+	}
+	DECR_EVENT_COUNT(base, evcb->evcb_flags);
+	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
+	base->event_count_active--;
+
+	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
+}
+static void
+event_queue_remove_timeout(struct event_base *base, struct event *ev)
+{
+	EVENT_BASE_ASSERT_LOCKED(base);
+	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
+		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
+		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
+		return;
+	}
+	DECR_EVENT_COUNT(base, ev->ev_flags);
+	ev->ev_flags &= ~EVLIST_TIMEOUT;
+
+	if (is_common_timeout(&ev->ev_timeout, base)) {
+		struct common_timeout_list *ctl =
+		    get_common_timeout_list(base, &ev->ev_timeout);
+		TAILQ_REMOVE(&ctl->events, ev,
+		    ev_timeout_pos.ev_next_with_common_timeout);
+	} else {
+		min_heap_erase_(&base->timeheap, ev);
+	}
+}
+
+#ifdef USE_REINSERT_TIMEOUT
+/* Remove and reinsert 'ev' into the timeout queue. */
+static void
+event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
+    int was_common, int is_common, int old_timeout_idx)
+{
+	struct common_timeout_list *ctl;
+	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
+		event_queue_insert_timeout(base, ev);
 		return;
 	}
 
-	if (~ev->ev_flags & EVLIST_INTERNAL)
-		base->event_count--;
-
-	ev->ev_flags &= ~queue;
-	switch (queue) {
-	case EVLIST_INSERTED:
-		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
+	switch ((was_common<<1) | is_common) {
+	case 3: /* Changing from one common timeout to another */
+		ctl = base->common_timeout_queues[old_timeout_idx];
+		TAILQ_REMOVE(&ctl->events, ev,
+		    ev_timeout_pos.ev_next_with_common_timeout);
+		ctl = get_common_timeout_list(base, &ev->ev_timeout);
+		insert_common_timeout_inorder(ctl, ev);
 		break;
-	case EVLIST_ACTIVE:
-		base->event_count_active--;
-		TAILQ_REMOVE(&base->activequeues[ev->ev_pri],
-		    ev, ev_active_next);
+	case 2: /* Was common; is no longer common */
+		ctl = base->common_timeout_queues[old_timeout_idx];
+		TAILQ_REMOVE(&ctl->events, ev,
+		    ev_timeout_pos.ev_next_with_common_timeout);
+		min_heap_push_(&base->timeheap, ev);
 		break;
-	case EVLIST_TIMEOUT:
-		if (is_common_timeout(&ev->ev_timeout, base)) {
-			struct common_timeout_list *ctl =
-			    get_common_timeout_list(base, &ev->ev_timeout);
-			TAILQ_REMOVE(&ctl->events, ev,
-			    ev_timeout_pos.ev_next_with_common_timeout);
-		} else {
-			min_heap_erase(&base->timeheap, ev);
-		}
+	case 1: /* Wasn't common; has become common. */
+		min_heap_erase_(&base->timeheap, ev);
+		ctl = get_common_timeout_list(base, &ev->ev_timeout);
+		insert_common_timeout_inorder(ctl, ev);
+		break;
+	case 0: /* was in heap; is still on heap. */
+		min_heap_adjust_(&base->timeheap, ev);
 		break;
 	default:
-		event_errx(1, "%s: unknown queue %x", __func__, queue);
+		EVUTIL_ASSERT(0); /* unreachable */
+		break;
 	}
 }
+#endif
 
 /* Add 'ev' to the common timeout list in 'ctl'. */
 static void
@@ -2588,44 +3307,95 @@
 }
 
 static void
-event_queue_insert(struct event_base *base, struct event *ev, int queue)
+event_queue_insert_inserted(struct event_base *base, struct event *ev)
 {
 	EVENT_BASE_ASSERT_LOCKED(base);
 
-	if (ev->ev_flags & queue) {
-		/* Double insertion is possible for active events */
-		if (queue & EVLIST_ACTIVE)
-			return;
-
-		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on queue %x", __func__,
-		    ev, EV_SOCK_ARG(ev->ev_fd), queue);
+	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
+		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
+		    ev, EV_SOCK_ARG(ev->ev_fd));
 		return;
 	}
 
-	if (~ev->ev_flags & EVLIST_INTERNAL)
-		base->event_count++;
+	INCR_EVENT_COUNT(base, ev->ev_flags);
 
-	ev->ev_flags |= queue;
-	switch (queue) {
-	case EVLIST_INSERTED:
-		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
-		break;
-	case EVLIST_ACTIVE:
-		base->event_count_active++;
-		TAILQ_INSERT_TAIL(&base->activequeues[ev->ev_pri],
-		    ev,ev_active_next);
-		break;
-	case EVLIST_TIMEOUT: {
-		if (is_common_timeout(&ev->ev_timeout, base)) {
-			struct common_timeout_list *ctl =
-			    get_common_timeout_list(base, &ev->ev_timeout);
-			insert_common_timeout_inorder(ctl, ev);
-		} else
-			min_heap_push(&base->timeheap, ev);
-		break;
+	ev->ev_flags |= EVLIST_INSERTED;
+}
+
+static void
+event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
+{
+	EVENT_BASE_ASSERT_LOCKED(base);
+
+	if (evcb->evcb_flags & EVLIST_ACTIVE) {
+		/* Double insertion is possible for active events */
+		return;
 	}
-	default:
-		event_errx(1, "%s: unknown queue %x", __func__, queue);
+
+	INCR_EVENT_COUNT(base, evcb->evcb_flags);
+
+	evcb->evcb_flags |= EVLIST_ACTIVE;
+
+	base->event_count_active++;
+	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
+	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
+	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
+	    evcb, evcb_active_next);
+}
+
+static void
+event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
+{
+	EVENT_BASE_ASSERT_LOCKED(base);
+	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
+		/* Double insertion is possible */
+		return;
+	}
+
+	INCR_EVENT_COUNT(base, evcb->evcb_flags);
+	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
+	base->event_count_active++;
+	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
+	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
+	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
+}
+
+static void
+event_queue_insert_timeout(struct event_base *base, struct event *ev)
+{
+	EVENT_BASE_ASSERT_LOCKED(base);
+
+	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
+		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
+		    ev, EV_SOCK_ARG(ev->ev_fd));
+		return;
+	}
+
+	INCR_EVENT_COUNT(base, ev->ev_flags);
+
+	ev->ev_flags |= EVLIST_TIMEOUT;
+
+	if (is_common_timeout(&ev->ev_timeout, base)) {
+		struct common_timeout_list *ctl =
+		    get_common_timeout_list(base, &ev->ev_timeout);
+		insert_common_timeout_inorder(ctl, ev);
+	} else {
+		min_heap_push_(&base->timeheap, ev);
+	}
+}
+
+static void
+event_queue_make_later_events_active(struct event_base *base)
+{
+	struct event_callback *evcb;
+	EVENT_BASE_ASSERT_LOCKED(base);
+
+	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
+		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
+		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
+		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
+		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
+		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
 	}
 }
 
@@ -2634,13 +3404,13 @@
 const char *
 event_get_version(void)
 {
-	return (_EVENT_VERSION);
+	return (EVENT__VERSION);
 }
 
 ev_uint32_t
 event_get_version_number(void)
 {
-	return (_EVENT_NUMERIC_VERSION);
+	return (EVENT__NUMERIC_VERSION);
 }
 
 /*
@@ -2654,16 +3424,19 @@
 	return (current_base->evsel->name);
 }
 
-#ifndef _EVENT_DISABLE_MM_REPLACEMENT
-static void *(*_mm_malloc_fn)(size_t sz) = NULL;
-static void *(*_mm_realloc_fn)(void *p, size_t sz) = NULL;
-static void (*_mm_free_fn)(void *p) = NULL;
+#ifndef EVENT__DISABLE_MM_REPLACEMENT
+static void *(*mm_malloc_fn_)(size_t sz) = NULL;
+static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
+static void (*mm_free_fn_)(void *p) = NULL;
 
 void *
 event_mm_malloc_(size_t sz)
 {
-	if (_mm_malloc_fn)
-		return _mm_malloc_fn(sz);
+	if (sz == 0)
+		return NULL;
+
+	if (mm_malloc_fn_)
+		return mm_malloc_fn_(sz);
 	else
 		return malloc(sz);
 }
@@ -2671,38 +3444,65 @@
 void *
 event_mm_calloc_(size_t count, size_t size)
 {
-	if (_mm_malloc_fn) {
+	if (count == 0 || size == 0)
+		return NULL;
+
+	if (mm_malloc_fn_) {
 		size_t sz = count * size;
-		void *p = _mm_malloc_fn(sz);
+		void *p = NULL;
+		if (count > EV_SIZE_MAX / size)
+			goto error;
+		p = mm_malloc_fn_(sz);
 		if (p)
-			memset(p, 0, sz);
+			return memset(p, 0, sz);
+	} else {
+		void *p = calloc(count, size);
+#ifdef _WIN32
+		/* Windows calloc doesn't reliably set ENOMEM */
+		if (p == NULL)
+			goto error;
+#endif
 		return p;
-	} else
-		return calloc(count, size);
+	}
+
+error:
+	errno = ENOMEM;
+	return NULL;
 }
 
 char *
 event_mm_strdup_(const char *str)
 {
-	if (_mm_malloc_fn) {
+	if (!str) {
+		errno = EINVAL;
+		return NULL;
+	}
+
+	if (mm_malloc_fn_) {
 		size_t ln = strlen(str);
-		void *p = _mm_malloc_fn(ln+1);
+		void *p = NULL;
+		if (ln == EV_SIZE_MAX)
+			goto error;
+		p = mm_malloc_fn_(ln+1);
 		if (p)
-			memcpy(p, str, ln+1);
-		return p;
+			return memcpy(p, str, ln+1);
 	} else
-#ifdef WIN32
+#ifdef _WIN32
 		return _strdup(str);
 #else
 		return strdup(str);
 #endif
+
+error:
+	errno = ENOMEM;
+	return NULL;
 }
 
 void *
 event_mm_realloc_(void *ptr, size_t sz)
 {
-	if (_mm_realloc_fn)
-		return _mm_realloc_fn(ptr, sz);
+	if (mm_realloc_fn_)
+		return mm_realloc_fn_(ptr, sz);
 	else
 		return realloc(ptr, sz);
 }
@@ -2710,8 +3510,8 @@
 void
 event_mm_free_(void *ptr)
 {
-	if (_mm_free_fn)
-		_mm_free_fn(ptr);
+	if (mm_free_fn_)
+		mm_free_fn_(ptr);
 	else
 		free(ptr);
 }
@@ -2721,13 +3521,13 @@
 			void *(*realloc_fn)(void *ptr, size_t sz),
 			void (*free_fn)(void *ptr))
 {
-	_mm_malloc_fn = malloc_fn;
-	_mm_realloc_fn = realloc_fn;
-	_mm_free_fn = free_fn;
+	mm_malloc_fn_ = malloc_fn;
+	mm_realloc_fn_ = realloc_fn;
+	mm_free_fn_ = free_fn;
 }
 #endif
 
-#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
+#ifdef EVENT__HAVE_EVENTFD
 static void
 evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
 {
@@ -2750,7 +3550,7 @@
 {
 	unsigned char buf[1024];
 	struct event_base *base = arg;
-#ifdef WIN32
+#ifdef _WIN32
 	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
 		;
 #else
@@ -2766,71 +3566,54 @@
 int
 evthread_make_base_notifiable(struct event_base *base)
 {
-	void (*cb)(evutil_socket_t, short, void *) = evthread_notify_drain_default;
-	int (*notify)(struct event_base *) = evthread_notify_base_default;
-
-	/* XXXX grab the lock here? */
+	int r;
 	if (!base)
 		return -1;
 
-	if (base->th_notify_fd[0] >= 0)
-		return 0;
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	r = evthread_make_base_notifiable_nolock_(base);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return r;
+}
 
-#if defined(_EVENT_HAVE_EVENTFD) && defined(_EVENT_HAVE_SYS_EVENTFD_H)
-#ifndef EFD_CLOEXEC
-#define EFD_CLOEXEC 0
+static int
+evthread_make_base_notifiable_nolock_(struct event_base *base)
+{
+	void (*cb)(evutil_socket_t, short, void *);
+	int (*notify)(struct event_base *);
+
+	if (base->th_notify_fn != NULL) {
+		/* The base is already notifiable: we're doing fine. */
+		return 0;
+	}
+
+#if defined(EVENT__HAVE_WORKING_KQUEUE)
+	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
+		base->th_notify_fn = event_kq_notify_base_;
+		/* No need to add an event here; the backend can wake
+		 * itself up just fine. */
+		return 0;
+	}
 #endif
-	base->th_notify_fd[0] = eventfd(0, EFD_CLOEXEC);
+
+#ifdef EVENT__HAVE_EVENTFD
+	base->th_notify_fd[0] = evutil_eventfd_(0,
+	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
 	if (base->th_notify_fd[0] >= 0) {
-		evutil_make_socket_closeonexec(base->th_notify_fd[0]);
+		base->th_notify_fd[1] = -1;
 		notify = evthread_notify_base_eventfd;
 		cb = evthread_notify_drain_eventfd;
-	}
+	} else
 #endif
-#if defined(_EVENT_HAVE_PIPE)
-	if (base->th_notify_fd[0] < 0) {
-		if ((base->evsel->features & EV_FEATURE_FDS)) {
-			if (pipe(base->th_notify_fd) < 0) {
-				event_warn("%s: pipe", __func__);
-			} else {
-				evutil_make_socket_closeonexec(base->th_notify_fd[0]);
-				evutil_make_socket_closeonexec(base->th_notify_fd[1]);
-			}
-		}
+	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
+		notify = evthread_notify_base_default;
+		cb = evthread_notify_drain_default;
+	} else {
+		return -1;
 	}
-#endif
-
-#ifdef WIN32
-#define LOCAL_SOCKETPAIR_AF AF_INET
-#else
-#define LOCAL_SOCKETPAIR_AF AF_UNIX
-#endif
-	if (base->th_notify_fd[0] < 0) {
-		if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0,
-			base->th_notify_fd) == -1) {
-			event_sock_warn(-1, "%s: socketpair", __func__);
-			return (-1);
-		} else {
-			evutil_make_socket_closeonexec(base->th_notify_fd[0]);
-			evutil_make_socket_closeonexec(base->th_notify_fd[1]);
-		}
-	}
-
-	evutil_make_socket_nonblocking(base->th_notify_fd[0]);
 
 	base->th_notify_fn = notify;
 
-	/*
-	  Making the second socket nonblocking is a bit subtle, given that we
-	  ignore any EAGAIN returns when writing to it, and you don't usally
-	  do that for a nonblocking socket. But if the kernel gives us EAGAIN,
-	  then there's no need to add any more data to the buffer, since
-	  the main thread is already either about to wake up and drain it,
-	  or woken up and in the process of draining it.
-	*/
-	if (base->th_notify_fd[1] > 0)
-		evutil_make_socket_nonblocking(base->th_notify_fd[1]);
-
 	/* prepare an event that we can use for wakeup */
 	event_assign(&base->th_notify, base, base->th_notify_fd[0],
 				 EV_READ|EV_PERSIST, cb, base);
@@ -2839,50 +3622,182 @@
 	base->th_notify.ev_flags |= EVLIST_INTERNAL;
 	event_priority_set(&base->th_notify, 0);
 
-	return event_add(&base->th_notify, NULL);
+	return event_add_nolock_(&base->th_notify, NULL, 0);
 }
 
+int
+event_base_foreach_event_nolock_(struct event_base *base,
+    event_base_foreach_event_cb fn, void *arg)
+{
+	int r, i;
+	unsigned u;
+	struct event *ev;
+
+	/* Start out with all the EVLIST_INSERTED events. */
+	if ((r = evmap_foreach_event_(base, fn, arg)))
+		return r;
+
+	/* Okay, now we deal with those events that have timeouts and are in
+	 * the min-heap. */
+	for (u = 0; u < base->timeheap.n; ++u) {
+		ev = base->timeheap.p[u];
+		if (ev->ev_flags & EVLIST_INSERTED) {
+			/* we already processed this one */
+			continue;
+		}
+		if ((r = fn(base, ev, arg)))
+			return r;
+	}
+
+	/* Now for the events in one of the common-timeout queues,
+	 * rather than in the min-heap. */
+	for (i = 0; i < base->n_common_timeouts; ++i) {
+		struct common_timeout_list *ctl =
+		    base->common_timeout_queues[i];
+		TAILQ_FOREACH(ev, &ctl->events,
+		    ev_timeout_pos.ev_next_with_common_timeout) {
+			if (ev->ev_flags & EVLIST_INSERTED) {
+				/* we already processed this one */
+				continue;
+			}
+			if ((r = fn(base, ev, arg)))
+				return r;
+		}
+	}
+
+	/* Finally, we deal with all the active events that we haven't touched
+	 * yet. */
+	for (i = 0; i < base->nactivequeues; ++i) {
+		struct event_callback *evcb;
+		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
+			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
+			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
+				/* This isn't an event (EVLIST_INIT clear), or
+				 * we already processed it (EVLIST_INSERTED or
+				 * EVLIST_TIMEOUT set). */
+				continue;
+			}
+			ev = event_callback_to_event(evcb);
+			if ((r = fn(base, ev, arg)))
+				return r;
+		}
+	}
+
+	return 0;
+}
+
+/* Helper for event_base_dump_events: called on each event in the event base;
+ * dumps only the inserted events. */
+static int
+dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
+{
+	FILE *output = arg;
+	const char *gloss = (e->ev_events & EV_SIGNAL) ?
+	    "sig" : "fd ";
+
+	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
+		return 0;
+
+	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
+	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
+	    (e->ev_events&EV_READ)?" Read":"",
+	    (e->ev_events&EV_WRITE)?" Write":"",
+	    (e->ev_events&EV_CLOSED)?" EOF":"",
+	    (e->ev_events&EV_SIGNAL)?" Signal":"",
+	    (e->ev_events&EV_PERSIST)?" Persist":"",
+	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
+	if (e->ev_flags & EVLIST_TIMEOUT) {
+		struct timeval tv;
+		tv.tv_sec = e->ev_timeout.tv_sec;
+		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
+		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
+		fprintf(output, " Timeout=%ld.%06d",
+		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
+	}
+	fputc('\n', output);
+
+	return 0;
+}
+
+/* Helper for event_base_dump_events: called on each event in the event base;
+ * dumps only the active events. */
+static int
+dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
+{
+	FILE *output = arg;
+	const char *gloss = (e->ev_events & EV_SIGNAL) ?
+	    "sig" : "fd ";
+
+	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
+		return 0;
+
+	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
+	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
+	    (e->ev_res&EV_READ)?" Read":"",
+	    (e->ev_res&EV_WRITE)?" Write":"",
+	    (e->ev_res&EV_CLOSED)?" EOF":"",
+	    (e->ev_res&EV_SIGNAL)?" Signal":"",
+	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
+	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
+	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");
+
+	return 0;
+}
+
+int
+event_base_foreach_event(struct event_base *base,
+    event_base_foreach_event_cb fn, void *arg)
+{
+	int r;
+	if ((!fn) || (!base)) {
+		return -1;
+	}
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	r = event_base_foreach_event_nolock_(base, fn, arg);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	return r;
+}
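event_base_foreach_event() runs the walk above with the base lock held, so the callback must not add, delete or otherwise modify events; returning nonzero from the callback stops the iteration early. A minimal sketch that counts pending read events, assuming base already exists (count_read_events and n_read are hypothetical names):

	static int
	count_read_events(const struct event_base *base, const struct event *ev, void *arg)
	{
		int *n = arg;
		if (event_get_events(ev) & EV_READ)
			++*n;
		return 0;	/* keep iterating */
	}

	/* In the caller: */
	int n_read = 0;
	event_base_foreach_event(base, count_read_events, &n_read);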
+
+
 void
 event_base_dump_events(struct event_base *base, FILE *output)
 {
-	struct event *e;
-	int i;
-	fprintf(output, "Inserted events:\n");
-	TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
-		fprintf(output, "  %p [fd "EV_SOCK_FMT"]%s%s%s%s%s\n",
-				(void*)e, EV_SOCK_ARG(e->ev_fd),
-				(e->ev_events&EV_READ)?" Read":"",
-				(e->ev_events&EV_WRITE)?" Write":"",
-				(e->ev_events&EV_SIGNAL)?" Signal":"",
-				(e->ev_events&EV_TIMEOUT)?" Timeout":"",
-				(e->ev_events&EV_PERSIST)?" Persist":"");
-
-	}
-	for (i = 0; i < base->nactivequeues; ++i) {
-		if (TAILQ_EMPTY(&base->activequeues[i]))
-			continue;
-		fprintf(output, "Active events [priority %d]:\n", i);
-		TAILQ_FOREACH(e, &base->eventqueue, ev_next) {
-			fprintf(output, "  %p [fd "EV_SOCK_FMT"]%s%s%s%s\n",
-					(void*)e, EV_SOCK_ARG(e->ev_fd),
-					(e->ev_res&EV_READ)?" Read active":"",
-					(e->ev_res&EV_WRITE)?" Write active":"",
-					(e->ev_res&EV_SIGNAL)?" Signal active":"",
-					(e->ev_res&EV_TIMEOUT)?" Timeout active":"");
-		}
-	}
-}
-
-void
-event_base_add_virtual(struct event_base *base)
-{
 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
-	base->virtual_event_count++;
+	fprintf(output, "Inserted events:\n");
+	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
+
+	fprintf(output, "Active events:\n");
+	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
 	EVBASE_RELEASE_LOCK(base, th_base_lock);
 }
 
 void
-event_base_del_virtual(struct event_base *base)
+event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
+{
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_active_by_signal(struct event_base *base, int sig)
+{
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	evmap_signal_active_(base, sig, 1);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
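Both helpers activate events without the fd becoming ready or the signal being delivered, which can be useful in tests or when draining state at shutdown. A two-line usage sketch, assuming base and fd already exist:

	/* Run every EV_READ event on fd as if it had become readable. */
	event_base_active_by_fd(base, fd, EV_READ);
	/* Run every event registered for SIGTERM as if the signal had fired. */
	event_base_active_by_signal(base, SIGTERM);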
+
+
+void
+event_base_add_virtual_(struct event_base *base)
+{
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	base->virtual_event_count++;
+	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_del_virtual_(struct event_base *base)
 {
 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
 	EVUTIL_ASSERT(base->virtual_event_count > 0);
@@ -2892,15 +3807,65 @@
 	EVBASE_RELEASE_LOCK(base, th_base_lock);
 }
 
-#ifndef _EVENT_DISABLE_THREAD_SUPPORT
+static void
+event_free_debug_globals_locks(void)
+{
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
+#ifndef EVENT__DISABLE_DEBUG_MODE
+	if (event_debug_map_lock_ != NULL) {
+		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
+		event_debug_map_lock_ = NULL;
+		evthreadimpl_disable_lock_debugging_();
+	}
+#endif /* EVENT__DISABLE_DEBUG_MODE */
+#endif /* EVENT__DISABLE_THREAD_SUPPORT */
+	return;
+}
+
+static void
+event_free_debug_globals(void)
+{
+	event_free_debug_globals_locks();
+}
+
+static void
+event_free_evsig_globals(void)
+{
+	evsig_free_globals_();
+}
+
+static void
+event_free_evutil_globals(void)
+{
+	evutil_free_globals_();
+}
+
+static void
+event_free_globals(void)
+{
+	event_free_debug_globals();
+	event_free_evsig_globals();
+	event_free_evutil_globals();
+}
+
+void
+libevent_global_shutdown(void)
+{
+	event_disable_debug_mode();
+	event_free_globals();
+}
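libevent_global_shutdown() only releases process-wide state; it does not free any event_base. A minimal sketch of the intended call order, assuming a single base:

	#include <event2/event.h>

	int main(void)
	{
		struct event_base *base = event_base_new();
		/* ... set up events and run event_base_dispatch(base) ... */
		event_base_free(base);

		/* Last libevent call in the process: frees the debug locks and
		 * the evsig/evutil globals torn down above. */
		libevent_global_shutdown();
		return 0;
	}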
+
+#ifndef EVENT__DISABLE_THREAD_SUPPORT
 int
 event_global_setup_locks_(const int enable_locks)
 {
-#ifndef _EVENT_DISABLE_DEBUG_MODE
-	EVTHREAD_SETUP_GLOBAL_LOCK(_event_debug_map_lock, 0);
+#ifndef EVENT__DISABLE_DEBUG_MODE
+	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
 #endif
 	if (evsig_global_setup_locks_(enable_locks) < 0)
 		return -1;
+	if (evutil_global_setup_locks_(enable_locks) < 0)
+		return -1;
 	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
 		return -1;
 	return 0;
@@ -2908,11 +3873,21 @@
 #endif
 
 void
-event_base_assert_ok(struct event_base *base)
+event_base_assert_ok_(struct event_base *base)
+{
+	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
+	event_base_assert_ok_nolock_(base);
+	EVBASE_RELEASE_LOCK(base, th_base_lock);
+}
+
+void
+event_base_assert_ok_nolock_(struct event_base *base)
 {
 	int i;
-	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
-	evmap_check_integrity(base);
+	int count;
+
+	/* First do checks on the per-fd and per-signal lists */
+	evmap_check_integrity_(base);
 
 	/* Check the heap property */
 	for (i = 1; i < (int)base->timeheap.n; ++i) {
@@ -2920,7 +3895,7 @@
 		struct event *ev, *p_ev;
 		ev = base->timeheap.p[i];
 		p_ev = base->timeheap.p[parent];
-		EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
+		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
 		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
 		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
 	}
@@ -2929,15 +3904,37 @@
 	for (i = 0; i < base->n_common_timeouts; ++i) {
 		struct common_timeout_list *ctl = base->common_timeout_queues[i];
 		struct event *last=NULL, *ev;
+
+		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
+
 		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
 			if (last)
 				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
-			EVUTIL_ASSERT(ev->ev_flags & EV_TIMEOUT);
+			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
 			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
 			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
 			last = ev;
 		}
 	}
 
-	EVBASE_RELEASE_LOCK(base, th_base_lock);
+	/* Check the active queues. */
+	count = 0;
+	for (i = 0; i < base->nactivequeues; ++i) {
+		struct event_callback *evcb;
+		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
+		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
+			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
+			EVUTIL_ASSERT(evcb->evcb_pri == i);
+			++count;
+		}
+	}
+
+	{
+		struct event_callback *evcb;
+		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
+			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
+			++count;
+		}
+	}
+	EVUTIL_ASSERT(count == base->event_count_active);
 }