/* event.c revision 1.4 (source-viewer header removed) */
      1 /*	$NetBSD: event.c,v 1.4 2017/01/31 23:17:39 christos Exp $	*/
      2 /*
      3  * Copyright (c) 2000-2007 Niels Provos <provos (at) citi.umich.edu>
      4  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  * 3. The name of the author may not be used to endorse or promote products
     15  *    derived from this software without specific prior written permission.
     16  *
     17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27  */
     28 #include "event2/event-config.h"
     29 #include <sys/cdefs.h>
     30 __RCSID("$NetBSD: event.c,v 1.4 2017/01/31 23:17:39 christos Exp $");
     31 #include "evconfig-private.h"
     32 
     33 #ifdef _WIN32
     34 #include <winsock2.h>
     35 #define WIN32_LEAN_AND_MEAN
     36 #include <windows.h>
     37 #undef WIN32_LEAN_AND_MEAN
     38 #endif
     39 #include <sys/types.h>
     40 #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
     41 #include <sys/time.h>
     42 #endif
     43 #include <sys/queue.h>
     44 #ifdef EVENT__HAVE_SYS_SOCKET_H
     45 #include <sys/socket.h>
     46 #endif
     47 #include <stdio.h>
     48 #include <stdlib.h>
     49 #ifdef EVENT__HAVE_UNISTD_H
     50 #include <unistd.h>
     51 #endif
     52 #include <ctype.h>
     53 #include <errno.h>
     54 #include <signal.h>
     55 #include <string.h>
     56 #include <time.h>
     57 #include <limits.h>
     58 
     59 #include "event2/event.h"
     60 #include "event2/event_struct.h"
     61 #include "event2/event_compat.h"
     62 #include "event-internal.h"
     63 #include "defer-internal.h"
     64 #include "evthread-internal.h"
     65 #include "event2/thread.h"
     66 #include "event2/util.h"
     67 #include "log-internal.h"
     68 #include "evmap-internal.h"
     69 #include "iocp-internal.h"
     70 #include "changelist-internal.h"
     71 #define HT_NO_CACHE_HASH_VALUES
     72 #include "ht-internal.h"
     73 #include "util-internal.h"
     74 
     75 
     76 #ifdef EVENT__HAVE_WORKING_KQUEUE
     77 #include "kqueue-internal.h"
     78 #endif
     79 
     80 #ifdef EVENT__HAVE_EVENT_PORTS
     81 extern const struct eventop evportops;
     82 #endif
     83 #ifdef EVENT__HAVE_SELECT
     84 extern const struct eventop selectops;
     85 #endif
     86 #ifdef EVENT__HAVE_POLL
     87 extern const struct eventop pollops;
     88 #endif
     89 #ifdef EVENT__HAVE_EPOLL
     90 extern const struct eventop epollops;
     91 #endif
     92 #ifdef EVENT__HAVE_WORKING_KQUEUE
     93 extern const struct eventop kqops;
     94 #endif
     95 #ifdef EVENT__HAVE_DEVPOLL
     96 extern const struct eventop devpollops;
     97 #endif
     98 #ifdef _WIN32
     99 extern const struct eventop win32ops;
    100 #endif
    101 
/* Array of backends in order of preference.  event_base_new_with_config()
 * walks this list front to back and selects the first compiled-in backend
 * that is not avoided/disabled and that satisfies the required features,
 * so the ordering here is significant. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
	&epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef EVENT__HAVE_POLL
	&pollops,
#endif
#ifdef EVENT__HAVE_SELECT
	&selectops,
#endif
#ifdef _WIN32
	&win32ops,
#endif
	NULL
};
    127 
    128 /* Global state; deprecated */
    129 struct event_base *event_global_current_base_ = NULL;
    130 #define current_base event_global_current_base_
    131 
    132 /* Global state */
    133 
    134 static void *event_self_cbarg_ptr_ = NULL;
    135 
    136 /* Prototypes */
    137 static void	event_queue_insert_active(struct event_base *, struct event_callback *);
    138 static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
    139 static void	event_queue_insert_timeout(struct event_base *, struct event *);
    140 static void	event_queue_insert_inserted(struct event_base *, struct event *);
    141 static void	event_queue_remove_active(struct event_base *, struct event_callback *);
    142 static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
    143 static void	event_queue_remove_timeout(struct event_base *, struct event *);
    144 static void	event_queue_remove_inserted(struct event_base *, struct event *);
    145 static void event_queue_make_later_events_active(struct event_base *base);
    146 
    147 static int evthread_make_base_notifiable_nolock_(struct event_base *base);
    148 static int event_del_(struct event *ev, int blocking);
    149 
    150 #ifdef USE_REINSERT_TIMEOUT
    151 /* This code seems buggy; only turn it on if we find out what the trouble is. */
    152 static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
    153 #endif
    154 
    155 static int	event_haveevents(struct event_base *);
    156 
    157 static int	event_process_active(struct event_base *);
    158 
    159 static int	timeout_next(struct event_base *, struct timeval **);
    160 static void	timeout_process(struct event_base *);
    161 
    162 static inline void	event_signal_closure(struct event_base *, struct event *ev);
    163 static inline void	event_persist_closure(struct event_base *, struct event *ev);
    164 
    165 static int	evthread_notify_base(struct event_base *base);
    166 
    167 static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    168     struct event *ev);
    169 
    170 #ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
	HT_ENTRY(event_debug_entry) node;	/* hashtable linkage */
	const struct event *ptr;		/* the event being tracked */
	unsigned added : 1;			/* nonzero once the event is added */
};
    182 
    183 static inline unsigned
    184 hash_debug_entry(const struct event_debug_entry *e)
    185 {
    186 	/* We need to do this silliness to convince compilers that we
    187 	 * honestly mean to cast e->ptr to an integer, and discard any
    188 	 * part of it that doesn't fit in an unsigned.
    189 	 */
    190 	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
    191 	/* Our hashtable implementation is pretty sensitive to low bits,
    192 	 * and every struct event is over 64 bytes in size, so we can
    193 	 * just say >>6. */
    194 	return (u >> 6);
    195 }
    196 
    197 static inline int
    198 eq_debug_entry(const struct event_debug_entry *a,
    199     const struct event_debug_entry *b)
    200 {
    201 	return a->ptr == b->ptr;
    202 }
    203 
/* True iff event_enable_debug_mode() has been called. */
int event_debug_mode_on_ = 0;


#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
/**
 * @brief debug mode variable which is set for any function/structure that needs
 *        to be shared across threads (if thread support is enabled).
 *
 *        When and if evthreads are initialized, this variable will be evaluated,
 *        and if set to something other than zero, this means the evthread setup
 *        functions were called out of order.
 *
 *        See: "Locks and threading" in the documentation.
 */
int event_debug_created_threadable_ctx_ = 0;
#endif

/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
/* Lock protecting global_debug_map below. */
static void *event_debug_map_lock_ = NULL;
#endif
/* The hashtable of every event that has been setup or added (debug mode). */
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
	HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
    233 
/* Macro: record that ev is now setup (that is, ready for an add).  Inserts
 * the event into global_debug_map with added==0, or resets an existing
 * entry; aborts via event_err() if the tracking entry cannot be allocated. */
#define event_debug_note_setup_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 0;				\
		} else {						\
			dent = mm_malloc(sizeof(*dent));		\
			if (!dent)					\
				event_err(1,				\
				    "Out of memory in debugging code");	\
			dent->ptr = (ev);				\
			dent->added = 0;				\
			HT_INSERT(event_debug_map, &global_debug_map, dent); \
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (/*CONSTCOND*/0)
/* Macro: record that ev is no longer setup; removes and frees its
 * tracking entry, if any. */
#define event_debug_note_teardown_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
		if (dent)						\
			mm_free(dent);					\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (/*CONSTCOND*/0)
/* Macro: record that ev is now added; aborts if the event was never
 * set up (no tracking entry exists). */
#define event_debug_note_add_(ev)	do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 1;				\
		} else {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s: noting an add on a non-setup event %p" \
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (/*CONSTCOND*/0)
/* Macro: record that ev is no longer added; aborts if the event was never
 * set up. */
#define event_debug_note_del_(ev) do {					\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent) {						\
			dent->added = 0;				\
		} else {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s: noting a del on a non-setup event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT		\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	event_debug_mode_too_late = 1;					\
	} while (/*CONSTCOND*/0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect);
 * aborts the process otherwise. */
#define event_debug_assert_is_setup_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (!dent) {						\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s called on a non-initialized event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT\
			    ", flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	} while (/*CONSTCOND*/0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up again); aborts the process otherwise. */
#define event_debug_assert_not_added_(ev) do {				\
	if (event_debug_mode_on_) {					\
		struct event_debug_entry *dent,find;			\
		find.ptr = (ev);					\
		EVLOCK_LOCK(event_debug_map_lock_, 0);			\
		dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
		if (dent && dent->added) {				\
			event_errx(EVENT_ERR_ABORT_,			\
			    "%s called on an already added event %p"	\
			    " (events: 0x%x, fd: "EV_SOCK_FMT", "	\
			    "flags: 0x%x)",				\
			    __func__, (ev), (ev)->ev_events,		\
			    EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags);	\
		}							\
		EVLOCK_UNLOCK(event_debug_map_lock_, 0);		\
	}								\
	} while (/*CONSTCOND*/0)
#else
/* Debug mode compiled out: all of the tracking macros become no-ops. */
#define event_debug_note_setup_(ev) \
	((void)0)
#define event_debug_note_teardown_(ev) \
	((void)0)
#define event_debug_note_add_(ev) \
	((void)0)
#define event_debug_note_del_(ev) \
	((void)0)
#define event_debug_assert_is_setup_(ev) \
	((void)0)
#define event_debug_assert_not_added_(ev) \
	((void)0)
#endif
    362 
/* Convenience: assert that 'base''s th_base_lock is held by the caller. */
#define EVENT_BASE_ASSERT_LOCKED(base)		\
	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5
    369 
/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	/* A nonzero tv_sec marks the cache as valid: hand the cached
	 * value back without touching the clock. */
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	/* At most once per CLOCK_SYNC_INTERVAL seconds, resample the offset
	 * between the wall clock and the monotonic clock so that cached
	 * monotonic times can later be translated back to wall-clock time
	 * (see event_base_gettimeofday_cached). */
	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv,NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}
    399 
/* Store the cached wall-clock time of 'base' into *tv.  If 'base' is NULL,
 * use current_base; with no base at all, or with no cached time, fall back
 * to a real evutil_gettimeofday() call.  Returns 0 on success, or the
 * result of evutil_gettimeofday() on the fallback paths. */
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		/* No cached time: ask the OS directly. */
		r = evutil_gettimeofday(tv, NULL);
	} else {
		/* The cache holds a monotonic timestamp; add the
		 * monotonic-to-wall offset maintained by gettime(). */
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
    420 
/** Make 'base' have no current cached time.  A zero tv_sec is the
 * "cache invalid" sentinel checked by gettime(). */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}
    427 
/** Replace the cached time in 'base' with the current time, unless time
 * caching has been disabled via EVENT_BASE_FLAG_NO_CACHE_TIME. */
static inline void
update_time_cache(struct event_base *base)
{
	/* Invalidate first so gettime() reads the real clock. */
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
	    gettime(base, &base->tv_cache);
}
    436 
/* Public: refresh the cached time on 'base' (current_base when NULL).
 * Only has an effect while the event loop is running.  Returns 0 on
 * success, -1 if there is no base. */
int
event_base_update_cache_time(struct event_base *base)
{

	if (!base) {
		base = current_base;
		if (!current_base)
			return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	/* Outside the loop the cache is deliberately left untouched. */
	if (base->running_loop)
		update_time_cache(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
    453 
/* Downcast an event_callback to its containing struct event.  Only valid
 * when the callback is embedded in a real event (EVLIST_INIT set). */
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}
    460 
/* Every struct event embeds an event_callback; return a pointer to it. */
static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}
    466 
    467 struct event_base *
    468 event_init(void)
    469 {
    470 	struct event_base *base = event_base_new_with_config(NULL);
    471 
    472 	if (base == NULL) {
    473 		event_errx(1, "%s: Unable to construct event_base", __func__);
    474 		return NULL;
    475 	}
    476 
    477 	current_base = base;
    478 
    479 	return (base);
    480 }
    481 
    482 struct event_base *
    483 event_base_new(void)
    484 {
    485 	struct event_base *base = NULL;
    486 	struct event_config *cfg = event_config_new();
    487 	if (cfg) {
    488 		base = event_base_new_with_config(cfg);
    489 		event_config_free(cfg);
    490 	}
    491 	return base;
    492 }
    493 
    494 /** Return true iff 'method' is the name of a method that 'cfg' tells us to
    495  * avoid. */
    496 static int
    497 event_config_is_avoided_method(const struct event_config *cfg,
    498     const char *method)
    499 {
    500 	struct event_config_entry *entry;
    501 
    502 	TAILQ_FOREACH(entry, &cfg->entries, next) {
    503 		if (entry->avoid_method != NULL &&
    504 		    strcmp(entry->avoid_method, method) == 0)
    505 			return (1);
    506 	}
    507 
    508 	return (0);
    509 }
    510 
    511 /** Return true iff 'method' is disabled according to the environment. */
    512 static int
    513 event_is_method_disabled(const char *name)
    514 {
    515 	char environment[64];
    516 	int i;
    517 
    518 	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
    519 	for (i = 8; environment[i] != '\0'; ++i)
    520 		environment[i] = EVUTIL_TOUPPER_(environment[i]);
    521 	/* Note that evutil_getenv_() ignores the environment entirely if
    522 	 * we're setuid */
    523 	return (evutil_getenv_(environment) != NULL);
    524 }
    525 
/* Return the feature bitmask supported by 'base''s selected backend. */
int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}
    531 
/* Turn on debug-mode event tracking.  Must be called at most once, and
 * before any event or event_base has been created; otherwise aborts. */
void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases",__func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}
    547 
/* Turn debug mode back off, freeing every entry in the debug map. */
void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		/* HT_NEXT_RMV removes the current entry while advancing the
		 * cursor, so freeing 'victim' afterwards is safe. */
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);

	event_debug_mode_on_  = 0;
#endif
}
    566 
    567 struct event_base *
    568 event_base_new_with_config(const struct event_config *cfg)
    569 {
    570 	int i;
    571 	struct event_base *base;
    572 	int should_check_environment;
    573 
    574 #ifndef EVENT__DISABLE_DEBUG_MODE
    575 	event_debug_mode_too_late = 1;
    576 #endif
    577 
    578 	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
    579 		event_warn("%s: calloc", __func__);
    580 		return NULL;
    581 	}
    582 
    583 	if (cfg)
    584 		base->flags = cfg->flags;
    585 
    586 	should_check_environment =
    587 	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
    588 
    589 	{
    590 		struct timeval tmp;
    591 		int precise_time =
    592 		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
    593 		int flags;
    594 		if (should_check_environment && !precise_time) {
    595 			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
    596 			base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
    597 		}
    598 		flags = precise_time ? EV_MONOT_PRECISE : 0;
    599 		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
    600 
    601 		gettime(base, &tmp);
    602 	}
    603 
    604 	min_heap_ctor_(&base->timeheap);
    605 
    606 	base->sig.ev_signal_pair[0] = -1;
    607 	base->sig.ev_signal_pair[1] = -1;
    608 	base->th_notify_fd[0] = -1;
    609 	base->th_notify_fd[1] = -1;
    610 
    611 	TAILQ_INIT(&base->active_later_queue);
    612 
    613 	evmap_io_initmap_(&base->io);
    614 	evmap_signal_initmap_(&base->sigmap);
    615 	event_changelist_init_(&base->changelist);
    616 
    617 	base->evbase = NULL;
    618 
    619 	if (cfg) {
    620 		memcpy(&base->max_dispatch_time,
    621 		    &cfg->max_dispatch_interval, sizeof(struct timeval));
    622 		base->limit_callbacks_after_prio =
    623 		    cfg->limit_callbacks_after_prio;
    624 	} else {
    625 		base->max_dispatch_time.tv_sec = -1;
    626 		base->limit_callbacks_after_prio = 1;
    627 	}
    628 	if (cfg && cfg->max_dispatch_callbacks >= 0) {
    629 		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
    630 	} else {
    631 		base->max_dispatch_callbacks = INT_MAX;
    632 	}
    633 	if (base->max_dispatch_callbacks == INT_MAX &&
    634 	    base->max_dispatch_time.tv_sec == -1)
    635 		base->limit_callbacks_after_prio = INT_MAX;
    636 
    637 	for (i = 0; eventops[i] && !base->evbase; i++) {
    638 		if (cfg != NULL) {
    639 			/* determine if this backend should be avoided */
    640 			if (event_config_is_avoided_method(cfg,
    641 				eventops[i]->name))
    642 				continue;
    643 			if ((eventops[i]->features & cfg->require_features)
    644 			    != cfg->require_features)
    645 				continue;
    646 		}
    647 
    648 		/* also obey the environment variables */
    649 		if (should_check_environment &&
    650 		    event_is_method_disabled(eventops[i]->name))
    651 			continue;
    652 
    653 		base->evsel = eventops[i];
    654 
    655 		base->evbase = base->evsel->init(base);
    656 	}
    657 
    658 	if (base->evbase == NULL) {
    659 		event_warnx("%s: no event mechanism available",
    660 		    __func__);
    661 		base->evsel = NULL;
    662 		event_base_free(base);
    663 		return NULL;
    664 	}
    665 
    666 	if (evutil_getenv_("EVENT_SHOW_METHOD"))
    667 		event_msgx("libevent using: %s", base->evsel->name);
    668 
    669 	/* allocate a single active event queue */
    670 	if (event_base_priority_init(base, 1) < 0) {
    671 		event_base_free(base);
    672 		return NULL;
    673 	}
    674 
    675 	/* prepare for threading */
    676 
    677 #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
    678 	event_debug_created_threadable_ctx_ = 1;
    679 #endif
    680 
    681 #ifndef EVENT__DISABLE_THREAD_SUPPORT
    682 	if (EVTHREAD_LOCKING_ENABLED() &&
    683 	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
    684 		int r;
    685 		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
    686 		EVTHREAD_ALLOC_COND(base->current_event_cond);
    687 		r = evthread_make_base_notifiable(base);
    688 		if (r<0) {
    689 			event_warnx("%s: Unable to make base notifiable.", __func__);
    690 			event_base_free(base);
    691 			return NULL;
    692 		}
    693 	}
    694 #endif
    695 
    696 #ifdef _WIN32
    697 	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
    698 		event_base_start_iocp_(base, cfg->n_cpus_hint);
    699 #endif
    700 
    701 	return (base);
    702 }
    703 
/* Launch an IOCP port for 'base', passing 'n_cpus' through to
 * event_iocp_port_launch_().  Returns 0 on success or if a port is
 * already running; -1 on failure, and always -1 on non-Windows builds. */
int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
	if (base->iocp)
		return 0;
	base->iocp = event_iocp_port_launch_(n_cpus);
	if (!base->iocp) {
		event_warnx("%s: Couldn't launch IOCP", __func__);
		return -1;
	}
	return 0;
#else
	return -1;
#endif
}
    720 
/* Shut down the IOCP port of 'base', if one exists.  No-op off Windows. */
void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
	int rv;

	if (!base->iocp)
		return;
	/* NOTE(review): the -1 second argument presumably selects an
	 * unbounded shutdown wait -- confirm against event_iocp_shutdown_. */
	rv = event_iocp_shutdown_(base->iocp, -1);
	EVUTIL_ASSERT(rv >= 0);
	base->iocp = NULL;
#endif
}
    734 
/* Cancel one pending callback during base teardown.  Returns 1 if a
 * user-visible (non-internal) callback was removed, 0 otherwise.  When
 * 'run_finalizers' is nonzero and the callback was finalizing, the
 * matching finalize closure is invoked as well. */
static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
	int result = 0;

	if (evcb->evcb_flags & EVLIST_INIT) {
		/* The callback is embedded in a struct event: delete the
		 * event unless it is one of the base's internal ones. */
		struct event *ev = event_callback_to_event(evcb);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
			result = 1;
		}
	} else {
		/* A bare event_callback: cancel it under the base lock. */
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		event_callback_cancel_nolock_(base, evcb, 1);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		result = 1;
	}

	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			struct event *ev = event_callback_to_event(evcb);
			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
			/* The _FREE variant also owns the event's storage. */
			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
			break;
		}
		case EV_CLOSURE_CB_FINALIZE:
			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
			break;
		default:
			break;
		}
	}
	return result;
}
    774 
/* Cancel everything remaining on the active and active-later queues of
 * 'base'.  Returns the number of user-visible callbacks removed. */
static int event_base_free_queues_(struct event_base *base, int run_finalizers)
{
	int deleted = 0, i;

	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb, *next;
		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
			/* Grab the successor first: cancelling unlinks evcb. */
			next = TAILQ_NEXT(evcb, evcb_active_next);
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
			evcb = next;
		}
	}

	{
		struct event_callback *evcb;
		/* Each cancellation unlinks the head entry, so this loop
		 * terminates once the queue is drained. */
		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
		}
	}

	return deleted;
}
    797 
/* Common implementation behind event_base_free() and
 * event_base_free_nofinalize(): tear down every queue, timeout list and
 * backend resource of 'base', running user finalizer callbacks only when
 * 'run_finalizers' is nonzero. */
static void
event_base_free_(struct event_base *base, int run_finalizers)
{
	int i, n_deleted=0;
	struct event *ev;
	/* XXXX grab the lock? If there is contention when one thread frees
	 * the base, then the contending thread will be very sad soon. */

	/* event_base_free(NULL) is how to free the current_base if we
	 * made it with event_init and forgot to hold a reference to it. */
	if (base == NULL && current_base)
		base = current_base;
	/* Don't actually free NULL. */
	if (base == NULL) {
		event_warnx("%s: no base to free", __func__);
		return;
	}
	/* XXX(niels) - check for internal events first */

#ifdef _WIN32
	event_base_stop_iocp_(base);
#endif

	/* threading fds if we have them */
	if (base->th_notify_fd[0] != -1) {
		event_del(&base->th_notify);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Delete all non-internal events. */
	evmap_delete_all_(base);

	/* Drain the timeout heap. */
	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}
	/* Tear down each common-timeout queue together with its events. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		event_del(&ctl->timeout_event); /* Internal; doesn't count */
		event_debug_unassign(&ctl->timeout_event);
		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
			/* Save the successor: event_del() unlinks 'ev'. */
			struct event *next = TAILQ_NEXT(ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
		mm_free(ctl);
	}
	if (base->common_timeout_queues)
		mm_free(base->common_timeout_queues);

	for (;;) {
		/* For finalizers we can register yet another finalizer out from
		 * finalizer, and iff finalizer will be in active_later_queue we can
		 * add finalizer to activequeues, and we will have events in
		 * activequeues after this function returns, which is not what we want
		 * (we even have an assertion for this).
		 *
		 * A simple case is bufferevent with underlying (i.e. filters).
		 */
		int ii = event_base_free_queues_(base, run_finalizers);
		if (!ii) {
			break;
		}
		n_deleted += ii;
	}

	if (n_deleted)
		event_debug(("%s: %d events were still set in base",
			__func__, n_deleted));

	/* Free any leftover event_base_once() bookkeeping. */
	while (LIST_FIRST(&base->once_events)) {
		struct event_once *eonce = LIST_FIRST(&base->once_events);
		LIST_REMOVE(eonce, next_once);
		mm_free(eonce);
	}

	/* Let the backend release its private state. */
	if (base->evsel != NULL && base->evsel->dealloc != NULL)
		base->evsel->dealloc(base);

	/* By now every active queue must be empty. */
	for (i = 0; i < base->nactivequeues; ++i)
		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
	min_heap_dtor_(&base->timeheap);

	mm_free(base->activequeues);

	evmap_io_clear_(&base->io);
	evmap_signal_clear_(&base->sigmap);
	event_changelist_freemem_(&base->changelist);

	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
	EVTHREAD_FREE_COND(base->current_event_cond);

	/* If we're freeing current_base, there won't be a current_base. */
	if (base == current_base)
		current_base = NULL;
	mm_free(base);
}
    907 
/* Free 'base' without running finalizer callbacks for events that are
 * still pending or active. */
void
event_base_free_nofinalize(struct event_base *base)
{
	const int run_finalizers = 0;
	event_base_free_(base, run_finalizers);
}
    913 
/* Free 'base', running any finalizer callbacks registered for its
 * pending or active events. */
void
event_base_free(struct event_base *base)
{
	const int run_finalizers = 1;
	event_base_free_(base, run_finalizers);
}
    919 
    920 /* Fake eventop; used to disable the backend temporarily inside event_reinit
    921  * so that we can call event_del() on an event without telling the backend.
    922  */
    923 static int
    924 nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    925     short events, void *fdinfo)
    926 {
    927 	return 0;
    928 }
const struct eventop nil_eventop = {
	"nil",
	NULL, /* init: unused. */
	NULL, /* add: unused. */
	nil_backend_del, /* del: used, so needs to be killed. */
	NULL, /* dispatch: unused. */
	NULL, /* dealloc: unused. */
	0, 0, 0 /* remaining fields zeroed — presumably need_reinit,
		 * features, fdinfo_len; confirm against struct eventop. */
};
    938 
/* reinitialize the event base after a fork */
/* Rebuilds the backend and notification descriptors so the child process
 * stops sharing kernel objects (kqueue/epoll fds, socketpairs) with its
 * parent.  Returns 0 on success, -1 on failure. */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel;
	int res = 0;
	int was_notifiable = 0;
	int had_signal_added = 0;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	evsel = base->evsel;

	/* check if this event mechanism requires reinit on the backend */
	if (evsel->need_reinit) {
		/* We're going to call event_del() on our notify events (the
		 * ones that tell about signals and wakeup events).  But we
		 * don't actually want to tell the backend to change its
		 * state, since it might still share some resource (a kqueue,
		 * an epoll fd) with the parent process, and we don't want to
		 * delete the fds from _that_ backend, we temporarily stub out
		 * the evsel with a replacement.
		 */
		base->evsel = &nil_eventop;
	}

	/* We need to re-create a new signal-notification fd and a new
	 * thread-notification fd.  Otherwise, we'll still share those with
	 * the parent process, which would make any notification sent to them
	 * get received by one or both of the event loops, more or less at
	 * random.
	 */
	if (base->sig.ev_signal_added) {
		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
		event_debug_unassign(&base->sig.ev_signal);
		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
		had_signal_added = 1;
		base->sig.ev_signal_added = 0;
	}
	/* Close our copies of the signal socketpair ends. */
	if (base->sig.ev_signal_pair[0] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
	if (base->sig.ev_signal_pair[1] != -1)
		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
	if (base->th_notify_fn != NULL) {
		was_notifiable = 1;
		base->th_notify_fn = NULL;
	}
	/* Tear down the thread-notification event and its fds. */
	if (base->th_notify_fd[0] != -1) {
		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
		if (base->th_notify_fd[1] != -1)
			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
		base->th_notify_fd[0] = -1;
		base->th_notify_fd[1] = -1;
		event_debug_unassign(&base->th_notify);
	}

	/* Replace the original evsel. */
        base->evsel = evsel;

	if (evsel->need_reinit) {
		/* Reconstruct the backend through brute-force, so that we do
		 * not share any structures with the parent process. For some
		 * backends, this is necessary: epoll and kqueue, for
		 * instance, have events associated with a kernel
		 * structure. If didn't reinitialize, we'd share that
		 * structure with the parent process, and any changes made by
		 * the parent would affect our backend's behavior (and vice
		 * versa).
		 */
		if (base->evsel->dealloc != NULL)
			base->evsel->dealloc(base);
		base->evbase = evsel->init(base);
		if (base->evbase == NULL) {
			event_errx(1,
			   "%s: could not reinitialize event mechanism",
			   __func__);
			res = -1;
			goto done;
		}

		/* Empty out the changelist (if any): we are starting from a
		 * blank slate. */
		event_changelist_freemem_(&base->changelist);

		/* Tell the event maps to re-inform the backend about all
		 * pending events. This will make the signal notification
		 * event get re-created if necessary. */
		if (evmap_reinit_(base) < 0)
			res = -1;
	} else {
		/* Backend survives fork; just rebuild signal handling and
		 * re-add the signal event if it had been added before. */
		res = evsig_init_(base);
		if (res == 0 && had_signal_added) {
			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
			if (res == 0)
				base->sig.ev_signal_added = 1;
		}
	}

	/* If we were notifiable before, and nothing just exploded, become
	 * notifiable again. */
	if (was_notifiable && res == 0)
		res = evthread_make_base_notifiable_nolock_(base);

done:
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (res);
}
   1047 
   1048 /* Get the monotonic time for this event_base' timer */
   1049 int
   1050 event_gettime_monotonic(struct event_base *base, struct timeval *tv)
   1051 {
   1052   int rv = -1;
   1053 
   1054   if (base && tv) {
   1055     EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1056     rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
   1057     EVBASE_RELEASE_LOCK(base, th_base_lock);
   1058   }
   1059 
   1060   return rv;
   1061 }
   1062 
   1063 const char **
   1064 event_get_supported_methods(void)
   1065 {
   1066 	static const char **methods = NULL;
   1067 	const struct eventop **method;
   1068 	const char **tmp;
   1069 	int i = 0, k;
   1070 
   1071 	/* count all methods */
   1072 	for (method = &eventops[0]; *method != NULL; ++method) {
   1073 		++i;
   1074 	}
   1075 
   1076 	/* allocate one more than we need for the NULL pointer */
   1077 	tmp = mm_calloc((i + 1), sizeof(char *));
   1078 	if (tmp == NULL)
   1079 		return (NULL);
   1080 
   1081 	/* populate the array with the supported methods */
   1082 	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
   1083 		tmp[i++] = eventops[k]->name;
   1084 	}
   1085 	tmp[i] = NULL;
   1086 
   1087 	if (methods != NULL)
   1088 		mm_free(__UNCONST(methods));
   1089 
   1090 	methods = tmp;
   1091 
   1092 	return (methods);
   1093 }
   1094 
   1095 struct event_config *
   1096 event_config_new(void)
   1097 {
   1098 	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
   1099 
   1100 	if (cfg == NULL)
   1101 		return (NULL);
   1102 
   1103 	TAILQ_INIT(&cfg->entries);
   1104 	cfg->max_dispatch_interval.tv_sec = -1;
   1105 	cfg->max_dispatch_callbacks = INT_MAX;
   1106 	cfg->limit_callbacks_after_prio = 1;
   1107 
   1108 	return (cfg);
   1109 }
   1110 
   1111 static void
   1112 event_config_entry_free(struct event_config_entry *entry)
   1113 {
   1114 	if (entry->avoid_method != NULL)
   1115 		mm_free(__UNCONST(entry->avoid_method));
   1116 	mm_free(entry);
   1117 }
   1118 
   1119 void
   1120 event_config_free(struct event_config *cfg)
   1121 {
   1122 	struct event_config_entry *entry;
   1123 
   1124 	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
   1125 		TAILQ_REMOVE(&cfg->entries, entry, next);
   1126 		event_config_entry_free(entry);
   1127 	}
   1128 	mm_free(cfg);
   1129 }
   1130 
   1131 int
   1132 event_config_set_flag(struct event_config *cfg, int flag)
   1133 {
   1134 	if (!cfg)
   1135 		return -1;
   1136 	cfg->flags |= flag;
   1137 	return 0;
   1138 }
   1139 
   1140 int
   1141 event_config_avoid_method(struct event_config *cfg, const char *method)
   1142 {
   1143 	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
   1144 	if (entry == NULL)
   1145 		return (-1);
   1146 
   1147 	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
   1148 		mm_free(entry);
   1149 		return (-1);
   1150 	}
   1151 
   1152 	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
   1153 
   1154 	return (0);
   1155 }
   1156 
   1157 int
   1158 event_config_require_features(struct event_config *cfg,
   1159     int features)
   1160 {
   1161 	if (!cfg)
   1162 		return (-1);
   1163 	cfg->require_features = features;
   1164 	return (0);
   1165 }
   1166 
   1167 int
   1168 event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
   1169 {
   1170 	if (!cfg)
   1171 		return (-1);
   1172 	cfg->n_cpus_hint = cpus;
   1173 	return (0);
   1174 }
   1175 
   1176 int
   1177 event_config_set_max_dispatch_interval(struct event_config *cfg,
   1178     const struct timeval *max_interval, int max_callbacks, int min_priority)
   1179 {
   1180 	if (max_interval)
   1181 		memcpy(&cfg->max_dispatch_interval, max_interval,
   1182 		    sizeof(struct timeval));
   1183 	else
   1184 		cfg->max_dispatch_interval.tv_sec = -1;
   1185 	cfg->max_dispatch_callbacks =
   1186 	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
   1187 	if (min_priority < 0)
   1188 		min_priority = 0;
   1189 	cfg->limit_callbacks_after_prio = min_priority;
   1190 	return (0);
   1191 }
   1192 
/* Set the number of priority queues on the global current_base; thin
 * wrapper around event_base_priority_init(). */
int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}
   1198 
   1199 int
   1200 event_base_priority_init(struct event_base *base, int npriorities)
   1201 {
   1202 	int i, r;
   1203 	r = -1;
   1204 
   1205 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1206 
   1207 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
   1208 	    || npriorities >= EVENT_MAX_PRIORITIES)
   1209 		goto err;
   1210 
   1211 	if (npriorities == base->nactivequeues)
   1212 		goto ok;
   1213 
   1214 	if (base->nactivequeues) {
   1215 		mm_free(base->activequeues);
   1216 		base->nactivequeues = 0;
   1217 	}
   1218 
   1219 	/* Allocate our priority queues */
   1220 	base->activequeues = (struct evcallback_list *)
   1221 	  mm_calloc(npriorities, sizeof(struct evcallback_list));
   1222 	if (base->activequeues == NULL) {
   1223 		event_warn("%s: calloc", __func__);
   1224 		goto err;
   1225 	}
   1226 	base->nactivequeues = npriorities;
   1227 
   1228 	for (i = 0; i < base->nactivequeues; ++i) {
   1229 		TAILQ_INIT(&base->activequeues[i]);
   1230 	}
   1231 
   1232 ok:
   1233 	r = 0;
   1234 err:
   1235 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1236 	return (r);
   1237 }
   1238 
   1239 int
   1240 event_base_get_npriorities(struct event_base *base)
   1241 {
   1242 
   1243 	int n;
   1244 	if (base == NULL)
   1245 		base = current_base;
   1246 
   1247 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1248 	n = base->nactivequeues;
   1249 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1250 	return (n);
   1251 }
   1252 
   1253 int
   1254 event_base_get_num_events(struct event_base *base, unsigned int type)
   1255 {
   1256 	int r = 0;
   1257 
   1258 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1259 
   1260 	if (type & EVENT_BASE_COUNT_ACTIVE)
   1261 		r += base->event_count_active;
   1262 
   1263 	if (type & EVENT_BASE_COUNT_VIRTUAL)
   1264 		r += base->virtual_event_count;
   1265 
   1266 	if (type & EVENT_BASE_COUNT_ADDED)
   1267 		r += base->event_count;
   1268 
   1269 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1270 
   1271 	return r;
   1272 }
   1273 
   1274 int
   1275 event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
   1276 {
   1277 	int r = 0;
   1278 
   1279 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1280 
   1281 	if (type & EVENT_BASE_COUNT_ACTIVE) {
   1282 		r += base->event_count_active_max;
   1283 		if (clear)
   1284 			base->event_count_active_max = 0;
   1285 	}
   1286 
   1287 	if (type & EVENT_BASE_COUNT_VIRTUAL) {
   1288 		r += base->virtual_event_count_max;
   1289 		if (clear)
   1290 			base->virtual_event_count_max = 0;
   1291 	}
   1292 
   1293 	if (type & EVENT_BASE_COUNT_ADDED) {
   1294 		r += base->event_count_max;
   1295 		if (clear)
   1296 			base->event_count_max = 0;
   1297 	}
   1298 
   1299 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1300 
   1301 	return r;
   1302 }
   1303 
   1304 /* Returns true iff we're currently watching any events. */
   1305 static int
   1306 event_haveevents(struct event_base *base)
   1307 {
   1308 	/* Caller must hold th_base_lock */
   1309 	return (base->virtual_event_count > 0 || base->event_count > 0);
   1310 }
   1311 
/* "closure" function called when processing active signal events */
/* Runs the callback once per pending delivery of the signal.  Called
 * with th_base_lock held; returns with it released. */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
	short ncalls;
	int should_break;

	/* Allows deletes to work */
	/* Publishing &ncalls via ev_pncalls lets event_del() zero our
	 * on-stack counter and so stop this loop if the event is deleted
	 * from inside a callback. */
	ncalls = ev->ev_ncalls;
	if (ncalls != 0)
		ev->ev_pncalls = &ncalls;
	/* Drop the lock while user callbacks run. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	while (ncalls) {
		ncalls--;
		ev->ev_ncalls = ncalls;
		if (ncalls == 0)
			ev->ev_pncalls = NULL;
		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

		/* Re-take the lock only to sample the loopbreak flag. */
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		should_break = base->event_break;
		EVBASE_RELEASE_LOCK(base, th_base_lock);

		if (should_break) {
			/* Unhook the stack pointer before leaving early. */
			if (ncalls != 0)
				ev->ev_pncalls = NULL;
			return;
		}
	}
}
   1342 
   1343 /* Common timeouts are special timeouts that are handled as queues rather than
   1344  * in the minheap.  This is more efficient than the minheap if we happen to
   1345  * know that we're going to get several thousands of timeout events all with
   1346  * the same timeout value.
   1347  *
   1348  * Since all our timeout handling code assumes timevals can be copied,
   1349  * assigned, etc, we can't use "magic pointer" to encode these common
   1350  * timeouts.  Searching through a list to see if every timeout is common could
   1351  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
   1352  * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
   1355  */
   1356 
/* Layout of a common-timeout tv_usec field (see the comment above):
 * low 20 bits = real microseconds, next 8 bits = queue index,
 * top 4 bits = magic tag identifying the value as a common timeout. */
#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK     0xf0000000
#define COMMON_TIMEOUT_MAGIC    0x50000000

/* Extract the common-timeout queue index encoded in tv->tv_usec. */
#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
   1365 
   1366 /** Return true iff if 'tv' is a common timeout in 'base' */
   1367 static inline int
   1368 is_common_timeout(const struct timeval *tv,
   1369     const struct event_base *base)
   1370 {
   1371 	int idx;
   1372 	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
   1373 		return 0;
   1374 	idx = COMMON_TIMEOUT_IDX(tv);
   1375 	return idx < base->n_common_timeouts;
   1376 }
   1377 
   1378 /* True iff tv1 and tv2 have the same common-timeout index, or if neither
   1379  * one is a common timeout. */
   1380 static inline int
   1381 is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
   1382 {
   1383 	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
   1384 	    (tv2->tv_usec & ~MICROSECONDS_MASK);
   1385 }
   1386 
/** Requires that 'tv' is a common timeout.  Return the corresponding
 * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
{
	/* The queue index is encoded in the tagged tv_usec bits. */
	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}
   1394 
   1395 #if 0
   1396 static inline int
   1397 common_timeout_ok(const struct timeval *tv,
   1398     struct event_base *base)
   1399 {
   1400 	const struct timeval *expect =
   1401 	    &get_common_timeout_list(base, tv)->duration;
   1402 	return tv->tv_sec == expect->tv_sec &&
   1403 	    tv->tv_usec == expect->tv_usec;
   1404 }
   1405 #endif
   1406 
   1407 /* Add the timeout for the first event in given common timeout list to the
   1408  * event_base's minheap. */
   1409 static void
   1410 common_timeout_schedule(struct common_timeout_list *ctl,
   1411     const struct timeval *now, struct event *head)
   1412 {
   1413 	struct timeval timeout = head->ev_timeout;
   1414 	timeout.tv_usec &= MICROSECONDS_MASK;
   1415 	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
   1416 }
   1417 
/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
	struct timeval now;
	struct common_timeout_list *ctl = arg;
	struct event_base *base = ctl->base;
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	gettime(base, &now);
	while (1) {
		/* Activate every event at the queue head whose (untagged)
		 * deadline is <= now.  Entries presumably sit in expiry
		 * order since they share one duration — confirm against
		 * the insert path. */
		ev = TAILQ_FIRST(&ctl->events);
		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
		    (ev->ev_timeout.tv_sec == now.tv_sec &&
			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
			break;
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
	/* If events remain, re-arm the queue's internal timer for the
	 * new head. */
	if (ev)
		common_timeout_schedule(ctl, &now, ev);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   1443 
   1444 #define MAX_COMMON_TIMEOUTS 256
   1445 
   1446 const struct timeval *
   1447 event_base_init_common_timeout(struct event_base *base,
   1448     const struct timeval *duration)
   1449 {
   1450 	int i;
   1451 	struct timeval tv;
   1452 	const struct timeval *result=NULL;
   1453 	struct common_timeout_list *new_ctl;
   1454 
   1455 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1456 	if (duration->tv_usec > 1000000) {
   1457 		memcpy(&tv, duration, sizeof(struct timeval));
   1458 		if (is_common_timeout(duration, base))
   1459 			tv.tv_usec &= MICROSECONDS_MASK;
   1460 		tv.tv_sec += tv.tv_usec / 1000000;
   1461 		tv.tv_usec %= 1000000;
   1462 		duration = &tv;
   1463 	}
   1464 	for (i = 0; i < base->n_common_timeouts; ++i) {
   1465 		const struct common_timeout_list *ctl =
   1466 		    base->common_timeout_queues[i];
   1467 		if (duration->tv_sec == ctl->duration.tv_sec &&
   1468 		    duration->tv_usec ==
   1469 		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
   1470 			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
   1471 			result = &ctl->duration;
   1472 			goto done;
   1473 		}
   1474 	}
   1475 	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
   1476 		event_warnx("%s: Too many common timeouts already in use; "
   1477 		    "we only support %d per event_base", __func__,
   1478 		    MAX_COMMON_TIMEOUTS);
   1479 		goto done;
   1480 	}
   1481 	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
   1482 		int n = base->n_common_timeouts < 16 ? 16 :
   1483 		    base->n_common_timeouts*2;
   1484 		struct common_timeout_list **newqueues =
   1485 		    mm_realloc(base->common_timeout_queues,
   1486 			n*sizeof(struct common_timeout_queue *));
   1487 		if (!newqueues) {
   1488 			event_warn("%s: realloc",__func__);
   1489 			goto done;
   1490 		}
   1491 		base->n_common_timeouts_allocated = n;
   1492 		base->common_timeout_queues = newqueues;
   1493 	}
   1494 	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
   1495 	if (!new_ctl) {
   1496 		event_warn("%s: calloc",__func__);
   1497 		goto done;
   1498 	}
   1499 	TAILQ_INIT(&new_ctl->events);
   1500 	new_ctl->duration.tv_sec = duration->tv_sec;
   1501 	new_ctl->duration.tv_usec =
   1502 	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
   1503 	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
   1504 	evtimer_assign(&new_ctl->timeout_event, base,
   1505 	    common_timeout_callback, new_ctl);
   1506 	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
   1507 	event_priority_set(&new_ctl->timeout_event, 0);
   1508 	new_ctl->base = base;
   1509 	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
   1510 	result = &new_ctl->duration;
   1511 
   1512 done:
   1513 	if (result)
   1514 		EVUTIL_ASSERT(is_common_timeout(result, base));
   1515 
   1516 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1517 	return result;
   1518 }
   1519 
/* Closure function invoked when we're activating a persistent event. */
/* Reschedules the event's timeout (if it has one), then runs the user
 * callback.  Called with th_base_lock held; returns with it released. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	void (*evcb_callback)(evutil_socket_t, short, void *);

        // Other fields of *ev that must be stored before executing
        evutil_socket_t evcb_fd;
        short evcb_res;
        void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_.  If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
		struct timeval run_at, relative_to, delay, now;
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			/* Strip the common-timeout tag bits for arithmetic;
			 * they are re-ORed into run_at below. */
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something. Reschedule relative to now.
			 */
			evutil_timeradd(&now, &delay, &run_at);
		}
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock
	/* The event may be freed or modified by the callback or another
	 * thread once the lock is dropped, so copy everything we need. */
	evcb_callback = ev->ev_callback;
        evcb_fd = ev->ev_fd;
        evcb_res = ev->ev_res;
        evcb_arg = ev->ev_arg;

	// Release the lock
 	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
        (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
   1585 
   1586 /*
   1587   Helper for event_process_active to process all the events in a single queue,
   1588   releasing the lock as we go.  This function requires that the lock be held
   1589   when it's invoked.  Returns -1 if we get a signal or an event_break that
   1590   means we should stop processing any active events now.  Otherwise returns
   1591   the number of non-internal event_callbacks that we processed.
   1592 */
static int
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
{
	struct event_callback *evcb;
	int count = 0;	/* non-internal callbacks run so far */

	EVUTIL_ASSERT(activeq != NULL);

	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
		struct event *ev=NULL;
		/* Real events get unlinked (or only de-activated when
		 * persistent/finalizing); bare callbacks are simply pulled
		 * off the active queue. */
		if (evcb->evcb_flags & EVLIST_INIT) {
			ev = event_callback_to_event(evcb);

			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
				event_queue_remove_active(base, evcb);
			else
				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
			event_debug((
			    "event_process_active: event: %p, %s%s%scall %p",
			    ev,
			    ev->ev_res & EV_READ ? "EV_READ " : " ",
			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
			    ev->ev_callback));
		} else {
			event_queue_remove_active(base, evcb);
			event_debug(("event_process_active: event_callback %p, "
				"closure %d, call %p",
				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
		}

		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
			++count;


		/* Publish the running callback so other threads (see the
		 * current_event_waiters broadcast below) can wait for it. */
		base->current_event = evcb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		base->current_event_waiters = 0;
#endif

		/* Every closure type below releases th_base_lock before
		 * invoking user code. */
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_SIGNAL:
			EVUTIL_ASSERT(ev != NULL);
			event_signal_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT_PERSIST:
			EVUTIL_ASSERT(ev != NULL);
			event_persist_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT: {
			void (*evcb_callback)(evutil_socket_t, short, void *);
			EVUTIL_ASSERT(ev != NULL);
			evcb_callback = *ev->ev_callback;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
		}
		break;
		case EV_CLOSURE_CB_SELF: {
			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_selfcb(evcb, evcb->evcb_arg);
		}
		break;
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			void (*evcb_evfinalize)(struct event *, void *);
			/* Copy the closure kind: the finalizer may free or
			 * reuse 'ev'. */
			int evcb_closure = evcb->evcb_closure;
			EVUTIL_ASSERT(ev != NULL);
			base->current_event = NULL;
			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_evfinalize(ev, ev->ev_arg);
			event_debug_note_teardown_(ev);
			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
		}
		break;
		case EV_CLOSURE_CB_FINALIZE: {
			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
			base->current_event = NULL;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_cbfinalize(evcb, evcb->evcb_arg);
		}
		break;
		default:
			EVUTIL_ASSERT(0);
		}

		/* Lock is held again from here on. */
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		base->current_event = NULL;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		if (base->current_event_waiters) {
			base->current_event_waiters = 0;
			EVTHREAD_COND_BROADCAST(base->current_event_cond);
		}
#endif

		/* Stop conditions: loopbreak, callback budget exhausted,
		 * deadline passed, or loopcontinue requested. */
		if (base->event_break)
			return -1;
		if (count >= max_to_process)
			return count;
		if (count && endtime) {
			struct timeval now;
			update_time_cache(base);
			gettime(base, &now);
			if (evutil_timercmp(&now, endtime, >=))
				return count;
		}
		if (base->event_continue)
			break;
	}
	return count;
}
   1710 
   1711 /*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
   1714  * priority ones.
   1715  */
   1716 
static int
event_process_active(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	struct evcallback_list *activeq = NULL;
	int i, c = 0;
	const struct timeval *endtime;
	struct timeval tv;
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;
	/* A non-negative max_dispatch_time turns into an absolute
	 * deadline for this pass; otherwise no deadline. */
	if (base->max_dispatch_time.tv_sec >= 0) {
		update_time_cache(base);
		gettime(base, &tv);
		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
		endtime = &tv;
	} else {
		endtime = NULL;
	}

	/* Walk the queues from highest priority (0) downwards; only the
	 * first non-empty queue gets processed.  Priorities below
	 * limit_after_prio run uncapped; the rest honor maxcb/endtime. */
	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			base->event_running_priority = i;
			activeq = &base->activequeues[i];
			if (i < limit_after_prio)
				c = event_process_active_single_queue(base, activeq,
				    INT_MAX, NULL);
			else
				c = event_process_active_single_queue(base, activeq,
				    maxcb, endtime);
			if (c < 0) {
				/* Loopbreak requested. */
				goto done;
			} else if (c > 0)
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal.  Continue. */
		}
	}

done:
	base->event_running_priority = -1;

	return c;
}
   1761 
   1762 /*
   1763  * Wait continuously for events.  We exit only if no events are left.
   1764  */
   1765 
int
event_dispatch(void)
{
	/* Run the event loop on the global current_base with no flags. */
	return (event_loop(0));
}
   1771 
int
event_base_dispatch(struct event_base *event_base)
{
	/* Run the loop on the given base with no flags. */
	return (event_base_loop(event_base, 0));
}
   1777 
/* Return the name of the backend (evsel) this base was set up with. */
const char *
event_base_get_method(const struct event_base *base)
{
	EVUTIL_ASSERT(base);
	return (base->evsel->name);
}
   1784 
/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
	/* 'arg' is the event_base to stop; fd/what are unused here. */
	struct event_base *base = arg;
	base->event_gotterm = 1;
}
   1793 
   1794 int
   1795 event_loopexit(const struct timeval *tv)
   1796 {
   1797 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
   1798 		    current_base, tv));
   1799 }
   1800 
   1801 int
   1802 event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
   1803 {
   1804 	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
   1805 		    event_base, tv));
   1806 }
   1807 
   1808 int
   1809 event_loopbreak(void)
   1810 {
   1811 	return (event_base_loopbreak(current_base));
   1812 }
   1813 
   1814 int
   1815 event_base_loopbreak(struct event_base *event_base)
   1816 {
   1817 	int r = 0;
   1818 	if (event_base == NULL)
   1819 		return (-1);
   1820 
   1821 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
   1822 	event_base->event_break = 1;
   1823 
   1824 	if (EVBASE_NEED_NOTIFY(event_base)) {
   1825 		r = evthread_notify_base(event_base);
   1826 	} else {
   1827 		r = (0);
   1828 	}
   1829 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
   1830 	return r;
   1831 }
   1832 
   1833 int
   1834 event_base_loopcontinue(struct event_base *event_base)
   1835 {
   1836 	int r = 0;
   1837 	if (event_base == NULL)
   1838 		return (-1);
   1839 
   1840 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
   1841 	event_base->event_continue = 1;
   1842 
   1843 	if (EVBASE_NEED_NOTIFY(event_base)) {
   1844 		r = evthread_notify_base(event_base);
   1845 	} else {
   1846 		r = (0);
   1847 	}
   1848 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
   1849 	return r;
   1850 }
   1851 
   1852 int
   1853 event_base_got_break(struct event_base *event_base)
   1854 {
   1855 	int res;
   1856 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
   1857 	res = event_base->event_break;
   1858 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
   1859 	return res;
   1860 }
   1861 
   1862 int
   1863 event_base_got_exit(struct event_base *event_base)
   1864 {
   1865 	int res;
   1866 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
   1867 	res = event_base->event_gotterm;
   1868 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
   1869 	return res;
   1870 }
   1871 
   1872 /* not thread safe */
   1873 
   1874 int
   1875 event_loop(int flags)
   1876 {
   1877 	return event_base_loop(current_base, flags);
   1878 }
   1879 
/**
 * Core dispatch loop for 'base'.
 *
 * 'flags' may combine EVLOOP_ONCE (return after processing one batch of
 * callbacks), EVLOOP_NONBLOCK (poll once, never block) and
 * EVLOOP_NO_EXIT_ON_EMPTY (keep looping even when no events are pending).
 *
 * Returns 0 on a requested exit/break, 1 when it stopped because no events
 * were registered, and -1 on error (reentrant invocation or backend
 * dispatch failure).
 */
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done, retval = 0;

	/* Grab the lock.  We will release it inside evsel.dispatch, and again
	 * as we invoke user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Only one loop may run per base at a time. */
	if (base->running_loop) {
		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
		    " can run on each event_base at once.", __func__);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		return -1;
	}

	base->running_loop = 1;

	clear_time_cache(base);

	/* NOTE(review): presumably redirects signal delivery to this base —
	 * see evsig_set_base_ for the exact semantics. */
	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
		evsig_set_base_(base);

	done = 0;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	/* Record which thread owns the loop so EVBASE_IN_THREAD works. */
	base->th_owner_id = EVTHREAD_GET_ID();
#endif

	base->event_gotterm = base->event_break = 0;

	while (!done) {
		base->event_continue = 0;
		base->n_deferreds_queued = 0;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			break;
		}

		if (base->event_break) {
			break;
		}

		/* Choose how long the backend may sleep: until the next
		 * timeout if idle, or not at all if callbacks are pending. */
		tv_p = &tv;
		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
			event_debug(("%s: no events registered.", __func__));
			retval = 1;
			goto done;
		}

		event_queue_make_later_events_active(base);

		/* Invalidate the cached time before blocking in dispatch. */
		clear_time_cache(base);

		res = evsel->dispatch(base, tv_p);

		if (res == -1) {
			event_debug(("%s: dispatch returned unsuccessfully.",
				__func__));
			retval = -1;
			goto done;
		}

		update_time_cache(base);

		/* Move expired timers onto the active queues... */
		timeout_process(base);

		/* ...then run the active callbacks. */
		if (N_ACTIVE_CALLBACKS(base)) {
			int n = event_process_active(base);
			if ((flags & EVLOOP_ONCE)
			    && N_ACTIVE_CALLBACKS(base) == 0
			    && n != 0)
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}
	event_debug(("%s: asked to terminate loop.", __func__));

done:
	clear_time_cache(base);
	base->running_loop = 0;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return (retval);
}
   1982 
/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage.  The user callback runs *before* the
 * lock is taken; the lock only guards removal from base->once_events. */
static void
event_once_cb(evutil_socket_t fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
	/* Unlink from the base's list of outstanding one-shot events. */
	LIST_REMOVE(eonce, next_once);
	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
	event_debug_unassign(&eonce->ev);
	mm_free(eonce);
}
   1997 
   1998 /* not threadsafe, event scheduled once. */
   1999 int
   2000 event_once(evutil_socket_t fd, short events,
   2001     void (*callback)(evutil_socket_t, short, void *),
   2002     void *arg, const struct timeval *tv)
   2003 {
   2004 	return event_base_once(current_base, fd, events, callback, arg, tv);
   2005 }
   2006 
   2007 /* Schedules an event once */
   2008 int
   2009 event_base_once(struct event_base *base, evutil_socket_t fd, short events,
   2010     void (*callback)(evutil_socket_t, short, void *),
   2011     void *arg, const struct timeval *tv)
   2012 {
   2013 	struct event_once *eonce;
   2014 	int res = 0;
   2015 	int activate = 0;
   2016 
   2017 	/* We cannot support signals that just fire once, or persistent
   2018 	 * events. */
   2019 	if (events & (EV_SIGNAL|EV_PERSIST))
   2020 		return (-1);
   2021 
   2022 	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
   2023 		return (-1);
   2024 
   2025 	eonce->cb = callback;
   2026 	eonce->arg = arg;
   2027 
   2028 	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
   2029 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
   2030 
   2031 		if (tv == NULL || ! evutil_timerisset(tv)) {
   2032 			/* If the event is going to become active immediately,
   2033 			 * don't put it on the timeout queue.  This is one
   2034 			 * idiom for scheduling a callback, so let's make
   2035 			 * it fast (and order-preserving). */
   2036 			activate = 1;
   2037 		}
   2038 	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
   2039 		events &= EV_READ|EV_WRITE|EV_CLOSED;
   2040 
   2041 		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
   2042 	} else {
   2043 		/* Bad event combination */
   2044 		mm_free(eonce);
   2045 		return (-1);
   2046 	}
   2047 
   2048 	if (res == 0) {
   2049 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   2050 		if (activate)
   2051 			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
   2052 		else
   2053 			res = event_add_nolock_(&eonce->ev, tv, 0);
   2054 
   2055 		if (res != 0) {
   2056 			mm_free(eonce);
   2057 			return (res);
   2058 		} else {
   2059 			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
   2060 		}
   2061 		EVBASE_RELEASE_LOCK(base, th_base_lock);
   2062 	}
   2063 
   2064 	return (0);
   2065 }
   2066 
/**
 * Initialize 'ev' to watch 'fd' for the 'events' mask on 'base', invoking
 * 'callback(fd, events, arg)' when triggered.  The event must not currently
 * be added.  Returns 0 on success, -1 if EV_SIGNAL is combined with any of
 * EV_READ/EV_WRITE/EV_CLOSED.
 */
int
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	if (!base)
		base = current_base;
	/* event_self_cbarg() sentinel: pass the event itself as the arg. */
	if (arg == &event_self_cbarg_ptr_)
		arg = ev;

	event_debug_assert_not_added_(ev);

	ev->ev_base = base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	if (events & EV_SIGNAL) {
		/* Signal events cannot share an event with I/O events. */
		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
			event_warnx("%s: EV_SIGNAL is not compatible with "
			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
			return -1;
		}
		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
	} else {
		/* Persistent I/O events remember their timeout so it can be
		 * re-armed after each activation. */
		if (events & EV_PERSIST) {
			evutil_timerclear(&ev->ev_io_timeout);
			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
		} else {
			ev->ev_closure = EV_CLOSURE_EVENT;
		}
	}

	min_heap_elem_init_(ev);

	if (base != NULL) {
		/* by default, we put new events into the middle priority */
		ev->ev_pri = base->nactivequeues / 2;
	}

	event_debug_note_setup_(ev);

	return 0;
}
   2115 
   2116 int
   2117 event_base_set(struct event_base *base, struct event *ev)
   2118 {
   2119 	/* Only innocent events may be assigned to a different base */
   2120 	if (ev->ev_flags != EVLIST_INIT)
   2121 		return (-1);
   2122 
   2123 	event_debug_assert_is_setup_(ev);
   2124 
   2125 	ev->ev_base = base;
   2126 	ev->ev_pri = base->nactivequeues/2;
   2127 
   2128 	return (0);
   2129 }
   2130 
   2131 void
   2132 event_set(struct event *ev, evutil_socket_t fd, short events,
   2133 	  void (*callback)(evutil_socket_t, short, void *), void *arg)
   2134 {
   2135 	int r;
   2136 	r = event_assign(ev, current_base, fd, events, callback, arg);
   2137 	EVUTIL_ASSERT(r == 0);
   2138 }
   2139 
   2140 void *
   2141 event_self_cbarg(void)
   2142 {
   2143 	return &event_self_cbarg_ptr_;
   2144 }
   2145 
   2146 struct event *
   2147 event_base_get_running_event(struct event_base *base)
   2148 {
   2149 	struct event *ev = NULL;
   2150 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   2151 	if (EVBASE_IN_THREAD(base)) {
   2152 		struct event_callback *evcb = base->current_event;
   2153 		if (evcb->evcb_flags & EVLIST_INIT)
   2154 			ev = event_callback_to_event(evcb);
   2155 	}
   2156 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   2157 	return ev;
   2158 }
   2159 
   2160 struct event *
   2161 event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
   2162 {
   2163 	struct event *ev;
   2164 	ev = mm_malloc(sizeof(struct event));
   2165 	if (ev == NULL)
   2166 		return (NULL);
   2167 	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
   2168 		mm_free(ev);
   2169 		return (NULL);
   2170 	}
   2171 
   2172 	return (ev);
   2173 }
   2174 
/* Delete 'ev' from its base and release its heap storage. */
void
event_free(struct event *ev)
{
	/* The setup assertion is disabled so that events which have already
	 * been finalized remain a valid target for event_free(): a finalized
	 * event is no longer "set up", but its storage still needs freeing. */
	// event_debug_assert_is_setup_(ev);

	/* make sure that this event won't be coming back to haunt us. */
	event_del(ev);
	event_debug_note_teardown_(ev);
	mm_free(ev);

}
   2188 
   2189 void
   2190 event_debug_unassign(struct event *ev)
   2191 {
   2192 	event_debug_assert_not_added_(ev);
   2193 	event_debug_note_teardown_(ev);
   2194 
   2195 	ev->ev_flags &= ~EVLIST_INIT;
   2196 }
   2197 
/* Flag bit mixed into 'flags': also free the event after finalizing. */
#define EVENT_FINALIZE_FREE_ 0x10000
/* Lock-held core of event_finalize/event_free_finalize: remove the event,
 * repoint its closure at the finalizer callback, and activate it so the
 * finalizer runs from the loop.  'base' is unused here but kept for symmetry
 * with the other *_nolock_ helpers. */
static int
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;

	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	ev->ev_closure = closure;
	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
	event_active_nolock_(ev, EV_FINALIZE, 1);
	/* Mark so later add/del attempts see the event is being finalized. */
	ev->ev_flags |= EVLIST_FINALIZING;
	return 0;
}
   2212 
   2213 static int
   2214 event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
   2215 {
   2216 	int r;
   2217 	struct event_base *base = ev->ev_base;
   2218 	if (EVUTIL_FAILURE_CHECK(!base)) {
   2219 		event_warnx("%s: event has no event_base set.", __func__);
   2220 		return -1;
   2221 	}
   2222 
   2223 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   2224 	r = event_finalize_nolock_(base, flags, ev, cb);
   2225 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   2226 	return r;
   2227 }
   2228 
   2229 int
   2230 event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
   2231 {
   2232 	return event_finalize_impl_(flags, ev, cb);
   2233 }
   2234 
   2235 int
   2236 event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
   2237 {
   2238 	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
   2239 }
   2240 
/* Lock-held finalization of a bare event_callback.  If the callback is
 * backed by a struct event (EVLIST_INIT set), delete the event; otherwise
 * just cancel the callback.  Then repoint it at the finalizer and activate
 * it so the finalizer runs from the loop. */
void
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	struct event *ev = NULL;
	if (evcb->evcb_flags & EVLIST_INIT) {
		ev = event_callback_to_event(evcb);
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	} else {
		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
	}

	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
	evcb->evcb_cb_union.evcb_cbfinalize = cb;
	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
	/* Mark so later operations see the callback is being finalized. */
	evcb->evcb_flags |= EVLIST_FINALIZING;
}
   2257 
   2258 void
   2259 event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
   2260 {
   2261 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   2262 	event_callback_finalize_nolock_(base, flags, evcb, cb);
   2263 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   2264 }
   2265 
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
 * callback will be invoked on *one of them*, after they have *all* been
 * finalized.
 *
 * NOTE(review): assumes n_cbs >= 1; with n_cbs == 0 the fallback below
 * would read evcbs[0] out of bounds — confirm all callers guarantee this. */
int
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
{
	int n_pending = 0, i;

	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	event_debug(("%s: %d events finalizing", __func__, n_cbs));

	/* At most one can be currently executing; the rest we just
	 * cancel... But we always make sure that the finalize callback
	 * runs. */
	for (i = 0; i < n_cbs; ++i) {
		struct event_callback *evcb = evcbs[i];
		if (evcb == base->current_event) {
			/* The running one gets the finalizer attached. */
			event_callback_finalize_nolock_(base, 0, evcb, cb);
			++n_pending;
		} else {
			event_callback_cancel_nolock_(base, evcb, 0);
		}
	}

	if (n_pending == 0) {
		/* Just do the first one. */
		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
   2302 
   2303 /*
   2304  * Set's the priority of an event - if an event is already scheduled
   2305  * changing the priority is going to fail.
   2306  */
   2307 
   2308 int
   2309 event_priority_set(struct event *ev, int pri)
   2310 {
   2311 	event_debug_assert_is_setup_(ev);
   2312 
   2313 	if (ev->ev_flags & EVLIST_ACTIVE)
   2314 		return (-1);
   2315 	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
   2316 		return (-1);
   2317 
   2318 	ev->ev_pri = pri;
   2319 
   2320 	return (0);
   2321 }
   2322 
   2323 /*
   2324  * Checks if a specific event is pending or scheduled.
   2325  */
   2326 
/* Return the subset of 'event' flags (EV_TIMEOUT|EV_READ|EV_WRITE|
 * EV_CLOSED|EV_SIGNAL) for which 'ev' is pending or active.  If 'tv' is
 * non-NULL and a timeout is both pending and requested, *tv receives the
 * absolute expiry time. */
int
event_pending(const struct event *ev, short event, struct timeval *tv)
{
	int flags = 0;

	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return 0;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
	event_debug_assert_is_setup_(ev);

	/* Gather which conditions the event is registered or active for. */
	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		/* Strip the common-timeout tag bits from tv_usec. */
		struct timeval tmp = ev->ev_timeout;
		tmp.tv_usec &= MICROSECONDS_MASK;
		/* correctly remap to real time */
		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
	}

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (flags & event);
}
   2361 
   2362 int
   2363 event_initialized(const struct event *ev)
   2364 {
   2365 	if (!(ev->ev_flags & EVLIST_INIT))
   2366 		return 0;
   2367 
   2368 	return 1;
   2369 }
   2370 
   2371 void
   2372 event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
   2373 {
   2374 	event_debug_assert_is_setup_(event);
   2375 
   2376 	if (base_out)
   2377 		*base_out = event->ev_base;
   2378 	if (fd_out)
   2379 		*fd_out = event->ev_fd;
   2380 	if (events_out)
   2381 		*events_out = event->ev_events;
   2382 	if (callback_out)
   2383 		*callback_out = event->ev_callback;
   2384 	if (arg_out)
   2385 		*arg_out = event->ev_arg;
   2386 }
   2387 
   2388 size_t
   2389 event_get_struct_event_size(void)
   2390 {
   2391 	return sizeof(struct event);
   2392 }
   2393 
   2394 evutil_socket_t
   2395 event_get_fd(const struct event *ev)
   2396 {
   2397 	event_debug_assert_is_setup_(ev);
   2398 	return ev->ev_fd;
   2399 }
   2400 
   2401 struct event_base *
   2402 event_get_base(const struct event *ev)
   2403 {
   2404 	event_debug_assert_is_setup_(ev);
   2405 	return ev->ev_base;
   2406 }
   2407 
   2408 short
   2409 event_get_events(const struct event *ev)
   2410 {
   2411 	event_debug_assert_is_setup_(ev);
   2412 	return ev->ev_events;
   2413 }
   2414 
   2415 event_callback_fn
   2416 event_get_callback(const struct event *ev)
   2417 {
   2418 	event_debug_assert_is_setup_(ev);
   2419 	return ev->ev_callback;
   2420 }
   2421 
   2422 void *
   2423 event_get_callback_arg(const struct event *ev)
   2424 {
   2425 	event_debug_assert_is_setup_(ev);
   2426 	return ev->ev_arg;
   2427 }
   2428 
   2429 int
   2430 event_get_priority(const struct event *ev)
   2431 {
   2432 	event_debug_assert_is_setup_(ev);
   2433 	return ev->ev_pri;
   2434 }
   2435 
   2436 int
   2437 event_add(struct event *ev, const struct timeval *tv)
   2438 {
   2439 	int res;
   2440 
   2441 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
   2442 		event_warnx("%s: event has no event_base set.", __func__);
   2443 		return -1;
   2444 	}
   2445 
   2446 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
   2447 
   2448 	res = event_add_nolock_(ev, tv, 0);
   2449 
   2450 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
   2451 
   2452 	return (res);
   2453 }
   2454 
   2455 /* Helper callback: wake an event_base from another thread.  This version
   2456  * works by writing a byte to one end of a socketpair, so that the event_base
   2457  * listening on the other end will wake up as the corresponding event
   2458  * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
	char buf[1];
	int r;
	buf[0] = (char) 0;
	/* Write one byte into the notification socketpair/pipe; the base's
	 * read end is watched by the loop, so this wakes it up. */
#ifdef _WIN32
	r = send(base->th_notify_fd[1], buf, 1, 0);
#else
	r = write(base->th_notify_fd[1], buf, 1);
#endif
	/* EAGAIN is treated as success: the buffer already holds unread
	 * wakeup data, so presumably the loop will wake regardless. */
	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
}
   2472 
#ifdef EVENT__HAVE_EVENTFD
/* Helper callback: wake an event_base from another thread.  This version
 * assumes that you have a working eventfd() implementation.  It writes an
 * 8-byte counter increment to the eventfd, retrying on EAGAIN. */
static int
evthread_notify_base_eventfd(struct event_base *base)
{
	ev_uint64_t msg = 1;
	int r;
	do {
		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
	} while (r < 0 && errno == EAGAIN);

	return (r < 0) ? -1 : 0;
}
#endif
   2488 
   2489 
   2490 /** Tell the thread currently running the event_loop for base (if any) that it
   2491  * needs to stop waiting in its dispatch function (if it is) and process all
   2492  * active callbacks. */
   2493 static int
   2494 evthread_notify_base(struct event_base *base)
   2495 {
   2496 	EVENT_BASE_ASSERT_LOCKED(base);
   2497 	if (!base->th_notify_fn)
   2498 		return -1;
   2499 	if (base->is_notify_pending)
   2500 		return 0;
   2501 	base->is_notify_pending = 1;
   2502 	return base->th_notify_fn(base);
   2503 }
   2504 
   2505 /* Implementation function to remove a timeout on a currently pending event.
   2506  */
/* Lock-held core of event_remove_timer: drop 'ev' from its timeout queue
 * (if any) and clear the stored I/O timeout, leaving the rest of the event
 * registration untouched.  Always returns 0. */
int
event_remove_timer_nolock_(struct event *ev)
{
	struct event_base *base = ev->ev_base;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug(("event_remove_timer_nolock: event: %p", ev));

	/* If it's not pending on a timeout, we don't need to do anything. */
	if (ev->ev_flags & EVLIST_TIMEOUT) {
		event_queue_remove_timeout(base, ev);
		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
	}

	return (0);
}
   2525 
   2526 int
   2527 event_remove_timer(struct event *ev)
   2528 {
   2529 	int res;
   2530 
   2531 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
   2532 		event_warnx("%s: event has no event_base set.", __func__);
   2533 		return -1;
   2534 	}
   2535 
   2536 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
   2537 
   2538 	res = event_remove_timer_nolock_(ev);
   2539 
   2540 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
   2541 
   2542 	return (res);
   2543 }
   2544 
   2545 /* Implementation function to add an event.  Works just like event_add,
   2546  * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
   2547  * we treat tv as an absolute time, not as an interval to add to the current
   2548  * time */
int
event_add_nolock_(struct event *ev, const struct timeval *tv,
    int tv_is_absolute)
{
	struct event_base *base = ev->ev_base;
	int res = 0;
	int notify = 0;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug((
		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
		 ev,
		 EV_SOCK_ARG(ev->ev_fd),
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* An event mid-finalization must not be re-added. */
	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return (-1);
	}

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve_(&base->timeheap,
			1 + min_heap_size_(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	/* If the main thread is currently executing a signal event's
	 * callback, and we are not the main thread, then we want to wait
	 * until the callback is done before we mess with the event, or else
	 * we can race on ev_ncalls and ev_pncalls below. */
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->current_event == event_to_event_callback(ev) &&
	    (ev->ev_events & EV_SIGNAL)
	    && !EVBASE_IN_THREAD(base)) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	/* Register the I/O or signal condition with the backend unless the
	 * event is already inserted or active. */
	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_add_(base, ev->ev_fd, ev);
		else if (ev->ev_events & EV_SIGNAL)
			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
		if (res != -1)
			event_queue_insert_inserted(base, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;
		int common_timeout;
#ifdef USE_REINSERT_TIMEOUT
		int was_common;
		int old_timeout_idx;
#endif

		/*
		 * for persistent timeout events, we remember the
		 * timeout value and re-add the event.
		 *
		 * If tv_is_absolute, this was already set.
		 */
		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
			ev->ev_io_timeout = *tv;

#ifndef USE_REINSERT_TIMEOUT
		if (ev->ev_flags & EVLIST_TIMEOUT) {
			event_queue_remove_timeout(base, ev);
		}
#endif

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			if (ev->ev_events & EV_SIGNAL) {
				/* See if we are just active executing
				 * this event in a loop
				 */
				if (ev->ev_ncalls && ev->ev_pncalls) {
					/* Abort loop */
					*ev->ev_pncalls = 0;
				}
			}

			event_queue_remove_active(base, event_to_event_callback(ev));
		}

		gettime(base, &now);

		common_timeout = is_common_timeout(tv, base);
#ifdef USE_REINSERT_TIMEOUT
		was_common = is_common_timeout(&ev->ev_timeout, base);
		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
#endif

		/* Compute the absolute expiry, preserving the common-timeout
		 * tag bits in tv_usec when applicable. */
		if (tv_is_absolute) {
			ev->ev_timeout = *tv;
		} else if (common_timeout) {
			struct timeval tmp = *tv;
			tmp.tv_usec &= MICROSECONDS_MASK;
			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
			ev->ev_timeout.tv_usec |=
			    (tv->tv_usec & ~MICROSECONDS_MASK);
		} else {
			evutil_timeradd(&now, tv, &ev->ev_timeout);
		}

		event_debug((
			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));

#ifdef USE_REINSERT_TIMEOUT
		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
#else
		event_queue_insert_timeout(base, ev);
#endif

		if (common_timeout) {
			/* First entry on a common-timeout list drives that
			 * list's representative timer. */
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			if (ev == TAILQ_FIRST(&ctl->events)) {
				common_timeout_schedule(ctl, &now, ev);
			}
		} else {
			struct event* top = NULL;
			/* See if the earliest timeout is now earlier than it
			 * was before: if so, we will need to tell the main
			 * thread to wake up earlier than it would otherwise.
			 * We double check the timeout of the top element to
			 * handle time distortions due to system suspension.
			 */
			if (min_heap_elt_is_top_(ev))
				notify = 1;
			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
					 evutil_timercmp(&top->ev_timeout, &now, <))
				notify = 1;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_add_(ev);

	return (res);
}
   2720 
   2721 static int
   2722 event_del_(struct event *ev, int blocking)
   2723 {
   2724 	int res;
   2725 
   2726 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
   2727 		event_warnx("%s: event has no event_base set.", __func__);
   2728 		return -1;
   2729 	}
   2730 
   2731 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
   2732 
   2733 	res = event_del_nolock_(ev, blocking);
   2734 
   2735 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
   2736 
   2737 	return (res);
   2738 }
   2739 
   2740 int
   2741 event_del(struct event *ev)
   2742 {
   2743 	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
   2744 }
   2745 
   2746 int
   2747 event_del_block(struct event *ev)
   2748 {
   2749 	return event_del_(ev, EVENT_DEL_BLOCK);
   2750 }
   2751 
   2752 int
   2753 event_del_noblock(struct event *ev)
   2754 {
   2755 	return event_del_(ev, EVENT_DEL_NOBLOCK);
   2756 }
   2757 
/** Helper for event_del: always called with th_base_lock held.
 *
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
 * EVEN_IF_FINALIZING} values. See those for more information.
 *
 * Returns 0 on success, -1 if the event has no base or the backend
 * delete failed.
 */
int
event_del_nolock_(struct event *ev, int blocking)
{
	struct event_base *base;
	int res = 0, notify = 0;

	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);

	/* Unless explicitly overridden, leave an event that is already
	 * being finalized alone. */
	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
		if (ev->ev_flags & EVLIST_FINALIZING) {
			/* XXXX Debug */
			return 0;
		}
	}

	/* If the main thread is currently executing this event's callback,
	 * and we are not the main thread, then we want to wait until the
	 * callback is done before we start removing the event.  That way,
	 * when this function returns, it will be safe to free the
	 * user-supplied argument. */
	base = ev->ev_base;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (blocking != EVENT_DEL_NOBLOCK &&
	    base->current_event == event_to_event_callback(ev) &&
	    !EVBASE_IN_THREAD(base) &&
	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_events & EV_SIGNAL) {
		if (ev->ev_ncalls && ev->ev_pncalls) {
			/* Abort loop */
			*ev->ev_pncalls = 0;
		}
	}

	if (ev->ev_flags & EVLIST_TIMEOUT) {
		/* NOTE: We never need to notify the main thread because of a
		 * deleted timeout event: all that could happen if we don't is
		 * that the dispatch loop might wake up too early.  But the
		 * point of notifying the main thread _is_ to wake up the
		 * dispatch loop early anyway, so we wouldn't gain anything by
		 * doing it.
		 */
		event_queue_remove_timeout(base, ev);
	}

	/* Drop the event from whichever active queue it currently sits on. */
	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove_active(base, event_to_event_callback(ev));
	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
		event_queue_remove_active_later(base, event_to_event_callback(ev));

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove_inserted(base, ev);
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_del_(base, ev->ev_fd, ev);
		else
			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_del_(ev);

	return (res);
}
   2848 
   2849 void
   2850 event_active(struct event *ev, int res, short ncalls)
   2851 {
   2852 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
   2853 		event_warnx("%s: event has no event_base set.", __func__);
   2854 		return;
   2855 	}
   2856 
   2857 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
   2858 
   2859 	event_debug_assert_is_setup_(ev);
   2860 
   2861 	event_active_nolock_(ev, res, ncalls);
   2862 
   2863 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
   2864 }
   2865 
   2866 
/* Activate 'ev' immediately; caller must hold th_base_lock.  If the event
 * is already on an active queue the result flags are merged in instead. */
void
event_active_nolock_(struct event *ev, int res, short ncalls)
{
	struct event_base *base;

	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));

	base = ev->ev_base;
	EVENT_BASE_ASSERT_LOCKED(base);

	/* Never resurrect an event that is being finalized. */
	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return;
	}

	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* Being on both queues at once is an invariant violation. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		ev->ev_res |= res;
		return;
	case EVLIST_ACTIVE_LATER:
		ev->ev_res |= res;
		break;
	case 0:
		ev->ev_res = res;
		break;
	}

	/* Interrupt the current pass if this event outranks what's running. */
	if (ev->ev_pri < base->event_running_priority)
		base->event_continue = 1;

	if (ev->ev_events & EV_SIGNAL) {
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		/* Wait for a concurrently running callback before touching
		 * ev_ncalls/ev_pncalls, which that callback may be using. */
		if (base->current_event == event_to_event_callback(ev) &&
		    !EVBASE_IN_THREAD(base)) {
			++base->current_event_waiters;
			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
		}
#endif
		ev->ev_ncalls = ncalls;
		ev->ev_pncalls = NULL;
	}

	event_callback_activate_nolock_(base, event_to_event_callback(ev));
}
   2917 
   2918 void
   2919 event_active_later_(struct event *ev, int res)
   2920 {
   2921 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
   2922 	event_active_later_nolock_(ev, res);
   2923 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
   2924 }
   2925 
   2926 void
   2927 event_active_later_nolock_(struct event *ev, int res)
   2928 {
   2929 	struct event_base *base = ev->ev_base;
   2930 	EVENT_BASE_ASSERT_LOCKED(base);
   2931 
   2932 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
   2933 		/* We get different kinds of events, add them together */
   2934 		ev->ev_res |= res;
   2935 		return;
   2936 	}
   2937 
   2938 	ev->ev_res = res;
   2939 
   2940 	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
   2941 }
   2942 
   2943 int
   2944 event_callback_activate_(struct event_base *base,
   2945     struct event_callback *evcb)
   2946 {
   2947 	int r;
   2948 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   2949 	r = event_callback_activate_nolock_(base, evcb);
   2950 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   2951 	return r;
   2952 }
   2953 
/* Put 'evcb' on the active queue for the current loop pass; caller must
 * hold th_base_lock.  Returns 1 if the callback was newly activated,
 * 0 if it was already scheduled or is being finalized. */
int
event_callback_activate_nolock_(struct event_base *base,
    struct event_callback *evcb)
{
	int r = 1;

	if (evcb->evcb_flags & EVLIST_FINALIZING)
		return 0;

	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
	default:
		EVUTIL_ASSERT(0);
		/* FALLTHROUGH -- the assert is a no-op in release builds. */
	case EVLIST_ACTIVE_LATER:
		/* Promote from the "run next pass" queue to the active one. */
		event_queue_remove_active_later(base, evcb);
		r = 0;
		break;
	case EVLIST_ACTIVE:
		return 0;
	case 0:
		break;
	}

	event_queue_insert_active(base, evcb);

	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	return r;
}
   2983 
   2984 int
   2985 event_callback_activate_later_nolock_(struct event_base *base,
   2986     struct event_callback *evcb)
   2987 {
   2988 	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
   2989 		return 0;
   2990 
   2991 	event_queue_insert_active_later(base, evcb);
   2992 	if (EVBASE_NEED_NOTIFY(base))
   2993 		evthread_notify_base(base);
   2994 	return 1;
   2995 }
   2996 
/* Zero-initialize 'cb' and give it the lowest priority on 'base'
 * (priorities are 0..nactivequeues-1, higher number = lower priority). */
void
event_callback_init_(struct event_base *base,
    struct event_callback *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_pri = base->nactivequeues - 1;
}
   3004 
   3005 int
   3006 event_callback_cancel_(struct event_base *base,
   3007     struct event_callback *evcb)
   3008 {
   3009 	int r;
   3010 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3011 	r = event_callback_cancel_nolock_(base, evcb, 0);
   3012 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3013 	return r;
   3014 }
   3015 
/* Cancel 'evcb'; caller must hold th_base_lock.  If the callback belongs
 * to a full struct event (EVLIST_INIT set) this delegates to
 * event_del_nolock_(); otherwise it is simply pulled off whichever active
 * queue it occupies. */
int
event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing)
{
	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
		return 0;

	if (evcb->evcb_flags & EVLIST_INIT)
		return event_del_nolock_(event_callback_to_event(evcb),
		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);

	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* Being on both queues at once is an invariant violation. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		event_queue_remove_active(base, evcb);
		return 0;
	case EVLIST_ACTIVE_LATER:
		event_queue_remove_active_later(base, evcb);
		break;
	case 0:
		break;
	}

	return 0;
}
   3045 
   3046 void
   3047 event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
   3048 {
   3049 	memset(cb, 0, sizeof(*cb));
   3050 	cb->evcb_cb_union.evcb_selfcb = fn;
   3051 	cb->evcb_arg = arg;
   3052 	cb->evcb_pri = priority;
   3053 	cb->evcb_closure = EV_CLOSURE_CB_SELF;
   3054 }
   3055 
/* Change the priority of an already-initialized deferred callback. */
void
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
{
	cb->evcb_pri = priority;
}
   3061 
   3062 void
   3063 event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
   3064 {
   3065 	if (!base)
   3066 		base = current_base;
   3067 	event_callback_cancel_(base, cb);
   3068 }
   3069 
   3070 #define MAX_DEFERREDS_QUEUED 32
   3071 int
   3072 event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
   3073 {
   3074 	int r = 1;
   3075 	if (!base)
   3076 		base = current_base;
   3077 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3078 	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
   3079 		r = event_callback_activate_later_nolock_(base, cb);
   3080 	} else {
   3081 		r = event_callback_activate_nolock_(base, cb);
   3082 		if (r) {
   3083 			++base->n_deferreds_queued;
   3084 		}
   3085 	}
   3086 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3087 	return r;
   3088 }
   3089 
   3090 static int
   3091 timeout_next(struct event_base *base, struct timeval **tv_p)
   3092 {
   3093 	/* Caller must hold th_base_lock */
   3094 	struct timeval now;
   3095 	struct event *ev;
   3096 	struct timeval *tv = *tv_p;
   3097 	int res = 0;
   3098 
   3099 	ev = min_heap_top_(&base->timeheap);
   3100 
   3101 	if (ev == NULL) {
   3102 		/* if no time-based events are active wait for I/O */
   3103 		*tv_p = NULL;
   3104 		goto out;
   3105 	}
   3106 
   3107 	if (gettime(base, &now) == -1) {
   3108 		res = -1;
   3109 		goto out;
   3110 	}
   3111 
   3112 	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
   3113 		evutil_timerclear(tv);
   3114 		goto out;
   3115 	}
   3116 
   3117 	evutil_timersub(&ev->ev_timeout, &now, tv);
   3118 
   3119 	EVUTIL_ASSERT(tv->tv_sec >= 0);
   3120 	EVUTIL_ASSERT(tv->tv_usec >= 0);
   3121 	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
   3122 
   3123 out:
   3124 	return (res);
   3125 }
   3126 
/* Activate every event whose timeout has elapsed. */
static void
timeout_process(struct event_base *base)
{
	/* Caller must hold lock. */
	struct timeval now;
	struct event *ev;

	if (min_heap_empty_(&base->timeheap)) {
		return;
	}

	gettime(base, &now);

	/* Pop events off the min-heap until the earliest remaining deadline
	 * is still in the future. */
	while ((ev = min_heap_top_(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);

		event_debug(("timeout_process: event: %p, call %p",
			 ev, ev->ev_callback));
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
}
   3153 
/* The counting macros below rely on EVLIST_INTERNAL being exactly bit 4. */
#if (EVLIST_INTERNAL >> 4) != 1
#error "Mismatch for value of EVLIST_INTERNAL"
#endif

#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif

/* Record the high-water mark of a counter. */
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)

/* These are a fancy way to spell
     if (!(flags & EVLIST_INTERNAL))
         base->event_count--/++;
   i.e. internal events are excluded from the user-visible event count:
   (~((flags) >> 4) & 1) is 1 exactly when the EVLIST_INTERNAL bit is clear.
*/
#define DECR_EVENT_COUNT(base,flags) \
	((base)->event_count -= (~((flags) >> 4) & 1))
#define INCR_EVENT_COUNT(base,flags) do {					\
	((base)->event_count += (~((flags) >> 4) & 1));				\
	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
} while (0)
   3174 
/* Clear EVLIST_INSERTED on 'ev' and decrement the (non-internal) event
 * count.  Errors loudly if the event was not actually inserted. */
static void
event_queue_remove_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_INSERTED;
}
/* Unlink 'evcb' from its priority's active queue and update counters.
 * Errors loudly if the callback was not active. */
static void
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
			   evcb, EVLIST_ACTIVE);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE;
	base->event_count_active--;

	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
/* Unlink 'evcb' from the run-next-iteration queue and update counters.
 * Errors loudly if the callback was not on that queue. */
static void
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
			   evcb, EVLIST_ACTIVE_LATER);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
	base->event_count_active--;

	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
}
/* Remove 'ev' from the timeout machinery: either its common-timeout list
 * or the min-heap, depending on how its timeout was encoded. */
static void
event_queue_remove_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
	} else {
		min_heap_erase_(&base->timeheap, ev);
	}
}
   3239 
#ifdef USE_REINSERT_TIMEOUT
/* Remove and reinsert 'ev' into the timeout queue.
 * 'was_common'/'is_common' describe whether the old/new timeout uses the
 * common-timeout encoding; 'old_timeout_idx' locates the old common list. */
static void
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
    int was_common, int is_common, int old_timeout_idx)
{
	struct common_timeout_list *ctl;
	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
		/* Not currently queued: a plain insert suffices. */
		event_queue_insert_timeout(base, ev);
		return;
	}

	/* Dispatch on the (old, new) representation pair. */
	switch ((was_common<<1) | is_common) {
	case 3: /* Changing from one common timeout to another */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 2: /* Was common; is no longer common */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		min_heap_push_(&base->timeheap, ev);
		break;
	case 1: /* Wasn't common; has become common. */
		min_heap_erase_(&base->timeheap, ev);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 0: /* was in heap; is still on heap. */
		min_heap_adjust_(&base->timeheap, ev);
		break;
	default:
		EVUTIL_ASSERT(0); /* unreachable */
		break;
	}
}
#endif
   3280 
/* Add 'ev' to the common timeout list in 'ctl', keeping the list sorted
 * by deadline. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of 'ev' to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec.  Fortunately, they ought to have
		 * the _same_ magic values in tv_usec.  Let's assert for that.
		 */
		EVUTIL_ASSERT(
			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			return;
		}
	}
	/* 'ev' precedes everything on the list (or the list was empty). */
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
}
   3311 
/* Mark 'ev' as inserted into the base and bump the (non-internal) event
 * count.  Errors loudly on double insertion. */
static void
event_queue_insert_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_INSERTED;
}
   3327 
/* Append 'evcb' to the active queue for its priority and update the
 * active-event counters.  A no-op if it is already active. */
static void
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (evcb->evcb_flags & EVLIST_ACTIVE) {
		/* Double insertion is possible for active events */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);

	evcb->evcb_flags |= EVLIST_ACTIVE;

	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
   3348 
/* Append 'evcb' to the run-next-iteration queue and update counters.
 * A no-op if it is already scheduled on either active queue. */
static void
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
		/* Double insertion is possible */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
}
   3365 
/* Add 'ev' to the timeout machinery: a common-timeout list when its
 * timeout uses the common encoding, otherwise the min-heap. */
static void
event_queue_insert_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
	} else {
		min_heap_push_(&base->timeheap, ev);
	}
}
   3389 
/* Move everything on the run-next-iteration queue onto the regular
 * active queues, flipping EVLIST_ACTIVE_LATER to EVLIST_ACTIVE. */
static void
event_queue_make_later_events_active(struct event_base *base)
{
	struct event_callback *evcb;
	EVENT_BASE_ASSERT_LOCKED(base);

	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
		/* Self-callbacks count against the per-pass deferred budget. */
		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
	}
}
   3404 
   3405 /* Functions for debugging */
   3406 
/* Return the libevent version as a human-readable string. */
const char *
event_get_version(void)
{
	return (EVENT__VERSION);
}
   3412 
/* Return the libevent version as a packed numeric value for comparisons. */
ev_uint32_t
event_get_version_number(void)
{
	return (EVENT__NUMERIC_VERSION);
}
   3418 
   3419 /*
   3420  * No thread-safe interface needed - the information should be the same
   3421  * for all threads.
   3422  */
   3423 
/* Return the name of the backend used by the most recently created base.
 * NOTE(review): dereferences current_base without a NULL check — calling
 * this before any event_base has been created would crash; confirm all
 * callers create a base first. */
const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}
   3429 
   3430 #ifndef EVENT__DISABLE_MM_REPLACEMENT
/* User-replaceable memory-management hooks; NULL means "use the libc
 * default".  Installed via event_set_mem_functions(). */
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;
   3434 
   3435 void *
   3436 event_mm_malloc_(size_t sz)
   3437 {
   3438 	if (sz == 0)
   3439 		return NULL;
   3440 
   3441 	if (mm_malloc_fn_)
   3442 		return mm_malloc_fn_(sz);
   3443 	else
   3444 		return malloc(sz);
   3445 }
   3446 
/* calloc replacement honoring a user-installed malloc.  Returns NULL for
 * zero-sized requests, and NULL with errno=ENOMEM on overflow or (on
 * Windows) allocation failure. */
void *
event_mm_calloc_(size_t count, size_t size)
{
	if (count == 0 || size == 0)
		return NULL;

	if (mm_malloc_fn_) {
		size_t sz = count * size;
		void *p = NULL;
		/* Reject multiplications that would overflow size_t; the
		 * wrapped product in 'sz' is never used in that case. */
		if (count > EV_SIZE_MAX / size)
			goto error;
		p = mm_malloc_fn_(sz);
		if (p)
			return memset(p, 0, sz);
	} else {
		void *p = calloc(count, size);
#ifdef _WIN32
		/* Windows calloc doesn't reliably set ENOMEM */
		if (p == NULL)
			goto error;
#endif
		return p;
	}

error:
	errno = ENOMEM;
	return NULL;
}
   3475 
/* strdup replacement honoring a user-installed malloc.  Returns NULL with
 * errno=EINVAL for a NULL input, or NULL with errno=ENOMEM on allocation
 * failure in the hook path. */
char *
event_mm_strdup_(const char *str)
{
	if (!str) {
		errno = EINVAL;
		return NULL;
	}

	if (mm_malloc_fn_) {
		size_t ln = strlen(str);
		void *p = NULL;
		/* ln+1 would wrap to 0 at EV_SIZE_MAX. */
		if (ln == EV_SIZE_MAX)
			goto error;
		p = mm_malloc_fn_(ln+1);
		if (p)
			return memcpy(p, str, ln+1);
	} else
#ifdef _WIN32
		return _strdup(str);
#else
		return strdup(str);
#endif

error:
	errno = ENOMEM;
	return NULL;
}
   3503 
   3504 void *
   3505 event_mm_realloc_(void *ptr, size_t sz)
   3506 {
   3507 	if (mm_realloc_fn_)
   3508 		return mm_realloc_fn_(ptr, sz);
   3509 	else
   3510 		return realloc(ptr, sz);
   3511 }
   3512 
   3513 void
   3514 event_mm_free_(void *ptr)
   3515 {
   3516 	if (mm_free_fn_)
   3517 		mm_free_fn_(ptr);
   3518 	else
   3519 		free(ptr);
   3520 }
   3521 
/* Install replacement malloc/realloc/free hooks used by all event_mm_*
 * helpers.  Passing NULLs restores the libc defaults. */
void
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
			void *(*realloc_fn)(void *ptr, size_t sz),
			void (*free_fn)(void *ptr))
{
	mm_malloc_fn_ = malloc_fn;
	mm_realloc_fn_ = realloc_fn;
	mm_free_fn_ = free_fn;
}
   3531 #endif
   3532 
   3533 #ifdef EVENT__HAVE_EVENTFD
/* Internal-event callback: drain the eventfd used for cross-thread
 * wakeups and clear the pending-notification flag. */
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t msg;
	ev_ssize_t r;
	struct event_base *base = arg;

	/* eventfd reads deliver a single 8-byte counter value. */
	r = read(fd, (void*) &msg, sizeof(msg));
	if (r<0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3549 #endif
   3550 
/* Internal-event callback: drain the notification pipe/socketpair used
 * for cross-thread wakeups and clear the pending-notification flag. */
static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	unsigned char buf[1024];
	struct event_base *base = arg;
	/* Read until the descriptor is empty; it is nonblocking, so the
	 * loop ends when no bytes remain. */
#ifdef _WIN32
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3568 
   3569 int
   3570 evthread_make_base_notifiable(struct event_base *base)
   3571 {
   3572 	int r;
   3573 	if (!base)
   3574 		return -1;
   3575 
   3576 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3577 	r = evthread_make_base_notifiable_nolock_(base);
   3578 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3579 	return r;
   3580 }
   3581 
/* Lock-held setup of the wakeup mechanism, trying in order: kqueue's
 * native notification, eventfd, then an internal pipe/socketpair.
 * Returns 0 on success (or if already notifiable), -1 on failure. */
static int
evthread_make_base_notifiable_nolock_(struct event_base *base)
{
	void (*cb)(evutil_socket_t, short, void *);
	int (*notify)(struct event_base *);

	if (base->th_notify_fn != NULL) {
		/* The base is already notifiable: we're doing fine. */
		return 0;
	}

#if defined(EVENT__HAVE_WORKING_KQUEUE)
	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
		base->th_notify_fn = event_kq_notify_base_;
		/* No need to add an event here; the backend can wake
		 * itself up just fine. */
		return 0;
	}
#endif

#ifdef EVENT__HAVE_EVENTFD
	/* Prefer eventfd: a single fd, nonblocking and close-on-exec. */
	base->th_notify_fd[0] = evutil_eventfd_(0,
	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
	if (base->th_notify_fd[0] >= 0) {
		base->th_notify_fd[1] = -1;
		notify = evthread_notify_base_eventfd;
		cb = evthread_notify_drain_eventfd;
	} else
#endif
	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
		notify = evthread_notify_base_default;
		cb = evthread_notify_drain_default;
	} else {
		return -1;
	}

	base->th_notify_fn = notify;

	/* prepare an event that we can use for wakeup */
	event_assign(&base->th_notify, base, base->th_notify_fd[0],
				 EV_READ|EV_PERSIST, cb, base);

	/* we need to mark this as internal event */
	base->th_notify.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&base->th_notify, 0);

	return event_add_nolock_(&base->th_notify, NULL, 0);
}
   3630 
/* Invoke 'fn' once for every event known to 'base', visiting each event
 * exactly once; caller must hold th_base_lock.  Stops early and returns
 * fn's value the first time it returns nonzero; otherwise returns 0. */
int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the common-timeout queues. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't touched
	 * yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (evlist_init clear), or
				 * we already processed it. (inserted or
				 * timeout set */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}
   3690 
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the inserted events. */
static int
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
		return 0;

	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s",
	    e, gloss, EV_SOCK_ARG(e->ev_fd),
	    (e->ev_events&EV_READ)?" Read":"",
	    (e->ev_events&EV_WRITE)?" Write":"",
	    (e->ev_events&EV_CLOSED)?" EOF":"",
	    (e->ev_events&EV_SIGNAL)?" Signal":"",
	    (e->ev_events&EV_PERSIST)?" Persist":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
	if (e->ev_flags & EVLIST_TIMEOUT) {
		struct timeval tv;
		tv.tv_sec = e->ev_timeout.tv_sec;
		/* Mask off the common-timeout magic bits stored in tv_usec
		 * before printing, and convert back to wall-clock time. */
		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
		fprintf(output, " Timeout=%ld.%06d",
		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
	}
	fputc('\n', output);

	return 0;
}
   3723 
/* Helper for event_base_dump_events: called on each event in the event base;
 * dumps only the active events. */
static int
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
		return 0;

	/* Note: prints ev_res (the triggered result flags), not ev_events. */
	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
	    e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
	    (e->ev_res&EV_READ)?" Read":"",
	    (e->ev_res&EV_WRITE)?" Write":"",
	    (e->ev_res&EV_CLOSED)?" EOF":"",
	    (e->ev_res&EV_SIGNAL)?" Signal":"",
	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");

	return 0;
}
   3748 
   3749 int
   3750 event_base_foreach_event(struct event_base *base,
   3751     event_base_foreach_event_cb fn, void *arg)
   3752 {
   3753 	int r;
   3754 	if ((!fn) || (!base)) {
   3755 		return -1;
   3756 	}
   3757 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3758 	r = event_base_foreach_event_nolock_(base, fn, arg);
   3759 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3760 	return r;
   3761 }
   3762 
   3763 
   3764 void
   3765 event_base_dump_events(struct event_base *base, FILE *output)
   3766 {
   3767 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3768 	fprintf(output, "Inserted events:\n");
   3769 	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);
   3770 
   3771 	fprintf(output, "Active events:\n");
   3772 	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
   3773 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3774 }
   3775 
   3776 void
   3777 event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
   3778 {
   3779 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3780 	evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
   3781 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3782 }
   3783 
/* Activate every event on 'base' registered for signal 'sig', as if the
 * signal had been delivered.  Takes and releases the base lock. */
void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	/* Third argument is 1 — presumably a single activation per matching
	 * event; confirm against evmap_signal_active_'s definition. */
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3791 
   3792 
/* Increment the count of "virtual" events on 'base', under the base lock.
 * NOTE(review): virtual events appear to exist purely as a counter (no
 * fd/signal/timeout is registered here) — confirm intended semantics with
 * callers of this internal API. */
void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	/* Track the high-water mark of concurrent virtual events. */
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3801 
   3802 void
   3803 event_base_del_virtual_(struct event_base *base)
   3804 {
   3805 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3806 	EVUTIL_ASSERT(base->virtual_event_count > 0);
   3807 	base->virtual_event_count--;
   3808 	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
   3809 		evthread_notify_base(base);
   3810 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3811 }
   3812 
   3813 static void
   3814 event_free_debug_globals_locks(void)
   3815 {
   3816 #ifndef EVENT__DISABLE_THREAD_SUPPORT
   3817 #ifndef EVENT__DISABLE_DEBUG_MODE
   3818 	if (event_debug_map_lock_ != NULL) {
   3819 		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
   3820 		event_debug_map_lock_ = NULL;
   3821 		evthreadimpl_disable_lock_debugging_();
   3822 	}
   3823 #endif /* EVENT__DISABLE_DEBUG_MODE */
   3824 #endif /* EVENT__DISABLE_THREAD_SUPPORT */
   3825 	return;
   3826 }
   3827 
/* Free global state used for event debugging; currently just the locks. */
static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}
   3833 
/* Forward to evsig_free_globals_() to release its global state. */
static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}
   3839 
/* Forward to evutil_free_globals_() to release its global state. */
static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}
   3845 
/* Free all of libevent's global state: debug locks first, then the
 * signal subsystem's globals, then evutil's globals. */
static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}
   3853 
/* Public API: tear down library-wide state — disable debug mode, then
 * free all global allocations and locks. */
void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}
   3860 
   3861 #ifndef EVENT__DISABLE_THREAD_SUPPORT
   3862 int
   3863 event_global_setup_locks_(const int enable_locks)
   3864 {
   3865 #ifndef EVENT__DISABLE_DEBUG_MODE
   3866 	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
   3867 #endif
   3868 	if (evsig_global_setup_locks_(enable_locks) < 0)
   3869 		return -1;
   3870 	if (evutil_global_setup_locks_(enable_locks) < 0)
   3871 		return -1;
   3872 	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
   3873 		return -1;
   3874 	return 0;
   3875 }
   3876 #endif
   3877 
/* Run the event_base consistency checks while holding the base lock. */
void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3885 
   3886 void
   3887 event_base_assert_ok_nolock_(struct event_base *base)
   3888 {
   3889 	int i;
   3890 	int count;
   3891 
   3892 	/* First do checks on the per-fd and per-signal lists */
   3893 	evmap_check_integrity_(base);
   3894 
   3895 	/* Check the heap property */
   3896 	for (i = 1; i < (int)base->timeheap.n; ++i) {
   3897 		int parent = (i - 1) / 2;
   3898 		struct event *ev, *p_ev;
   3899 		ev = base->timeheap.p[i];
   3900 		p_ev = base->timeheap.p[parent];
   3901 		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
   3902 		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
   3903 		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
   3904 	}
   3905 
   3906 	/* Check that the common timeouts are fine */
   3907 	for (i = 0; i < base->n_common_timeouts; ++i) {
   3908 		struct common_timeout_list *ctl = base->common_timeout_queues[i];
   3909 		struct event *last=NULL, *ev;
   3910 
   3911 		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);
   3912 
   3913 		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
   3914 			if (last)
   3915 				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
   3916 			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
   3917 			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
   3918 			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
   3919 			last = ev;
   3920 		}
   3921 	}
   3922 
   3923 	/* Check the active queues. */
   3924 	count = 0;
   3925 	for (i = 0; i < base->nactivequeues; ++i) {
   3926 		struct event_callback *evcb;
   3927 		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
   3928 		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
   3929 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
   3930 			EVUTIL_ASSERT(evcb->evcb_pri == i);
   3931 			++count;
   3932 		}
   3933 	}
   3934 
   3935 	{
   3936 		struct event_callback *evcb;
   3937 		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
   3938 			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
   3939 			++count;
   3940 		}
   3941 	}
   3942 	EVUTIL_ASSERT(count == base->event_count_active);
   3943 }
   3944